# NOTE(review): the three lines originally here were a dataset-export table
# header ("input stringlengths ... | value | |---|---|"), not Python source.
# Converted to comments so the file remains syntactically valid.
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Persistence of cluster configuration.
"""
from base64 import b16encode
from calendar import timegm
from datetime import datetime
from json import dumps, loads
from mmh3 import hash_bytes as mmh3_hash_bytes
from uuid import UUID
from collections import Set, Mapping, Iterable
from eliot import Logger, write_traceback, MessageType, Field, ActionType
from pyrsistent import PRecord, PVector, PMap, PSet, pmap, PClass
from pytz import UTC
from twisted.python.filepath import FilePath
from twisted.application.service import Service, MultiService
from twisted.internet.defer import succeed
from twisted.internet.task import LoopingCall
from weakref import WeakKeyDictionary
from ._model import (
SERIALIZABLE_CLASSES, Deployment, Configuration, GenerationHash
)
# The class at the root of the configuration tree.
ROOT_CLASS = Deployment

# Serialization marker storing the class name alongside each encoded
# object, so the decoder knows which class to reconstruct.
_CLASS_MARKER = u"$__class__$"

# The latest configuration version. Configuration versions are
# always integers.
_CONFIG_VERSION = 6

# Map of serializable class names to classes, used to look up the target
# class when decoding a persisted configuration.
_CONFIG_CLASS_MAP = {cls.__name__: cls for cls in SERIALIZABLE_CLASSES}
class ConfigurationMigrationError(Exception):
    """
    Error raised when a configuration migration is unable to
    complete successfully.
    """
class MissingMigrationError(Exception):
    """
    Error raised when a configuration migration method cannot be found.

    The offending versions are exposed as ``source_version`` and
    ``target_version``; a human-readable explanation is available as
    ``message`` (also the exception's string form).
    """
    # Template for the human-readable explanation attached to the error.
    _MESSAGE_TEMPLATE = (
        u"Unable to find a migration path for a version {source} "
        u"to version {target} configuration. No migration method "
        u"upgrade_from_v{source} could be found."
    )

    def __init__(self, source_version, target_version):
        """
        Initialize a missing migration exception.

        :param int source_version: The version to migrate from.
        :param int target_version: The version to migrate to.
        """
        self.source_version = source_version
        self.target_version = target_version
        self.message = self._MESSAGE_TEMPLATE.format(
            source=source_version, target=target_version)
        super(MissingMigrationError, self).__init__(self.message)
def migrate_configuration(source_version, target_version,
                          config, migration_class):
    """
    Migrate a persisted configuration from one version to another
    in sequential upgrades, e.g. a source version of 1 and target
    version of 3 will perform two upgrades, from version 1 to 2,
    followed by 2 to 3.

    Calls the correct ``migration_class`` class methods for
    sequential upgrades between the suppled source and target versions.

    :param int source_version: The version to migrate from.
    :param int target_version: The version to migrate to.
    :param bytes config: The source configuration blob.
    :param class migration_class: The class containing the methods
        that will be used for migration.
    :return bytes: The updated configuration blob after migration.
    :raises MissingMigrationError: Raises this exception if any of the
        required upgrade methods cannot be found in the supplied migration
        class, before attempting to execute any upgrade paths.
    """
    upgraded_config = config
    current_version = source_version
    migrations_sequence = []
    # First pass: resolve every required upgrade method up front, so a
    # missing step is reported before any migration has been applied.
    for upgrade_version in range(source_version + 1, target_version + 1):
        # NOTE(review): _LOG_UPGRADE is defined elsewhere in this module;
        # the logged ``configuration`` is the blob *before* this step, and
        # the action wraps only the method lookup, not its later
        # execution -- confirm that scope is intended.
        with _LOG_UPGRADE(configuration=upgraded_config,
                          source_version=current_version,
                          target_version=upgrade_version):
            migration_method = u"upgrade_from_v%d" % current_version
            migration = getattr(migration_class, migration_method, None)
            if migration is None:
                raise MissingMigrationError(current_version, upgrade_version)
            migrations_sequence.append(migration)
        current_version += 1
    # Second pass: apply the migrations in order; each step's output is
    # the next step's input.
    for migration in migrations_sequence:
        upgraded_config = migration(upgraded_config)
    return upgraded_config
class ConfigurationMigration(object):
    """
    Migrate a JSON configuration from one version to another.

    Each ``upgrade_from_vN`` class method accepts the version-N JSON blob
    and returns the version N+1 blob; ``migrate_configuration`` chains
    them to cover multi-step upgrades.
    """
    @classmethod
    def upgrade_from_v1(cls, config):
        """
        Migrate a v1 JSON configuration to v2.

        v1 stored the bare deployment; v2 wraps it in a ``Configuration``
        envelope carrying an explicit version number.

        :param bytes config: The v1 JSON data.
        :return bytes: The v2 JSON data.
        """
        v1_config = loads(config)
        v2_config = {
            _CLASS_MARKER: u"Configuration",
            u"version": 2,
            u"deployment": v1_config,
        }
        return dumps(v2_config)

    @classmethod
    def upgrade_from_v2(cls, config):
        """
        Migrate a v2 JSON configuration to v3.

        v3 adds an (empty) ``leases`` map to the deployment.

        :param bytes config: The v2 JSON data.
        :return bytes: The v3 JSON data.
        """
        decoded_config = loads(config)
        decoded_config[u"version"] = 3
        decoded_config[u"deployment"][u"leases"] = {
            u"values": [], _CLASS_MARKER: u"PMap",
        }
        return dumps(decoded_config)

    @classmethod
    def upgrade_from_v3(cls, config):
        """
        Migrate a v3 JSON configuration to v4.

        v4 adds an (empty) ``persistent_state`` record to the deployment.

        :param bytes config: The v3 JSON data.
        :return bytes: The v4 JSON data.
        """
        decoded_config = loads(config)
        decoded_config[u"version"] = 4
        decoded_config[u"deployment"][u"persistent_state"] = {
            _CLASS_MARKER: u"PersistentState",
            u"blockdevice_ownership": {
                # Consistency: unicode marker value like the other
                # migrations (equal under Python 2, identical JSON).
                u"values": [], _CLASS_MARKER: u"PMap",
            },
        }
        return dumps(decoded_config)

    @classmethod
    def upgrade_from_v4(cls, config):
        """
        Migrate a v4 JSON configuration to v5.

        v5 converts each node's ``applications`` list and the deployment's
        ``nodes`` list into keyed maps (by application name and node uuid
        respectively).

        :param bytes config: The v4 JSON data.
        :return bytes: The v5 JSON data.
        """
        decoded_config = loads(config)
        decoded_config[u"version"] = 5
        try:
            nodes = decoded_config[u"deployment"][u"nodes"]
        except KeyError:
            # No nodes recorded; nothing further to rewrite.
            pass
        else:
            new_node_values = []
            for n in nodes:
                new_node = n
                new_node[u"applications"] = {
                    u"values": [(a[u"name"], a) for a in n[u"applications"]],
                    _CLASS_MARKER: u"PMap",
                }
                new_node_values.append((new_node["uuid"], new_node))
            decoded_config[u"deployment"][u"nodes"] = {
                u"values": new_node_values,
                _CLASS_MARKER: u"PMap",
            }
        return dumps(decoded_config)

    @classmethod
    def upgrade_from_v5(cls, config):
        """
        Migrate a v5 JSON configuration to v6.

        v6 adds a ``swappiness`` field (default 0) to every application on
        every node.

        :param bytes config: The v5 JSON data.
        :return bytes: The v6 JSON data.
        """
        decoded_config = loads(config)
        decoded_config[u"version"] = 6
        try:
            nodes = decoded_config[u"deployment"][u"nodes"]
        except KeyError:
            # No nodes recorded; nothing further to rewrite.
            pass
        else:
            new_node_values = []
            for node in nodes[u"values"]:
                uuid = node[0]
                applications = node[1][u"applications"][u"values"]
                for app in applications:
                    app[1].update({u'swappiness': 0})
                new_node = node[1]
                new_node[u"applications"][u"values"] = applications
                new_node_values.append((uuid, new_node))
            decoded_config[u"deployment"][u"nodes"] = {
                u"values": new_node_values,
                _CLASS_MARKER: u"PMap",
            }
        return dumps(decoded_config)
def _to_serializables(obj):
    """
    This function turns assorted types into serializable objects (objects that
    can be serialized by the default JSON encoder). Note that this is done
    shallowly for containers. For example, ``PClass``es will be turned into
    dicts, but the values and keys of the dict might still not be serializable.
    It is up to higher layers to traverse containers recursively to achieve
    full serialization.

    :param obj: The object to serialize.
    :returns: An object that is shallowly JSON serializable.
    """
    if isinstance(obj, PRecord):
        # PRecord iterates as (field, value) pairs, so dict() works
        # directly; tag with the class name for the decoder.
        result = dict(obj)
        result[_CLASS_MARKER] = obj.__class__.__name__
        return result
    elif isinstance(obj, PClass):
        result = obj._to_dict()
        result[_CLASS_MARKER] = obj.__class__.__name__
        return result
    elif isinstance(obj, PMap):
        # Encoded as a list of (key, value) pairs rather than a JSON
        # object, since PMap keys need not be strings.
        return {_CLASS_MARKER: u"PMap", u"values": dict(obj).items()}
    elif isinstance(obj, (PSet, PVector, set)):
        # NOTE(review): set iteration order is unspecified, so repeated
        # serializations of the same set may differ -- presumably
        # acceptable to callers; confirm.
        return list(obj)
    elif isinstance(obj, FilePath):
        # FilePath.path is bytes here (Python 2 Twisted); decode for JSON.
        return {_CLASS_MARKER: u"FilePath",
                u"path": obj.path.decode("utf-8")}
    elif isinstance(obj, UUID):
        return {_CLASS_MARKER: u"UUID",
                "hex": unicode(obj)}
    elif isinstance(obj, datetime):
        if obj.tzinfo is None:
            # Naive datetimes are ambiguous; refuse to persist them.
            raise ValueError(
                "Datetime without a timezone: {}".format(obj))
        # Whole seconds since the epoch (UTC); utctimetuple() drops any
        # sub-second precision.
        return {_CLASS_MARKER: u"datetime",
                "seconds": timegm(obj.utctimetuple())}
    # Anything else is passed through and assumed already serializable.
    return obj
def _is_pyrsistent(obj):
    """
    Boolean check if an object is an instance of a pyrsistent object.

    :param obj: Any object.
    :return bool: ``True`` iff ``obj`` is one of the pyrsistent container
        or record types used by the serializer.
    """
    return isinstance(obj, (PRecord, PClass, PMap, PSet, PVector))
# Scalar types the JSON encoder accepts unchanged (Python 2 names).
_BASIC_JSON_TYPES = frozenset([str, unicode, int, long, float, bool])
# Sequence types encoded as JSON arrays.
_BASIC_JSON_LISTS = frozenset([list, tuple])
# Basic container types that need only recursive traversal, not
# shallow conversion by _to_serializables.
_BASIC_JSON_COLLECTIONS = frozenset([dict]).union(_BASIC_JSON_LISTS)
# Sentinel distinguishing "absent from cache" from a cached None.
_UNCACHED_SENTINEL = object()
# Serialization cache keyed weakly, so entries vanish when the
# pyrsistent objects they describe are garbage collected.
_cached_dfs_serialize_cache = WeakKeyDictionary()
def _cached_dfs_serialize(input_object):
    """
    This serializes an input object into something that can be serialized by
    the python json encoder.

    This caches the serialization of pyrsistent objects in a
    ``WeakKeyDictionary``, so the cache should be automatically cleared when
    the input object that is cached is destroyed.

    :returns: An entirely serializable version of input_object.
    """
    # Ensure this is a quick function for basic types:
    if input_object is None:
        return None
    # Note that ``type(x) in frozenset([str, int])`` is faster than
    # ``isinstance(x, (str, int))``.
    input_type = type(input_object)
    if input_type in _BASIC_JSON_TYPES:
        return input_object
    is_pyrsistent = False
    if input_type in _BASIC_JSON_COLLECTIONS:
        # Don't send basic collections through shallow object serialization,
        # isinstance is not a very cheap operation.
        obj = input_object
    else:
        if _is_pyrsistent(input_object):
            is_pyrsistent = True
            # Using ``dict.get`` and a sentinel rather than the more pythonic
            # try/except KeyError for performance. This function is highly
            # recursive and the KeyError is guaranteed to happen the first
            # time every object is serialized. We do not want to incur the cost
            # of a caught exception for every pyrsistent object ever
            # serialized.
            cached_value = _cached_dfs_serialize_cache.get(input_object,
                                                           _UNCACHED_SENTINEL)
            if cached_value is not _UNCACHED_SENTINEL:
                return cached_value
        # Shallow conversion; the loop below recurses into the result.
        obj = _to_serializables(input_object)
    result = obj
    obj_type = type(obj)
    if obj_type == dict:
        # Recurse into both keys and values (keys may themselves be
        # serialized structures at this layer).
        result = dict((_cached_dfs_serialize(key),
                       _cached_dfs_serialize(value))
                      for key, value in obj.iteritems())
    elif obj_type == list or obj_type == tuple:
        result = list(_cached_dfs_serialize(x) for x in obj)
    if is_pyrsistent:
        # Only pyrsistent (immutable, hashable, weak-referenceable)
        # objects are safe to cache.
        _cached_dfs_serialize_cache[input_object] = result
    return result
# A couple tokens that are used below in the generation hash.
# NOTE(review): the _MAPPING_TOKEN and _STR_TOKEN lines were corrupted in
# this copy of the file ("mm<PASSWORD>_<PASSWORD>"); restored to the
# ``mmh3_hash_bytes`` helper imported at the top of this module, matching
# the intact _NULLSET_TOKEN line.
_NULLSET_TOKEN = mmh3_hash_bytes(b'NULLSET')
_MAPPING_TOKEN = mmh3_hash_bytes(b'MAPPING')
_STR_TOKEN = mmh3_hash_bytes(b'STRING')
# Weak-keyed cache of generation hashes for pyrsistent objects.
_generation_hash_cache = WeakKeyDictionary()
def _xor_bytes(aggregating_bytearray, updating_bytes):
"""
Aggregate bytes into a bytearray using XOR.
This function has a somewhat particular function signature in order for it
to be compatible with a call to `reduce`
:param bytearray aggregating_bytearray: Resulting bytearray to aggregate
the XOR of both input arguments byte-by-byte.
:param bytes updating_bytes: Additional bytes to be aggregated into the
other argument. It is assumed that this has the same size as
aggregating_bytearray.
:returns: aggregating_bytearray, after it has been modified by XORing all
of the bytes in the input bytearray with ``updating_bytes``.
"""
for i in xrange(len(aggregating_bytearray)):
aggregating_bytearray[i] ^= ord(updating_bytes[i])
return aggregating_bytearray
def generation_hash(input_object):
"""
This computes the mmh3 hash for an input object, providing a consistent
hash of deeply persistent objects across python nodes and implementations.
:returns: An mmh3 hash of input_object.
"""
# Ensure this is a quick function for basic types:
# Note that ``type(x) in frozenset([str, | |
eighteenByteRipe
))
return queues.apiAddressGeneratorReturnQueue.get()
def HandleCreateChan(self, params):
    """
    Create a new chan from a base64-encoded passphrase.

    :param params: one-element sequence holding the base64 passphrase.
    :return: the newly created chan address.
    :raises APIError: 0 for missing parameters, 1 for a blank passphrase,
        24 if the chan address already exists.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        passphrase, = params
    passphrase = self._decode(passphrase, "base64")
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    # It would be nice to make the label the passphrase but it is
    # possible that the passphrase contains non-utf-8 characters.
    try:
        unicode(passphrase, 'utf-8')
        label = str_chan + ' ' + passphrase
    except UnicodeDecodeError:
        # Narrowed from a bare ``except:``: only a failed UTF-8 decode
        # should fall back to the repr() form of the label.
        label = str_chan + ' ' + repr(passphrase)
    addressVersionNumber = 4
    streamNumber = 1
    # Drain any stale results before asking the generator thread to work.
    queues.apiAddressGeneratorReturnQueue.queue.clear()
    logger.debug(
        'Requesting that the addressGenerator create chan %s.', passphrase)
    queues.addressGeneratorQueue.put((
        'createChan', addressVersionNumber, streamNumber, label,
        passphrase, True
    ))
    queueReturn = queues.apiAddressGeneratorReturnQueue.get()
    if len(queueReturn) == 0:
        raise APIError(24, 'Chan address is already present.')
    address = queueReturn[0]
    return address
def HandleJoinChan(self, params):
    """
    Join an existing chan given its passphrase and address.

    :param params: two-element sequence of (base64 passphrase, address).
    :return: the string ``"success"``.
    :raises APIError: 0/1 for bad parameters, 18 when the chan name does
        not match the supplied address, 24 when the chan address is
        already present.
    """
    if len(params) < 2:
        raise APIError(0, 'I need two parameters.')
    elif len(params) == 2:
        passphrase, suppliedAddress = params
    passphrase = self._decode(passphrase, "base64")
    if len(passphrase) == 0:
        raise APIError(1, 'The specified passphrase is blank.')
    # It would be nice to make the label the passphrase but it is
    # possible that the passphrase contains non-utf-8 characters.
    try:
        unicode(passphrase, 'utf-8')
        label = str_chan + ' ' + passphrase
    except UnicodeDecodeError:
        # Narrowed from a bare ``except:``: only a failed UTF-8 decode
        # should fall back to the repr() form of the label.
        label = str_chan + ' ' + repr(passphrase)
    # _verifyAddress raises APIError for malformed addresses; the
    # unpacked fields are not otherwise needed here.
    status, addressVersionNumber, streamNumber, toRipe = \
        self._verifyAddress(suppliedAddress)
    suppliedAddress = addBMIfNotPresent(suppliedAddress)
    queues.apiAddressGeneratorReturnQueue.queue.clear()
    queues.addressGeneratorQueue.put((
        'joinChan', suppliedAddress, label, passphrase, True
    ))
    addressGeneratorReturnValue = \
        queues.apiAddressGeneratorReturnQueue.get()
    # BUGFIX: test for an empty result *before* indexing element [0];
    # the previous order raised IndexError instead of APIError 24 when
    # the generator returned an empty list.
    if len(addressGeneratorReturnValue) == 0:
        raise APIError(24, 'Chan address is already present.')
    if addressGeneratorReturnValue[0] == \
            'chan name does not match address':
        raise APIError(18, 'Chan name does not match address.')
    # The created address (addressGeneratorReturnValue[0]) is not
    # currently returned to the caller.
    return "success"
def HandleLeaveChan(self, params):
    """
    Remove a chan address from keys.dat.

    Refuses to act on non-chan addresses (use deleteAddress for those).
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        address, = params
    # Raises APIError for malformed addresses; the decoded fields are
    # not otherwise used by this handler.
    status, addressVersionNumber, streamNumber, toRipe = (
        self._verifyAddress(address))
    address = addBMIfNotPresent(address)
    if not BMConfigParser().has_section(address):
        raise APIError(
            13, 'Could not find this address in your keys.dat file.')
    if not BMConfigParser().safeGetBoolean(address, 'chan'):
        raise APIError(
            25, 'Specified address is not a chan address.'
            ' Use deleteAddress API call instead.')
    # Drop the section and persist the updated key file.
    BMConfigParser().remove_section(address)
    with open(state.appdata + 'keys.dat', 'wb') as configfile:
        BMConfigParser().write(configfile)
    return 'success'
def HandleDeleteAddress(self, params):
    """
    Remove any address from keys.dat and refresh dependent state.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters.')
    elif len(params) == 1:
        address, = params
    # Raises APIError for malformed addresses; the decoded fields are
    # not otherwise used by this handler.
    status, addressVersionNumber, streamNumber, toRipe = (
        self._verifyAddress(address))
    address = addBMIfNotPresent(address)
    if not BMConfigParser().has_section(address):
        raise APIError(
            13, 'Could not find this address in your keys.dat file.')
    # Drop the section and persist the updated key file.
    BMConfigParser().remove_section(address)
    with open(state.appdata + 'keys.dat', 'wb') as configfile:
        BMConfigParser().write(configfile)
    # Tell the UI to redraw its message lists, then reload the
    # in-memory address hashes.
    for signal in ('rerenderMessagelistFromLabels',
                   'rerenderMessagelistToLabels'):
        queues.UISignalQueue.put((signal, ''))
    shared.reloadMyAddressHashes()
    return 'success'
def HandleGetAllInboxMessages(self, params):
    """
    Return a JSON listing of every message in the inbox folder,
    ordered by receive time.
    """
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, received, message,"
        " encodingtype, read FROM inbox where folder='inbox'"
        " ORDER BY received"
    )
    entries = []
    for (msgid, toAddress, fromAddress, subject, received, message,
            encodingtype, read) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'receivedTime': received,
            'read': read}, indent=4, separators=(',', ': ')))
    # Equivalent to the previous manual comma bookkeeping: one comma
    # between each pretty-printed message object.
    return '{"inboxMessages":[' + ','.join(entries) + ']}'
def HandleGetAllInboxMessageIds(self, params):
    """
    Return a JSON listing of the msgid of every inbox message,
    ordered by receive time.
    """
    queryreturn = sqlQuery(
        "SELECT msgid FROM inbox where folder='inbox' ORDER BY received")
    entries = [
        json.dumps({'msgid': hexlify(row[0])},
                   indent=4, separators=(',', ': '))
        for row in queryreturn
    ]
    return '{"inboxMessageIds":[' + ','.join(entries) + ']}'
def HandleGetInboxMessageById(self, params):
    # Fetch a single inbox message by hex msgid; an optional second
    # boolean parameter also sets the message's read flag.
    # NOTE(review): indentation was lost in this copy; the read-status
    # update block is reconstructed as belonging to the two-parameter
    # branch -- confirm against upstream.
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    elif len(params) == 1:
        msgid = self._decode(params[0], "hex")
    elif len(params) >= 2:
        msgid = self._decode(params[0], "hex")
        readStatus = params[1]
        if not isinstance(readStatus, bool):
            raise APIError(
                23, 'Bool expected in readStatus, saw %s instead.' %
                type(readStatus))
        queryreturn = sqlQuery(
            "SELECT read FROM inbox WHERE msgid=?", msgid)
        # UPDATE is slow, only update if status is different
        if queryreturn != [] and (queryreturn[0][0] == 1) != readStatus:
            sqlExecute(
                "UPDATE inbox set read = ? WHERE msgid=?",
                readStatus, msgid)
            # Tell the UI that the unread count may have changed.
            queues.UISignalQueue.put(('changedInboxUnread', None))
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, received, message,"
        " encodingtype, read FROM inbox WHERE msgid=?", msgid
    )
    data = '{"inboxMessage":['
    for row in queryreturn:
        msgid, toAddress, fromAddress, subject, received, message, \
            encodingtype, read = row
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        # msgid is the table key, so at most one row is expected.
        data += json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'receivedTime': received,
            'read': read}, indent=4, separators=(',', ': '))
    data += ']}'
    return data
def HandleGetAllSentMessages(self, params):
    """
    Return a JSON listing of every message in the sent folder,
    ordered by last action time.
    """
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
        " message, encodingtype, status, ackdata FROM sent"
        " WHERE folder='sent' ORDER BY lastactiontime"
    )
    entries = []
    for (msgid, toAddress, fromAddress, subject, lastactiontime, message,
            encodingtype, status, ackdata) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'lastActionTime': lastactiontime,
            'status': status,
            'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': ')))
    return '{"sentMessages":[' + ','.join(entries) + ']}'
def HandleGetAllSentMessageIds(self, params):
    """
    Return a JSON listing of the msgid of every sent message,
    ordered by last action time.
    """
    queryreturn = sqlQuery(
        "SELECT msgid FROM sent where folder='sent'"
        " ORDER BY lastactiontime"
    )
    entries = [
        json.dumps({'msgid': hexlify(row[0])},
                   indent=4, separators=(',', ': '))
        for row in queryreturn
    ]
    return '{"sentMessageIds":[' + ','.join(entries) + ']}'
def HandleInboxMessagesByReceiver(self, params):
    """
    Return every inbox message addressed to the given recipient.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    toAddress = params[0]
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, received, message,"
        " encodingtype FROM inbox WHERE folder='inbox' AND toAddress=?",
        toAddress)
    entries = []
    for (msgid, toAddress, fromAddress, subject, received, message,
            encodingtype) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'receivedTime': received}, indent=4, separators=(',', ': ')))
    return '{"inboxMessages":[' + ','.join(entries) + ']}'
def HandleGetSentMessageById(self, params):
    """
    Return a single sent message looked up by its hex-encoded msgid.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    msgid = self._decode(params[0], "hex")
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
        " message, encodingtype, status, ackdata FROM sent WHERE msgid=?",
        msgid
    )
    entries = []
    for (msgid, toAddress, fromAddress, subject, lastactiontime, message,
            encodingtype, status, ackdata) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'lastActionTime': lastactiontime,
            'status': status,
            'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': ')))
    # msgid is the table key, so at most one row is expected; entries are
    # concatenated without separators, matching the original output.
    return '{"sentMessage":[' + ''.join(entries) + ']}'
def HandleGetSentMessagesBySender(self, params):  # HandleGetSentMessagesByAddress
    """
    Return every sent-folder message originating from the given address,
    ordered by last action time.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    fromAddress = params[0]
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
        " message, encodingtype, status, ackdata FROM sent"
        " WHERE folder='sent' AND fromAddress=? ORDER BY lastactiontime",
        fromAddress
    )
    entries = []
    for (msgid, toAddress, fromAddress, subject, lastactiontime, message,
            encodingtype, status, ackdata) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'lastActionTime': lastactiontime,
            'status': status,
            'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': ')))
    return '{"sentMessages":[' + ','.join(entries) + ']}'
def HandleGetSentMessagesByAckData(self, params):
    """
    Return the sent message matching the given hex-encoded ackdata.
    """
    if len(params) == 0:
        raise APIError(0, 'I need parameters!')
    ackData = self._decode(params[0], "hex")
    queryreturn = sqlQuery(
        "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
        " message, encodingtype, status, ackdata FROM sent"
        " WHERE ackdata=?", ackData
    )
    entries = []
    for (msgid, toAddress, fromAddress, subject, lastactiontime, message,
            encodingtype, status, ackdata) in queryreturn:
        subject = shared.fixPotentiallyInvalidUTF8Data(subject)
        message = shared.fixPotentiallyInvalidUTF8Data(message)
        entries.append(json.dumps({
            'msgid': hexlify(msgid),
            'toAddress': toAddress,
            'fromAddress': fromAddress,
            'subject': base64.b64encode(subject),
            'message': base64.b64encode(message),
            'encodingType': encodingtype,
            'lastActionTime': lastactiontime,
            'status': status,
            'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': ')))
    # Entries are concatenated without separators, matching the original
    # output (at most one row is expected per ackdata).
    return '{"sentMessage":[' + ''.join(entries) + ']}'
def HandleTrashMessage(self, params):
    """
    Move a message to the trash folder by msgid, whether it lives in
    the inbox or the sent table.
    """
    if not params:
        raise APIError(0, 'I need parameters!')
    msgid = self._decode(params[0], "hex")
    # Trash if in inbox table
    helper_inbox.trash(msgid)
    # Trash if in sent table
    sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid)
    return 'Trashed message (assuming message existed).'
def HandleTrashInboxMessage(self, params):
if len(params) == 0:
raise APIError(0, 'I need parameters!')
msgid = self._decode(params[0], "hex")
helper_inbox.trash(msgid)
return 'Trashed inbox message (assuming | |
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method load_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `load_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/load', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1LoadTagsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_tag(self, owner, tag_name, body, **kwargs):  # noqa: E501
    """Patch tag  # noqa: E501

    Convenience wrapper around ``patch_tag_with_http_info`` that returns
    only the response data. This method makes a synchronous HTTP request
    by default. To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.patch_tag(owner, tag_name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str owner: Owner of the namespace (required)
    :param str tag_name: Tag name (required)
    :param V1Tag body: Tag body (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1Tag
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the low-level call to strip status code and headers.
    kwargs['_return_http_data_only'] = True
    response = self.patch_tag_with_http_info(owner, tag_name, body, **kwargs)  # noqa: E501
    return response
def patch_tag_with_http_info(self, owner, tag_name, body, **kwargs):  # noqa: E501
    """Patch tag  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_tag_with_http_info(owner, tag_name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str owner: Owner of the namespace (required)
    :param str tag_name: Tag name (required)
    :param V1Tag body: Tag body (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # The operation's own parameters plus the generic request-control
    # options accepted by every generated endpoint method.
    all_params = ['owner', 'tag_name', 'body']
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_tag" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation: every required parameter must be present.
    if self.api_client.client_side_validation:
        for required in ('owner', 'tag_name', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling"
                    " `patch_tag`" % required)

    collection_formats = {}

    path_params = {}
    for attr, template_key in (('owner', 'owner'),
                               ('tag_name', 'tag.name')):
        if attr in local_var_params:
            path_params[template_key] = local_var_params[attr]

    query_params = []
    form_params = []
    local_var_files = {}

    body_params = local_var_params.get('body')

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['ApiKey']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/orgs/{owner}/tags/{tag.name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Tag',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def sync_tags(self, owner, body, **kwargs):  # noqa: E501
    """Sync tags  # noqa: E501

    Convenience wrapper around ``sync_tags_with_http_info`` that returns
    only the response data. This method makes a synchronous HTTP request
    by default. To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.sync_tags(owner, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str owner: Owner of the namespace (required)
    :param V1EntitiesTags body: Data (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the low-level call to strip status code and headers.
    kwargs['_return_http_data_only'] = True
    response = self.sync_tags_with_http_info(owner, body, **kwargs)  # noqa: E501
    return response
def sync_tags_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Sync tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_tags_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1EntitiesTags body: Data (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sync_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `sync_tags`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `sync_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: | |
#
from __future__ import division
import timeit
from sklearn import preprocessing
import numpy as np
import pandas as pd
import multiprocessing
import matplotlib.pyplot as plt
from IOHMM import UnSupervisedIOHMM
from IOHMM import OLS, DiscreteMNL, CrossEntropyMNL
from IOHMM import forward_backward
from scipy.special import logsumexp
import pickle
from copy import deepcopy
import random
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.feature_selection import SelectPercentile, chi2, SelectFromModel, f_regression
from sklearn.svm import LinearSVC
from sklearn.linear_model import LassoCV, Lasso
from sklearn.metrics import r2_score
from sklearn.ensemble import ExtraTreesClassifier
import os
# Module-level accumulators shared across the script's processing runs.
Accurate_duration=[]  # collects accuracy/duration results per card
# NOTE(review): loading a precomputed activity index from disk is disabled;
# an empty dict is used instead. Re-enable the three lines below to restore
# it (and prefer json/ast.literal_eval over eval if so).
#filename1='data/activity_index_test.txt'
#file1=open(filename1,'r')
#activity_index_test=eval(file1.read())
activity_index_test = {}  # activity-index lookup; empty unless loaded above
def process_data(Card_ID, data, test_proportion, C, dependent_variables, percent_feature, test_last, model_based_select, SCALAR_DURATION):
    """Prepare one card's activity data: build covariates, min-max scale,
    run feature selection, and split into train/test sequences.

    Args:
        Card_ID: card identifier; also seeds the random train/test split.
        data: per-card activity DataFrame (modified in place: scaled
            covariates are written back into its columns).
        test_proportion: fraction of days held out for testing.
        C: LinearSVC regularization for model-based selection; -1 together
            with percent_feature == -1 disables feature selection.
        dependent_variables: target column names; index 0 is treated as the
            continuous duration target, index 1 as the discrete target.
        percent_feature: percentile kept by SelectPercentile selection.
        test_last: if True, hold out the last days; else sample days randomly.
        model_based_select: if True use LinearSVC/Lasso-based selection,
            else univariate SelectPercentile.
        SCALAR_DURATION: if True, min-max scale the duration target too.

    Returns:
        (min_max_scaler, min_max_scaler_dep, data, data_train, data_test,
         Ut_list_new, Ut_list_1, Ut_list_2) where Ut_list_1/Ut_list_2 are the
        selected covariates for the duration / discrete emissions.
    """
    # A duration_last of -1 marks the first activity of a day; treat as 0.
    data.loc[data['duration_last']==-1,'duration_last'] = 0 # first activity, assign to 0
    data['if_first'] = 0
    data.loc[data['act_ID'] == 0, 'if_first'] = 1
    # Collect one-hot location_* and hour_* columns, dropping the raw
    # (non-dummy) source columns.
    column_list = list(data.columns.values)
    location_list = []
    hour_list = []
    for ele in column_list:
        if 'location' in ele:
            location_list.append(ele)
        if 'hour' in ele:
            hour_list.append(ele)
    location_list.remove('location_o')
    location_list.remove('location')
    hour_list.remove('hour')
    hour_list.remove('duration_hour')
    # set covariates to this OLS model
    weather_list=['rain','heavy_rain','sun','cloud','Avrg_Temp','fengli']
    Weekday_list=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
    holiday_list=['National_holiday','Observance']
    last_activity=['duration_last','duration_trip']
    previous_trips = ['Last_trip_time_yesterday','N_days_withtrip_past20',
                      'N_consec_days_no_trips','N_trips_yesterday']
    Other = ['if_first']
    # Full candidate covariate list before feature selection.
    Ut_list=weather_list + hour_list + Weekday_list+ location_list + holiday_list +last_activity + previous_trips + Other
    # U1_list=Weekday_list+weather_list + holiday_list
    x_array = np.array(data.loc[:,Ut_list])
    min_max_scaler = preprocessing.MinMaxScaler()
    x_array_minmax = min_max_scaler.fit_transform(x_array)
    y = np.array(data.loc[:,dependent_variables])
    print(x_array_minmax.shape)
    if C == -1 and percent_feature == -1:
        # Feature selection disabled: keep every covariate for both targets.
        Ut_list_1 = []
        Ut_list_2 = []
        Ut_list_new = Ut_list
    else:
        # ============
        # NOTE(review): the `len(dependent_variables) > 0` tests below index
        # y[:,1] and y[:,0], which requires at least TWO dependent variables;
        # presumably `> 1` was intended — confirm. With a single target, the
        # else branches run and only `Feature_select` is fitted.
        if model_based_select:
            if len(dependent_variables) >0:
                # L1-regularized SVC selects features for the discrete target.
                lsvc = LinearSVC(C=C, penalty="l1", dual=False).fit(x_array_minmax, y[:,1])
                Feature_select2 = SelectFromModel(lsvc, prefit=True)
            else:
                lsvc = LinearSVC(C = C, penalty="l1", dual=False).fit(x_array_minmax, y)
                Feature_select = SelectFromModel(lsvc, prefit=True)
            #----------
            # clf = ExtraTreesClassifier(n_estimators=50)
            # clf = clf.fit(x_array_minmax, y)
            # Feature_select = SelectFromModel(clf, prefit=True)
            #----------
        else:
            if len(dependent_variables) > 0:
                # Univariate selection: chi2 for the discrete target,
                # f_regression for the continuous one.
                Feature_select2 = SelectPercentile(chi2, percentile=percent_feature).fit(x_array_minmax, y[:,1])
                Feature_select1 = SelectPercentile(f_regression, percentile=percent_feature).fit(x_array_minmax, y[:,0])
                a=1
            else:
                Feature_select = SelectPercentile(chi2, percentile=percent_feature).fit(x_array_minmax, y)
        # ============
        if len(dependent_variables) > 0:
            # thresh2 = Feature_select2.threshold_
            # X_new2 = Feature_select2.transform(x_array_minmax)
            if model_based_select:
                idx_features2 = Feature_select2.get_support(indices = True)
                num_feature = len(idx_features2)
                # For the continuous target, keep the same NUMBER of features
                # as the discrete selection, ranked by |Lasso coefficient|.
                clf = LassoCV().fit(x_array_minmax, y[:, 0])
                importance = np.abs(clf.coef_)
                idx_thresh = importance.argsort()[-num_feature]
                threshold = importance[idx_thresh]
                sfm = SelectFromModel(clf, threshold=threshold)
                sfm.fit(x_array_minmax, y[:, 0])
                # X_new1 = sfm.transform(x_array_minmax)
                idx_features1 = sfm.get_support(indices = True)
                used_feature_index = list(set(idx_features2).union(idx_features1))
                Ut_list_new = [Ut_list[i] for i in used_feature_index]
                Ut_list_1 = [Ut_list[i] for i in idx_features1]
                Ut_list_2 = [Ut_list[i] for i in idx_features2]
            else:
                idx_features2 = Feature_select2.get_support(indices = True)
                idx_features1 = Feature_select1.get_support(indices = True)
                # assert len(idx_features1) == len(idx_features2)
                used_feature_index = list(set(idx_features2).union(idx_features1))
                Ut_list_new = [Ut_list[i] for i in used_feature_index]
                Ut_list_1 = [Ut_list[i] for i in idx_features1]
                Ut_list_2 = [Ut_list[i] for i in idx_features2]
        else:
            # NOTE(review): this path computes X_new but never assigns
            # Ut_list_new / Ut_list_1 / Ut_list_2, so the return statement
            # below would raise NameError — confirm it is unreachable for
            # the configurations actually used.
            X_new = Feature_select.transform(x_array_minmax)
            # Ut_list_new = [Ut_list[i] for i in range(len(Ut_list)) if used_feature_index[i]]
    # print(X_new.shape)
    # Write the scaled covariates back into the DataFrame in place.
    data.loc[:,Ut_list] = x_array_minmax
    if SCALAR_DURATION:
        min_max_scaler_dep = preprocessing.MinMaxScaler()
        data[dependent_variables[0]] = min_max_scaler_dep.fit_transform(data[[dependent_variables[0]]])
    else:
        min_max_scaler_dep = None
    # Split by day (seq_ID): either the chronologically last days, or a
    # reproducible random sample seeded by the card id.
    total_days = data['seq_ID'].max()
    train_days = int(total_days - round(total_days*test_proportion))
    if test_last:
        # last 30 days
        data_train = data.loc[data['seq_ID']<=train_days]
        data_test = data.loc[data['seq_ID']>train_days]
    else:
        random.seed(Card_ID)
        test_seq = random.sample(list(range(1,total_days+1)), total_days - train_days)
        data_train = data.loc[~data['seq_ID'].isin(test_seq)]
        data_test = data.loc[data['seq_ID'].isin(test_seq)]
    return min_max_scaler,min_max_scaler_dep, data, data_train, data_test, Ut_list_new, Ut_list_1, Ut_list_2
def predict(sequence, num_states, dependent_variables, Card_ID, data, SHMM, Ut_list, Ut_list_1,Ut_list_2,
            save_info_list, C, percent_feature, save_predicted_rank, scaler_y, SCALAR_DURATION):
    """Predict the next activity's discrete target (e.g. location) and
    continuous duration for every record of every test sequence, under a
    trained input-output HMM.

    For the first record of a sequence the hidden-state distribution comes
    from SHMM.model_initial; for later records it is obtained by running
    forward_backward over the observed prefix and propagating log-alpha one
    step through the transition models.

    Args:
        sequence: iterable of per-day DataFrames (test sequences).
        num_states: number of hidden states in SHMM.
        dependent_variables: target column names — index 0 is the continuous
            duration target, index 1 the discrete target.
        Card_ID: card identifier echoed into every output row.
        data: full DataFrame, used to enumerate the candidate values of the
            discrete target.
        SHMM: trained IOHMM exposing model_initial, model_transition and,
            per state, model_emissions = [duration model, discrete model].
        Ut_list: covariates of the initial/transition models.
        Ut_list_1: covariates of the duration emission model.
        Ut_list_2: covariates of the discrete emission model.
        save_info_list: result keys to initialize; assumed to include 'ID',
            'Card_ID', 'Ground_truth', 'Correct', 'activity_index',
            'total_activity', 'percent_feature', 'C_reg', 'Predict_duration',
            'Ground_truth_duration' and 'Predict1'..'Predict<rank>' —
            TODO confirm against the caller.
        C, percent_feature: hyperparameters echoed into the results.
        save_predicted_rank: how many top-ranked candidates to record.
        scaler_y: scaler used to invert duration predictions when
            SCALAR_DURATION is set.
        SCALAR_DURATION: whether the duration target was min-max scaled.

    Returns:
        dict of parallel lists, one entry appended per record.
    """
    results={}
    show_duration_predict = True  # NOTE(review): unused flag
    for info in save_info_list:
        results[info] = []
    # Candidate values of the discrete target, from the full dataset.
    Dt_h_2 = np.array(sorted(data.loc[:,dependent_variables[1]].unique()))
    # NOTE(review): Dt_h_1 (candidate duration grid) is built but never used.
    Dt_h_1 = np.array(np.arange(round(min(data['duration'])) - 0.5, round(max(data['duration'])) + 0.5, 0.01)) # candidate duration
    for seq in sequence:
        seq = seq.reset_index(drop=True)
        for idx, row in seq.iterrows():
            if idx == 0:
                # --- First record of the sequence: use the initial-state model.
                X_emi_1 = np.array([row[Ut_list_1]])
                X_emi_2 = np.array([row[Ut_list_2]])
                ############################ location
                X_ini = np.array([row[Ut_list]])
                Log_ini_st = SHMM.model_initial.predict_log_proba(X_ini).reshape(num_states,)
                # log P(candidate | state) for every candidate x state.
                log_Emission = np.zeros((len(Dt_h_2), num_states))
                Ut_input = np.repeat(X_emi_2, len(Dt_h_2), axis=0)
                for st in range(num_states):
                    # print(Dt_h.shape)
                    # print(X.shape)
                    log_Emission[:, st] = SHMM.model_emissions[st][1].loglike_per_sample(Ut_input, Dt_h_2)
                # Marginalize over states: P(candidate) = sum_st P(cand|st)P(st).
                log_P_temp = log_Emission + Log_ini_st
                P_final = np.sum(np.exp(log_P_temp), axis=1)
                Predict_value = Dt_h_2[np.argmax(P_final)]
                True_value = row[dependent_variables[1]]
                # Record the top-k candidates by probability; -1 pads when
                # there are fewer candidates than requested ranks.
                comb_results = [[P_final[i], Dt_h_2[i]] for i in range(len(Dt_h_2))]
                comb_results = sorted(comb_results, reverse=True)
                for i in range(save_predicted_rank):
                    if i >= len(comb_results):
                        rank_name = 'Predict' + str(i+1)
                        results[rank_name].append(-1) # no 20 candidates
                    else:
                        rank_name = 'Predict' + str(i+1)
                        results[rank_name].append(comb_results[i][1])
                # plt.plot(Dt_h,P_final)
                # plt.plot([True_value,True_value],[0,max(P_final)])
                # plt.show()
                results['ID'].append(row['ID'])
                results['Card_ID'].append(Card_ID)
                results['Ground_truth'].append(True_value)
                if Predict_value == True_value:
                    results['Correct'].append(1)
                else:
                    results['Correct'].append(0)
                results['activity_index'].append(idx)
                results['total_activity'].append(num_states)
                results['percent_feature'].append(percent_feature)
                results['C_reg'].append(C)
                ################################################################ continuous duration
                Log_ini_st = SHMM.model_initial.predict_log_proba(X_ini).reshape(num_states,)
                # Expected duration: state-probability-weighted mean of the
                # per-state regression predictions.
                predict_Emission = np.zeros(num_states)
                dispersion = np.zeros(num_states)
                for st in range(num_states):
                    # print(Dt_h.shape)
                    # print(X.shape)
                    predict_Emission[st] = SHMM.model_emissions[st][0].predict(X_emi_1)
                    # dispersion[st] = SHMM.model_emissions[st][0].get_dispersion(Y_len = 1) #
                    # a=1
                P_int_st = np.exp(Log_ini_st)
                Predict_value_mean = sum(P_int_st * predict_Emission)
                # Predict_value_var = sum((P_int_st**2) * dispersion)
                True_value = row[dependent_variables[0]]
                if SCALAR_DURATION:
                    # Undo the min-max scaling applied upstream before saving.
                    predict_dur = scaler_y.inverse_transform(np.array([Predict_value_mean]).reshape(1, -1))[0][0]
                    true_value = scaler_y.inverse_transform(np.array([True_value]).reshape(1, -1))[0][0]
                    results['Predict_duration'].append(predict_dur)
                    results['Ground_truth_duration'].append(true_value)
                else:
                    results['Predict_duration'].append(Predict_value_mean)
                    results['Ground_truth_duration'].append(True_value)
                # results['Predict_duration_log_std'].append(np.sqrt(Predict_value_var))
            else:
                # --- Later records: condition on the observed prefix seq[0:idx].
                X_emi_1 = np.array([row[Ut_list_1]])
                X_emi_2 = np.array([row[Ut_list_2]])
                X_ini = np.array([row[Ut_list]])
                ############################ location # second dep
                # calculate log_alpha
                Known_seq = seq.loc[0:idx-1,:]
                n_records = max(Known_seq.index) + 1
                # Log_ini_st was computed at idx == 0 of this sequence.
                log_prob_initial = Log_ini_st
                log_prob_transition = np.zeros((n_records - 1, num_states, num_states))
                if n_records>1:
                    X_to_transit = np.array(Known_seq.loc[1:,Ut_list])
                    for st in range(num_states):
                        log_prob_transition[:, st, :] = SHMM.model_transition[st].predict_log_proba(X_to_transit)
                    assert log_prob_transition.shape == (n_records - 1, num_states, num_states)
                # emission probability
                log_Emission = np.zeros((n_records, num_states))
                inp_emissions = np.array(Known_seq.loc[0:,Ut_list_1])
                out_emissions = np.array(Known_seq.loc[0:,[dependent_variables[1]]])
                model_collection = [models[1] for models in SHMM.model_emissions]
                # print (model_collection)
                log_Emission += np.vstack([model.loglike_per_sample(
                    inp_emissions.astype('float64'),out_emissions) for model in model_collection]).T
                # print (np.exp(log_Ey))
                # forward backward to calculate posterior
                # print(out_emissions)
                # print(out_emissions.shape)
                # # print ('-----')
                # print(inp_emissions)
                # print(inp_emissions.shape)
                log_gamma, log_epsilon, log_likelihood, log_alpha = forward_backward(
                    log_prob_initial, log_prob_transition, log_Emission, {})
                # ------predict:
                # Propagate log-alpha one step through the transition models
                # evaluated at the current covariates X_ini.
                log_alpha_new = np.zeros((num_states,))
                for j in range(num_states):
                    temp_alpha = 0
                    for i in range(num_states):
                        temp_alpha += np.exp(SHMM.model_transition[i].predict_log_proba(X_ini)[0,j] + log_alpha[-1,i])
                        # the first 0 is because we only have one row, so select the first. the second -1 is
                        # because log_alpha has the shape of t,k , where t is the number of timestamps (length) of the sequence.
                        # where k is the number of states of the HMM
                    log_alpha_new[j] = np.log(temp_alpha)
                # Normalize to a log state distribution given the prefix.
                log_P_D1T_u1Th=np.zeros((num_states,))
                for i in range(num_states):
                    log_P_D1T_u1Th[i]=log_alpha_new[i] - logsumexp(log_alpha_new[:])
                log_Emission = np.zeros((len(Dt_h_2), num_states))
                Ut_input_2 = np.repeat(X_emi_2, len(Dt_h_2), axis=0)
                for st in range(num_states):
                    # print(Dt_h.shape)
                    # print(X.shape)
                    log_Emission[:, st] = SHMM.model_emissions[st][1].loglike_per_sample(Ut_input_2, Dt_h_2)
                log_P_temp = log_Emission + log_P_D1T_u1Th
                P_final = np.sum(np.exp(log_P_temp), axis=1)
                Predict_value = Dt_h_2[np.argmax(P_final)]
                True_value = row[dependent_variables[1]]
                comb_results = [[P_final[i], Dt_h_2[i]] for i in range(len(Dt_h_2))]
                comb_results = sorted(comb_results, reverse=True)
                for i in range(save_predicted_rank):
                    if i >= len(comb_results):
                        rank_name = 'Predict' + str(i+1)
                        results[rank_name].append(-1) # no 20 candidates
                    else:
                        rank_name = 'Predict' + str(i+1)
                        results[rank_name].append(comb_results[i][1])
                results['ID'].append(row['ID'])
                results['Card_ID'].append(Card_ID)
                results['Ground_truth'].append(True_value)
                if Predict_value == True_value:
                    results['Correct'].append(1)
                else:
                    results['Correct'].append(0)
                results['activity_index'].append(idx)
                results['total_activity'].append(num_states)
                results['percent_feature'].append(percent_feature)
                results['C_reg'].append(C)
                ############################ duration
                # calculate log_alpha
                # NOTE(review): this repeats the forward pass above with the
                # duration emission models ([0]) instead of the discrete ones.
                Known_seq = seq.loc[0:idx-1,:]
                n_records = max(Known_seq.index) + 1
                log_prob_initial = Log_ini_st
                log_prob_transition = np.zeros((n_records - 1, num_states, num_states))
                if n_records>1:
                    X_to_transit = np.array(Known_seq.loc[1:,Ut_list])
                    for st in range(num_states):
                        log_prob_transition[:, st, :] = SHMM.model_transition[st].predict_log_proba(X_to_transit)
                    assert log_prob_transition.shape == (n_records - 1, num_states, num_states)
                # emission probability
                log_Emission = np.zeros((n_records, num_states))
                inp_emissions = np.array(Known_seq.loc[0:,Ut_list_1])
                out_emissions = np.array(Known_seq.loc[0:,[dependent_variables[0]]])
                model_collection = [models[0] for models in SHMM.model_emissions]
                # print (model_collection)
                log_Emission += np.vstack([model.loglike_per_sample(
                    inp_emissions.astype('float64'),out_emissions) for model in model_collection]).T
                # print (np.exp(log_Ey))
                # forward backward to calculate posterior
                # print(out_emissions)
                # print(out_emissions.shape)
                # # print ('-----')
                # print(inp_emissions)
                # print(inp_emissions.shape)
                log_gamma, log_epsilon, log_likelihood, log_alpha = forward_backward(
                    log_prob_initial, log_prob_transition, log_Emission, {})
                # ------predict:
                log_alpha_new = np.zeros((num_states,))
                for j in range(num_states):
                    temp_alpha = 0
                    for i in range(num_states):
                        temp_alpha += np.exp(SHMM.model_transition[i].predict_log_proba(X_ini)[0,j] + log_alpha[-1,i])
                        # the first 0 is because we only have one row, so select the first. the second -1 is
                        # because log_alpha has the shape of t,k , where t is the number of timestamps (length) of the sequence.
                        # where k is the number of states of the HMM
                    log_alpha_new[j] = np.log(temp_alpha)
                log_P_D1T_u1Th=np.zeros((num_states,))
                for i in range(num_states):
                    log_P_D1T_u1Th[i]=log_alpha_new[i] - logsumexp(log_alpha_new[:])
                predict_Emission = np.zeros(num_states)
                dispersion = np.zeros(num_states)
                for st in range(num_states):
                    # print(Dt_h.shape)
                    # print(X.shape)
                    predict_Emission[st] = SHMM.model_emissions[st][0].predict(X_emi_1)
                    dispersion[st] = SHMM.model_emissions[st][0].get_dispersion(Y_len = 1) #
                    # a=1
                # Posterior-weighted mean (and variance) of per-state predictions.
                P_st = np.exp(log_P_D1T_u1Th)
                Predict_value_mean = sum(P_st * predict_Emission)
                Predict_value_var = sum((P_st**2) * dispersion)
                True_value = row[dependent_variables[0]]
                if SCALAR_DURATION:
                    predict_dur = scaler_y.inverse_transform(np.array([Predict_value_mean]).reshape(1, -1))[0][0]
                    true_value = scaler_y.inverse_transform(np.array([True_value]).reshape(1, -1))[0][0]
                    results['Predict_duration'].append(predict_dur)
                    results['Ground_truth_duration'].append(true_value)
                else:
                    results['Predict_duration'].append(Predict_value_mean)
                    results['Ground_truth_duration'].append(True_value)
    return results
def calculate_accuracy(result_df, task = None):
if task == 'loc':
RMSE = -1
MAPE = -1
MAE = -1
R_sq = -1
else:
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration'])**2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs']/result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
N_first | |
math.square([-2., 0., 3.])
<Tensor: shape=(3,), dtype=float32, numpy=array([4., 0., 9.], dtype=float32)>
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`,
`complex64`, `complex128`.
Returns:
A `Tensor`. Has the same type as `x`.
"""
return x ** 2
@numpy_compatible
def abs(x: Tensor):
    r"""Return the element-wise absolute value of ``x``.

    Integer and floating-point inputs keep their type. For a complex tensor
    the result is a real tensor (`float32` for `complex64`, `float64` for
    `complex128`) holding \\(\sqrt{a^2 + b^2}\\) for each element \\(a + bj\\).

    Examples:
        >>> x = to_tensor([[-2.25 + 4.75j], [-3.25 + 5.75j]])
        >>> abs(x)
        <Tensor: shape=(2, 1), dtype=float64, numpy=
        array([[5.25594901],
               [6.60492241]])>

    Args:
        x: input tensor (integer, floating-point or complex).

    Returns:
        A tensor of the same shape with element-wise absolute values.
    """
    return torch.abs(x)
@numpy_compatible
def pow(x: Tensor, y:(Tensor,float)):
    r"""Element-wise power \\(x^y\\).

    The exponent may be a tensor (broadcast against ``x``) or a scalar; it is
    converted to ``x``'s dtype and device before the computation.

    ```python
    x = to_tensor([[2, 2], [3, 3]])
    y = to_tensor([[8, 16], [2, 3]])
    pow(x, y)  # [[256, 65536], [9, 27]]
    ```

    Args:
        x (Tensor): base tensor.
        y (Tensor or float): exponent.

    Returns:
        A `Tensor` of element-wise powers.
    """
    exponent = to_tensor(y, dtype=x.dtype, device=x.device)
    return x ** exponent
@numpy_compatible
def log(x: Tensor):
    r"""Element-wise natural logarithm, \\(y = \log_e x\\).

    See: https://en.wikipedia.org/wiki/Logarithm

    Args:
        x (Tensor): input tensor.

    Returns:
        A `Tensor` of the same type as ``x``.

    Examples:
        >>> x = to_tensor([0, 0.5, 1, 5])
        >>> log(x)
        array([      -inf, -0.6931472,  0.       ,  1.609438 ])
    """
    return x.log()
@numpy_compatible
def exp(x: Tensor):
    r"""Element-wise exponential, \\(y = e^x\\).

    \\(e\\) denotes Euler's number, approximately 2.718281. The output is
    positive for any real input.

    >>> x = to_tensor(2.0)
    >>> exp(x)
    <Tensor: shape=(), dtype=float32, numpy=7.389056>
    >>> x = to_tensor([2.0, 8.0])
    >>> exp(x)
    tensor([   7.389056, 2980.958   ])

    For complex numbers the value follows
    \\(e^{x+iy}={e^x}{e^{iy}}={e^x}{\\cos(y)+i\\sin(y)}\\); for `1+1j`:
    \\(e^1{\\cos(1)+i\\sin(1)} = 2.7182817 \\times (0.5403023+0.84147096j)\\)

    >>> x =to_tensor(1 + 1j)
    >>> exp(x)
    tensor(1.4686939399158851+2.2873552871788423j)>

    Args:
        x: A `Tensor` of floating-point or complex type.

    Returns:
        A `Tensor` of the same type as ``x``.

    @compatibility(numpy)
    Equivalent to np.exp
    @end_compatibility
    """
    return x.exp()
@numpy_compatible
def clip(x: Tensor, min=None, max=None):
    """Clamp every element of ``x`` into the range ``[min, max]``.

    Either bound may be ``None``, in which case that side is left open.

    Args:
        x (Tensor): input tensor.
        min (scalar, optional): lower bound, or ``None`` for no lower bound.
        max (scalar, optional): upper bound, or ``None`` for no upper bound.

    Returns:
        A `Tensor` with all elements clamped to the given range.
    """
    return x.clamp(min=min, max=max)
@numpy_compatible
def sin(x: Tensor):
    """Element-wise sine.

    Args:
        x (Tensor): input tensor (radians).

    Returns:
        Tensor of element-wise sines.

    Examples:
        >>> sin(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[ 0.8415,  0.4794],
                [-0.2474, -0.6816]])
    """
    return x.sin()
@numpy_compatible
def cos(x: Tensor):
    """Element-wise cosine.

    Args:
        x (Tensor): input tensor (radians).

    Returns:
        Tensor of element-wise cosines.

    Examples:
        >>> cos(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[0.5403, 0.8776],
                [0.9689, 0.7317]])
    """
    return x.cos()
@numpy_compatible
def tan(x: Tensor):
    """Element-wise tangent.

    Args:
        x (Tensor): input tensor (radians).

    Returns:
        Tensor of element-wise tangents.

    Examples:
        >>> tan(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[ 1.5574,  0.5463],
                [-0.2553, -0.9316]])
    """
    return x.tan()
@numpy_compatible
def asin(x: Tensor):
    """Element-wise arcsine (inverse sine).

    Args:
        x (Tensor): input tensor with values in ``[-1, 1]``.

    Returns:
        Tensor of element-wise arcsines, in radians.

    Examples:
        >>> asin(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[ 1.5708,  0.5236],
                [-0.2527, -0.8481]])
    """
    return x.asin()
@numpy_compatible
def acos(x: Tensor):
    """Element-wise arccosine (inverse cosine).

    Args:
        x (Tensor): input tensor with values in ``[-1, 1]``.

    Returns:
        Tensor of element-wise arccosines, in radians.

    Examples:
        >>> acos(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[0.0000, 1.0472],
                [1.8235, 2.4189]])
    """
    return x.acos()
@numpy_compatible
def atan(x: Tensor):
    """Element-wise arctangent (inverse tangent).

    Args:
        x (Tensor): input tensor.

    Returns:
        Tensor of element-wise arctangents, in radians.

    Examples:
        >>> atan(to_tensor([-1, 0, 1])).cpu()
        tensor([-0.7854,  0.0000,  0.7854])
    """
    return x.atan()
@numpy_compatible
def sinh(x: Tensor):
    """Element-wise hyperbolic sine.

    Args:
        x (Tensor): input tensor.

    Returns:
        Tensor of element-wise hyperbolic sines.

    Examples:
        >>> sinh(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[ 1.1752,  0.5211],
                [-0.2526, -0.8223]])
    """
    return x.sinh()
@numpy_compatible
def cosh(x: Tensor):
    """Element-wise hyperbolic cosine.

    Args:
        x (Tensor): input tensor.

    Returns:
        Tensor of element-wise hyperbolic cosines.

    Examples:
        >>> cosh(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[1.5431, 1.1276],
                [1.0314, 1.2947]])
    """
    return x.cosh()
@numpy_compatible
def tanh(x: Tensor):
    """Element-wise hyperbolic tangent.

    Args:
        x (Tensor): input tensor.

    Returns:
        Tensor of element-wise hyperbolic tangents.

    Examples:
        >>> tanh(to_tensor([[1,0.5],[-0.25,-0.75]])).cpu()
        tensor([[ 0.7616,  0.4621],
                [-0.2449, -0.6351]])
    """
    return x.tanh()
############################
## elementwise operation
###########################
@numpy_compatible
def element_times(left, right):
    """Element-wise product of two tensors, with broadcasting.

    Args:
        left: left-hand tensor.
        right: right-hand tensor.

    Returns:
        The element-wise product of the two inputs.

    Examples:
        >>> element_times(to_tensor([1., 1., 1., 1.]), to_tensor([0.5, 0.25, 0.125, 0.]))
        tensor([0.5000, 0.2500, 0.1250, 0.0000])
        >>> element_times(to_tensor([5., 10., 15., 30.]),to_tensor([2.]))
        tensor([10., 20., 30., 60.])
        >>> element_times(to_tensor([[5., 10.], [15., 30.]]), to_tensor([[1., 2.], [3.,1.]]))
        tensor([[ 5., 20.],
                [45., 30.]])
    """
    product = left * right
    return product
@numpy_compatible
def element_max(left, right):
    """Element-wise maximum of two tensors, with broadcasting.

    (The previous docstring, copy-pasted from ``element_times``, incorrectly
    described the result as a product.)

    Args:
        left: left-hand tensor.
        right: right-hand tensor.

    Returns:
        A tensor holding, at each position, the larger of the two inputs.

    Examples:
        >>> element_max(to_tensor([1., 1., 0., -1.]), to_tensor([0.5, 0.25, 0.125, 0.]))
        tensor([1.0000, 1.0000, 0.1250, 0.0000])
        >>> element_max(to_tensor([5., 10., 15., 30.]),to_tensor([20.]))
        tensor([20., 20., 20., 30.])
        >>> element_max(to_tensor([5., 10., 15., 30.]), to_tensor([10., 2., 8., 2.]))
        tensor([10., 10., 15., 30.])
    """
    return torch.max(left, right)
@numpy_compatible
def element_min(left, right):
    """Element-wise minimum of two tensors, with broadcasting.

    (The previous docstring, copy-pasted from ``element_times``, incorrectly
    described the result as a product.)

    Args:
        left: left-hand tensor.
        right: right-hand tensor.

    Returns:
        A tensor holding, at each position, the smaller of the two inputs.

    Examples:
        >>> element_min(to_tensor([1., 1., 1., 1.]), to_tensor([0.5, 0.25, 0.125, 0.]))
        tensor([0.5000, 0.2500, 0.1250, 0.0000])
        >>> element_min(to_tensor([5., 10., 15., 30.]),to_tensor([2.]))
        tensor([2., 2., 2., 2.])
        >>> element_min(to_tensor([5., 10., 15., 30.]), to_tensor([1., 2., 1., 2.]))
        tensor([1., 2., 1., 2.])
    """
    return torch.min(left, right)
@numpy_compatible
def element_divide(left, right):
    """Element-wise true division of two tensors, with broadcasting.

    Division by zero yields ``inf`` (float semantics), as with
    ``torch.true_divide``.

    Args:
        left: dividend tensor.
        right: divisor tensor.

    Returns:
        The element-wise quotient of the two inputs.

    Examples:
        >>> element_divide(to_tensor([1., 1., 1., 1.]), to_tensor([0.5, 0.25, 0.125, 0.]))
        tensor([2., 4., 8., inf])
        >>> element_divide(to_tensor([5., 10., 15., 30.]),to_tensor([2.]))
        tensor([ 2.5000,  5.0000,  7.5000, 15.0000])
        >>> element_divide(to_tensor([5., 10., 15., 30.]), to_tensor([1., 2., 1., 2.]))
        tensor([ 5.,  5., 15., 15.])
    """
    quotient = torch.true_divide(left, right)
    return quotient
@numpy_compatible
def element_cosine_distance(v1, v2, axis=-1):
    """Pairwise cosine similarity between two batches of embeddings.

    Both inputs are L2-normalized along ``axis``; the result is their
    product with the second operand transposed.

    Args:
        v1 (ndarray, tensor): shape [batch, embedded dimensions].
        v2 (ndarray, tensor): shape [batch, embedded dimensions].
        axis (int): axis along which to normalize (default: last).

    Returns:
        Matrix of cosine similarities between rows of ``v1`` and ``v2``.
    """
    v1_unit = l2_normalize(v1, axis=axis, keepdims=True)
    v2_unit = l2_normalize(v2, axis=axis, keepdims=True)
    similarity = matmul(v1_unit, v2_unit, False, True)
    return similarity
@numpy_compatible
def where(flag, value_if_true=None, value_if_false=None):
    """Select between two tensors element-wise based on ``flag``.

    Where ``flag`` is nonzero the element of ``value_if_true`` is taken,
    otherwise the element of ``value_if_false`` — analogous to
    ``numpy.where``. If both branch tensors are omitted, returns the indices
    of the nonzero elements of ``flag`` instead (single-argument
    ``torch.where`` form).

    Args:
        flag: condition tensor.
        value_if_true: tensor selected where the condition holds.
        value_if_false: tensor selected elsewhere.

    Returns:
        The conditional selection (or nonzero indices in the one-argument
        form).

    Examples:
        >>> x=to_tensor([0.1, 0.9, 0.8, 0.4, 0.5])
        >>> where(x>0.5, x, zeros_like(x))
        tensor([0.0000, 0.9000, 0.8000, 0.0000, 0.0000])
    """
    if value_if_true is None and value_if_false is None:
        return torch.where(flag)
    return torch.where(flag.bool(), value_if_true, value_if_false)
############################
## reduce operation
###########################
@numpy_compatible
def reduce_mean(x: Tensor, axis=None, keepdims=False, **kwargs):
    """Computes the mean of the input tensor's elements across a specified
    axis or a list of specified axes.

    Args:
        x (Tensor): input tensor.
        axis (int, list, tuple, optional): axis (or axes) along which the
            reduction is performed; ``None`` reduces over all elements.
        keepdims (bool): keep the reduced dimension(s) or not.
        **kwargs: torch-style aliases ``dim`` / ``keepdim`` are also accepted
            and take precedence over ``axis`` / ``keepdims``.

    Returns:
        The reduced tensor.

    Examples:
        >>> data = to_tensor(np.array([[[5,1], [20,2]],[[30,1], [40,2]],[[55,1], [60,2]]], dtype=np.float32))
        >>> print(reduce_mean(data, 0).cpu())
        tensor([[30.,  1.],
                [40.,  2.]])
        >>> print(reduce_mean(data, axis=0).cpu())
        tensor([[30.,  1.],
                [40.,  2.]])
        >>> print(reduce_mean(data, axis=[0,2]).cpu())
        tensor([15.5000, 21.0000])
    """
    axis = kwargs.get('dim', axis)
    keepdims = kwargs.get('keepdim', keepdims)
    # Bug fix: element_size() is the per-element byte width (e.g. 4 for
    # float32) and is never 0 for a materialized tensor, so the empty-tensor
    # guard could never fire; numel() == 0 is the correct test.
    if x.numel() == 0:
        return x
    if x.dtype == Dtype.bool:
        # Bug fix: Tensor.to() is out-of-place; without rebinding `x` the
        # bool->float cast was silently discarded and mean() would fail.
        x = x.to(_float_dtype)
    if axis is None or isinstance(axis, (int, list, tuple)):
        if axis is None and keepdims == False:
            return torch.mean(x)
        else:
            return torch.mean(x, axis, keepdim=keepdims)
    else:
        # Unrecognized axis specification: fall back to a full reduction.
        return torch.mean(x)
@numpy_compatible
def reduce_sum(x: Tensor, axis=None, keepdims=False, **kwargs):
"""Computes the sum of the input | |
any nested
``Gather`` nodes
"""
ans = self.referee
while isinstance(ans, Gather):
ans = ans.referee
assert isinstance(ans, (Decl, Call))
return ans
class WorkflowSection(WorkflowNode):
    """
    Base class for workflow nodes representing scatter and conditional sections
    """

    body: List[WorkflowNode]
    """
    :type: List[WorkflowNode]

    Section body, potentially including nested sections.
    """

    gathers: Dict[str, Gather]
    """
    :type: Dict[str, Gather]

    ``Gather`` nodes exposing the section body's products to the rest of the
    workflow, keyed by the ``workflow_node_id`` of the interior node so the
    matching gather node can be looked up directly.

    The body and gather nodes don't list the section node among their explicit
    dependencies; that dependence is implicit, since the body subgraph can be
    "instantiated" only upon visiting the section node at runtime.
    """

    _type_env: Optional[Env.Bindings[Type.Base]] = None
    """
    After typechecking: the type environment INSIDE the section, consisting of
    everything available outside of the section, the declarations and call
    outputs in the section (singletons), declarations & outputs gathered from
    sub-sections (arrays/optionals), and the scatter variable, if applicable.
    """

    def __init__(self, body: List[WorkflowNode], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.body = body
        # TODO: add dependency on self to each body node?
        # Build one Gather per interior product: each Decl/Call directly in
        # the body and, transitively, each gather node of a nested section.
        # Internal name collisions would overwrite entries here; they are
        # diagnosed later, when the type environment is built.
        self.gathers = {}
        for node in self.body:
            if isinstance(node, (Decl, Call)):
                self.gathers[node.workflow_node_id] = Gather(self, node)
            elif isinstance(node, WorkflowSection):
                # gather gathers!
                for sub in node.gathers.values():
                    self.gathers[sub.workflow_node_id] = Gather(self, sub)

    @property
    def children(self) -> Iterable[SourceNode]:
        """"""
        yield from self.body
        yield from self.gathers.values()

    @property
    @abstractmethod
    def effective_outputs(self) -> Env.Bindings[Type.Base]:
        raise NotImplementedError()
class Scatter(WorkflowSection):
    """Workflow scatter section"""

    variable: str
    """
    :type: string

    Scatter variable name"""
    expr: Expr.Base
    """
    :type: WDL.Expr.Base

    Expression for the array over which to scatter"""

    def __init__(
        self, pos: SourcePosition, variable: str, expr: Expr.Base, body: List[WorkflowNode]
    ) -> None:
        # Node id encodes the source position and scatter variable, e.g.
        # "scatter-L5C3-x".
        super().__init__(body, "scatter-L{}C{}-{}".format(pos.line, pos.column, variable), pos)
        self.variable = variable
        self.expr = expr
        # Body nodes are nested one scatter level deeper than this node.
        for body_node in self.body:
            body_node._increment_scatter_depth()
        # excluded our gather nodes, which are not "within" the section

    @property
    def children(self) -> Iterable[SourceNode]:
        """"""
        yield self.expr
        yield from super().children

    def add_to_type_env(
        self, struct_types: Env.Bindings[Dict[str, Type.Base]], type_env: Env.Bindings[Type.Base]
    ) -> Env.Bindings[Type.Base]:
        # Add declarations and call outputs in this section as they'll be
        # available outside of the section (i.e. a declaration of type T is
        # seen as Array[T] outside)
        inner_type_env = Env.Bindings()
        for elt in self.body:
            inner_type_env = elt.add_to_type_env(struct_types, inner_type_env)
        # Subtlety: if the scatter array is statically nonempty, then so too
        # are the arrayized values.
        nonempty = isinstance(self.expr._type, Type.Array) and self.expr._type.nonempty

        # array-ize each inner type binding and add gather nodes
        def arrayize(binding: Env.Binding[Type.Base]) -> Env.Binding[Type.Base]:
            # binding.value is the inner type; binding.info is the bound node,
            # whose workflow_node_id keys the corresponding Gather.
            return Env.Binding(  # pyre-ignore
                binding.name,
                Type.Array(binding.value, nonempty=nonempty),
                self.gathers[binding.info.workflow_node_id],
            )

        return Env.merge(inner_type_env.map(arrayize), type_env)

    @property
    def effective_outputs(self) -> Env.Bindings[Type.Base]:
        # Yield the outputs of calls in this section and subsections, typed
        # and namespaced appropriately, as they'll be propagated if the
        # workflow lacks an explicit output{} section
        nonempty = isinstance(self.expr._type, Type.Array) and self.expr._type.nonempty
        inner_outputs = Env.Bindings()
        for elt in self.body:
            # Plain declarations are not workflow outputs; only calls and
            # nested sections contribute.
            if not isinstance(elt, Decl):
                assert isinstance(elt, (Call, Scatter, Conditional))
                inner_outputs = Env.merge(elt.effective_outputs, inner_outputs)

        def arrayize(binding: Env.Binding[Type.Base]) -> Env.Binding[Type.Base]:
            return Env.Binding(  # pyre-ignore
                binding.name,
                Type.Array(binding.value, nonempty=nonempty),
                self.gathers[binding.info.workflow_node_id],
            )

        return inner_outputs.map(arrayize)  # pyre-ignore

    def _workflow_node_dependencies(self) -> Iterable[str]:
        # The section node itself depends only on identifiers referenced by
        # the scatter expression.
        yield from _expr_workflow_node_dependencies(self.expr)
class Conditional(WorkflowSection):
    """Workflow conditional (if) section"""
    expr: Expr.Base
    """
    :type: WDL.Expr.Base
    Boolean expression"""
    def __init__(self, pos: SourcePosition, expr: Expr.Base, body: List[WorkflowNode]) -> None:
        # Section ID embeds the source position, making it unique within the document.
        super().__init__(body, "if-L{}C{}".format(pos.line, pos.column), pos)
        # TODO: add to id the name of 'shallowest' (closest to root) ident in expr
        self.expr = expr
    @property
    def children(self) -> Iterable[SourceNode]:
        """"""
        yield self.expr
        yield from super().children
    def add_to_type_env(
        self, struct_types: Env.Bindings[Dict[str, Type.Base]], type_env: Env.Bindings[Type.Base]
    ) -> Env.Bindings[Type.Base]:
        # Add declarations and call outputs in this section as they'll be
        # available outside of the section (i.e. a declaration of type T is
        # seen as T? outside)
        inner_type_env = Env.Bindings()
        for elt in self.body:
            inner_type_env = elt.add_to_type_env(struct_types, inner_type_env)
        # optional-ize each inner type binding and add gather nodes
        def optionalize(binding: Env.Binding[Type.Base]) -> Env.Binding[Type.Base]:
            # Re-bind with the corresponding Gather node so downstream
            # references resolve to it.
            return Env.Binding(
                binding.name,
                binding.value.copy(optional=True),
                self.gathers[binding.info.workflow_node_id],
            )
        return Env.merge(inner_type_env.map(optionalize), type_env)
    @property
    def effective_outputs(self) -> Env.Bindings[Type.Base]:
        # Yield the outputs of calls in this section and subsections, typed
        # and namespaced appropriately, as they'll be propagated if the
        # workflow lacks an explicit output{} section
        inner_outputs = Env.Bindings()
        for elt in self.body:
            if isinstance(elt, (Call, WorkflowSection)):
                inner_outputs = Env.merge(elt.effective_outputs, inner_outputs)
        def optionalize(binding: Env.Binding[Type.Base]) -> Env.Binding[Type.Base]:
            return Env.Binding(
                binding.name,
                binding.value.copy(optional=True),
                self.gathers[binding.info.workflow_node_id],
            )
        return inner_outputs.map(optionalize)  # pyre-ignore
    def _workflow_node_dependencies(self) -> Iterable[str]:
        # The section node itself depends only on the boolean expression.
        yield from _expr_workflow_node_dependencies(self.expr)
class Workflow(SourceNode):
name: str
":type: str"
inputs: Optional[List[Decl]]
""":type: List[WDL.Tree.Decl]
Declarations in the ``input{}`` workflow section, if it's present"""
body: List[WorkflowNode]
""":type: List[Union[WDL.Tree.Decl,WDL.Tree.Call,WDL.Tree.Scatter,WDL.Tree.Conditional]]
Workflow body in between ``input{}`` and ``output{}`` sections, if any
"""
outputs: Optional[List[Decl]]
""":type: Optional[List[WDL.Tree.Decl]]
Workflow output declarations, if the ``output{}`` section is present"""
# following two fields temporarily hold old-style (pre 1.0) outputs with
# bare identifiers or namespace wildcards. We postprocess them into
# full declarations as expected in WDL 1.0+.
_output_idents: List[List[str]]
_output_idents_pos: Optional[Error.SourcePosition]
parameter_meta: Dict[str, Any]
"""
:type: Dict[str,Any]
``parameter_meta{}`` section as a JSON-like dict"""
meta: Dict[str, Any]
"""
:type: Dict[str,Any]
``meta{}`` section as a JSON-like dict"""
_type_env: Optional[Env.Bindings[Type.Base]] = None
"""
After typechecking: the type environment in the main workflow body,
- declarations at the top level of the workflow body
- outputs of calls at the top level the workflow body
- declarations & outputs inside scatter sections (as arrays)
- declarations & outputs inside conditional sections (as optionals)
"""
complete_calls: bool
"""
After typechecking, False if the workflow has a call which does not supply
all required inputs (and thus cannot be called from another workflow).
"""
_nodes_by_id: Dict[str, WorkflowNode] # memoizer
effective_wdl_version: str
""":type: str
Effective WDL version of the containing document
"""
    def __init__(
        self,
        pos: SourcePosition,
        name: str,
        inputs: Optional[List[Decl]],
        body: List[WorkflowNode],
        outputs: Optional[List[Decl]],
        parameter_meta: Dict[str, Any],
        meta: Dict[str, Any],
        output_idents: Optional[List[List[str]]] = None,
        output_idents_pos: Optional[SourcePosition] = None,
    ) -> None:
        """Initialize the Workflow AST node; fields are filled in verbatim from
        the parser, with typechecking-dependent state defaulted."""
        super().__init__(pos)
        self.name = name
        self.inputs = inputs
        self.body = body
        self.outputs = outputs
        # pre-1.0 style bare-identifier outputs; postprocessed into full Decls later
        self._output_idents = output_idents or []
        self._output_idents_pos = output_idents_pos
        self.parameter_meta = parameter_meta
        self.meta = meta
        # assume calls are complete until typechecking proves otherwise
        self.complete_calls = True
        self._nodes_by_id = {}
        self.effective_wdl_version = "" # overridden by Document.__init__
        # Hack: modify workflow node IDs for output decls since, in draft-2, they could reuse names
        # of earlier decls
        for output_decl in self.outputs or []:
            output_decl.workflow_node_id = output_decl.workflow_node_id.replace("decl-", "output-")
@property
def available_inputs(self) -> Env.Bindings[Decl]:
""":type: WDL.Env.Bindings[WDL.Tree.Decl]
The workflow's input declarations. This includes:
1. If the ``input{}`` workflow section is present, all declarations within that section.
Otherwise, all declarations in the top-level workflow body, excluding outputs. (This
dichotomy bridges pre-1.0 and 1.0+ WDL versions.) These appear at the top level of the Env,
with no namespace.
2. Available inputs of all calls in the workflow, namespaced by the call names.
"""
ans = Env.Bindings()
# order of operations here ensures that iterating the env yields decls in the source order
for c in reversed(list(_calls(self))):
ans = Env.merge(c.available_inputs, ans)
if self.inputs is not None:
for decl in reversed(self.inputs):
ans = ans.bind(decl.name, decl)
else:
for elt in reversed(self.body):
if isinstance(elt, Decl):
ans = ans.bind(elt.name, elt)
return ans
@property
def required_inputs(self) -> Env.Bindings[Decl]:
""":type: WDL.Env.Bindings[Decl]
The subset of available inputs which are required to start the workflow.
"""
ans = Env.Bindings()
for c in reversed(list(_calls(self))):
ans = Env.merge(c.required_inputs, ans)
for b in reversed(list(self.available_inputs)):
if "." not in b.name:
d = b.value
assert isinstance(d, Decl)
if not d.type.optional and not d.expr:
ans = ans.bind(b.name, b.value)
return ans
@property
def effective_outputs(self) -> Env.Bindings[Type.Base]:
""":type: WDL.Env.Bindings[Decl]
If the ``output{}`` workflow section is present, yields the names and
types therein, at the top level of the Env. Otherwise, yield all the
call outputs, namespaced and typed appropriately.
"""
ans = Env.Bindings()
if self.outputs is not None:
for decl | |
# Repository: abasili98/thepdgt-bot
from flask import Flask, request, jsonify, make_response, render_template
from flask import Response
import json
import requests
from os import environ
import psycopg2
from cryptography.fernet import Fernet
app = Flask(__name__)
# Symmetric key used to encrypt stored Rebrandly API keys at rest.
# NOTE(review): if FERNET_KEY is unset, .encode raises AttributeError at import
# time — confirm the deployment environment always provides it.
FERNET_KEY = environ.get('FERNET_KEY')
f = Fernet(FERNET_KEY.encode("utf-8"))
def encrypt(text):
    """Fernet-encrypt a UTF-8 string and return the token as a str."""
    token = f.encrypt(text.encode("utf-8"))
    return token.decode("utf-8")
def decrypt(text):
    """Decrypt a Fernet token (str) and return the plaintext str.

    Returns the int sentinel -1 when the token is invalid or decryption
    fails for any reason (callers check for -1).
    """
    try:
        token = text.encode("utf-8")
        plain = f.decrypt(token)
        return plain.decode("utf-8")
    except Exception as e:
        # BUG FIX: the original print("... %s", e) never interpolated the
        # exception (print does no %-formatting); format explicitly.
        print("Errore in decrypt: %s" % e)
        return -1
#INIZIO DATABASE
# Open the module-level PostgreSQL connection from environment configuration.
try:
    dbConn = psycopg2.connect(
        host = environ.get('DATABASE_HOST'),
        database = environ.get('DATABASE_DB'),
        user = environ.get('DATABASE_USER'),
        password = environ.get('DATABASE_PWD'),
        port = environ.get('DATABASE_PORT')
    )
    print("Connessione al DataBase riuscita")
except psycopg2.Error as e:
    # BUG FIX: the original called Response.status(503) on the Response CLASS,
    # which raises TypeError (status is a property) and masked the real error;
    # it also left dbConn undefined. Record the failure instead.
    dbConn = None
    print("Errore nella connessione con il DataBase: %s" % e)
def insertChatId(chat_id):
    """Insert a new Telegram chat id into the users table. Always returns 0."""
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query instead of f-string interpolation
        # (the chat_id came from an external request — SQL injection risk).
        cur.execute("INSERT INTO users (chat_id) VALUES (%s)", (chat_id,))
        dbConn.commit()
        print("Chat Id inserita correttamente")
    except psycopg2.Error as e:
        print("ERRORE in insertChatId!: %s" % e.pgcode)
    finally:
        # cur may be None if cursor() itself raised
        if cur is not None:
            cur.close()
    return 0
def getApiKeyFromChatId(chat_id):
    """Return the decrypted Rebrandly API key for chat_id, or '-1' on any failure.

    The '-1' string sentinel matches what callers compare against.
    """
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute("SELECT api_key FROM users WHERE chat_id = %s", (str(chat_id),))
        row = cur.fetchone()
        # ROBUSTNESS FIX: the original crashed with TypeError on row[0] when
        # the chat_id was unknown (fetchone() -> None).
        if row is None or row[0] is None:
            return f'-1'
        r = decrypt(row[0])
        if r != -1:
            return str(r)
        else:
            return f'-1'
    except psycopg2.Error as e:
        print("ERRORE comunicazione con DB in getApiKeyfromChatId!: %s" % e.pgcode)
        return f'-1'
    finally:
        if cur is not None:
            cur.close()
# Change the API KEY associated with the Chat Id
def setApiKey(chat_id, api_key):
    """Encrypt api_key and store it for chat_id. Always returns 0."""
    cur = None
    try:
        cur = dbConn.cursor()
        api_key = encrypt(api_key)
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute(
            "UPDATE users SET api_key = %s WHERE chat_id = %s",
            (api_key, str(chat_id)),
        )
        dbConn.commit()
        print("API KEY settata correttamente")
    except psycopg2.Error as e:
        print("ERRORE in setApiKey!: %s" % e.pgcode)
    finally:
        if cur is not None:
            cur.close()
    return 0
# From the Chat Id, fetch the chat's current conversation state
def getStatus(chat_id):
    """Return the chat's status as a str, or the int -1 if unknown/unavailable."""
    row = None  # ROBUSTNESS FIX: was unbound (NameError) if execute() raised
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute("SELECT status FROM users WHERE chat_id = %s", (str(chat_id),))
        row = cur.fetchone()
    except psycopg2.Error as e:
        print("ERRORE in getStatus!: %s" % e.pgcode)
    finally:
        if cur is not None:
            cur.close()
    if row:
        return str(row[0])
    else:
        return -1
# Update the chat's conversation state
def setStatus(chat_id, s):
    """Store conversation status s for chat_id. Always returns 0."""
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute(
            "UPDATE users SET status = %s WHERE chat_id = %s",
            (str(s), str(chat_id)),
        )
        dbConn.commit()
        print("Status aggiornato correttamente")
    except psycopg2.Error as e:
        print("ERRORE in setStatus!: %s" % e.pgcode)
    finally:
        if cur is not None:
            cur.close()
    return 0
def getAuth(chat_id):
    """Return the chat's auth flag as a str ('1' logged in, '0' not), or -1 if unknown."""
    row = None  # ROBUSTNESS FIX: was unbound (NameError) if execute() raised
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute("SELECT auth FROM users WHERE chat_id = %s", (str(chat_id),))
        row = cur.fetchone()
    except psycopg2.Error as e:
        # BUG FIX: error message named getStatus (copy-paste); also %s was
        # never interpolated by print.
        print("ERRORE in getAuth!: %s" % e.pgcode)
    finally:
        if cur is not None:
            cur.close()
    if row:
        return str(row[0])
    else:
        return -1
#1 - logged in
#0 - not logged in
def setAuth(chat_id, s):
    """Store auth flag s ('1'/'0') for chat_id. Always returns 0."""
    cur = None
    try:
        cur = dbConn.cursor()
        # SECURITY FIX: parameterized query (was f-string SQL).
        cur.execute(
            "UPDATE users SET auth = %s WHERE chat_id = %s",
            (str(s), str(chat_id)),
        )
        dbConn.commit()
        print("Autorizzazione settata correttamente")
    except psycopg2.Error as e:
        print("ERRORE in setAuth!: %s" % e.pgcode)
    finally:
        if cur is not None:
            cur.close()
    return 0
#FINE DATABASE
#INIZIO API
@app.route('/link/<idLink>', methods=['GET', 'DELETE'])
def apiLink(idLink):
    """GET: fetch a Rebrandly link's details; DELETE: remove the link.

    The caller's Rebrandly API key comes from the `api_key` query parameter.
    Returns a trimmed JSON payload, 404 when the link/key is not found.
    """
    api_key = request.args.get('api_key', None)
    if request.method == 'GET':
        url = f'https://api.rebrandly.com/v1/links/{idLink}?apikey={api_key}'
        response = requests.get(url)
        if response.status_code != 200:
            return make_response(f'ID link non trovato', 404)
        response = response.json()
        r = {
            "id": response.get('id'),
            "title": response.get('title'),
            "destination": response.get('destination'),
            "shortUrl": response.get('shortUrl'),
            "status": response.get('status'),
            "clicks": response.get('clicks'),
            "createdAt": response.get('createdAt')
        }
        return make_response(jsonify(r), 200)
    elif request.method == 'DELETE':
        # BUG FIX: the original used `and`, so the request was only rejected
        # when BOTH values were missing; either one missing is an error.
        if api_key is None or idLink is None:
            return make_response(f'API KEY o l\'ID link non presenti', 404)
        url = f'https://api.rebrandly.com/v1/links/{idLink}?apikey={api_key}'
        response = requests.delete(url)
        if response.status_code != 200:
            return make_response(f'ID link non trovato', 404)
        response = response.json()
        r = {
            'id': response.get('id'),
            "title": response.get('title'),
            "destination": response.get('destination'),
            "shortUrl": response.get('shortUrl')
        }
        return make_response(jsonify(r), 200)
    else:
        # unreachable given methods=['GET', 'DELETE']; kept as a safety net
        return make_response("Comando non valdo", 400)
@app.route('/accountinfo')
def apiAccountInfo():
    """Proxy Rebrandly's account endpoint, returning a trimmed JSON summary."""
    api_key = request.args.get('api_key', None)
    response = requests.request(
        "GET", "https://api.rebrandly.com/v1/account", headers={"apikey": api_key}
    )
    if response.status_code != 200:
        return make_response(f'APIKEY non trovata', 404)
    data = response.json()
    subscription = data.get('subscription', {})
    link_limits = subscription.get('limits', {}).get('links', {})
    payload = {
        "id": data.get('id'),
        "username": data.get('username'),
        "email": data.get('email'),
        "fullName": data.get('fullName'),
        "typeAccount": subscription.get('category'),
        "linksUsed": link_limits.get('used'),
        "maxLimitLinks": link_limits.get('included'),
        "blockedLinks": link_limits.get('blocked'),
    }
    return make_response(jsonify(payload), 200)
@app.route('/newlink', methods=['POST'])
def apiNewLink():
    """Create a Rebrandly short link for `destUrl` and return its shortUrl."""
    api_key = request.args.get('api_key', None)
    destUrl = request.args.get('destUrl', None)
    response = requests.get(
        f'https://api.rebrandly.com/v1/links/new?apikey={api_key}&destination={destUrl}'
    )
    if response.status_code != 200:
        return make_response(f'Errore nel creare il link', 400)
    payload = {"shortUrl": response.json().get('shortUrl')}
    return make_response(jsonify(payload), 200)
@app.route('/countlink')
def apiCountLink():
    """Return the number of links created on the account."""
    api = request.args.get('api_key', None)
    response = requests.request(
        "GET", "https://api.rebrandly.com/v1/links/count", headers={"apikey": api}
    )
    if response.status_code != 200:
        return make_response(f'Errore nell\'ottenre l\'inofrmazione', 400)
    payload = {"count": response.json().get('count')}
    return make_response(jsonify(payload), 200)
@app.route('/listlink')
def apiListLink():
    """Return the 25 most recent links, newest first, as a trimmed JSON list."""
    api_key = request.args.get('api_key', None)
    response = requests.request(
        "GET",
        f'https://api.rebrandly.com/v1/links',
        headers={"apikey": api_key},
        params={"orderBy": "createdAt", "orderDir": "desc", "limit": "25"},
    )
    if response.status_code != 200:
        return make_response(response, 400)
    links = [
        {
            "id": item['id'],
            "title": item['title'],
            "destination": item['destination'],
            "shortUrl": item['shortUrl'],
        }
        for item in response.json()
    ]
    return make_response(jsonify(links), 200)
@app.route('/deletealllinks', methods=['DELETE'])
def apiDelAllLinks():
    """Delete every link on the account by iterating over /listlink results."""
    api_key = request.args.get('api_key', None)
    listing = requests.get(f'https://thepdgt-bot.herokuapp.com/listlink?api_key={api_key}')
    if listing.status_code != 200:
        return make_response(f'Errore nel caricare la lista', 400)
    had_error = False
    for item in listing.json():
        link_id = item['id']
        print(link_id)
        print(item['id'])
        delete_resp = requests.delete(
            f'https://api.rebrandly.com/v1/links/{link_id}?apikey={api_key}'
        )
        if delete_resp.status_code != 200:
            had_error = True
    if had_error:
        return make_response(jsonify({'status': f'Errore con alcuni link'}), 401)
    return make_response(jsonify({'status': f'OK'}), 200)
#FINE API
#INIZIO TELEGRAM
TOKEN = environ.get('BOT_TOKEN')
def inviaMessaggio(chat_id, text):
    """Send a Telegram message to chat_id via the Bot API. Always returns 0."""
    url = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    # BUG FIX: pass chat_id/text as query params so requests URL-encodes them;
    # the original f-string URL broke messages containing '&', '#', '%' or
    # newlines (which the /cmd help text contains).
    requests.post(url, params={'chat_id': chat_id, 'text': text})
    return 0
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
req = request.get_json()
chat_id = req.get('message').get('chat').get('id')
username = req.get('message').get('chat').get('first_name')
messageText = req.get('message').get('text')
text = f''
status = getStatus(chat_id)
if getAuth(chat_id) == f'1':
if messageText == '/start':
text = (f'Ciao, {username}!\n'
f'Usa il comando /help per avere le informazioni sul mio funzionamento mentre usa /cmd per visualizzare i comandi disponibili.\n')
elif messageText == '/help':
text = (f'Questo Bot permette di creare dei ShortURL attraverso il sito https://rebrandly.com.\n'
f'Per potermi usare prima devi collegare la tua API KEY fornita dal sito attraverso il comando /collegakey (per ottenerla devi prima creare un account).\n'
f'Usa il comando /cmd per visualizzare i comandi disponibili e il loro funzionamento.')
elif messageText == '/cmd':
text =( f'Ecco i comandi:\n'
f'/collegakey : Per collegare o aggiornare la tua chiave al bot\n'
f'/infoaccount : Per ottenere le informazioni relative al tuo account\n'
f'/infolink : Per ottenere le informazioni relative ad un link\n'
f'/newlink : Per creare un nuovo ShortLink\n'
f'/alllinks : Per ottenere tutti i link creati\n'
f'/deletelink : Per eliminare un Link\n'
f'/deletealllink : Per eliminare tutti i link\n'
f'/countlink : Per vedere i link creati\n'
f'/logout : Per fare il logout dall\'account\n'
f'/help: Per ottenere aiuto'
f'/annulla : Per annullare l\'ultimo comando\n')
elif messageText == '/annulla':
text = f'Comando annullato\n'
setStatus(chat_id, f'0')
elif messageText == '/collegakey':
text = f'Okei, ora inviami la Key che vuoi associare al bot\n'
setStatus(chat_id, f'1')
elif (messageText == '/infoaccount' or messageText == '/infolink' or messageText == '/newlink' or messageText == '/alllinks' or messageText == '/deletelink' or messageText == '/changekey' or messageText == '/countlink' or messageText == '/deletealllink' or messageText == '/logout'):
if getApiKeyFromChatId(chat_id) != f'-1':
if messageText == '/infoaccount':
api_key = getApiKeyFromChatId(chat_id)
if api_key != -1:
url = f'https://thepdgt-bot.herokuapp.com/accountinfo?api_key={api_key}'
response = requests.get(url)
if response.status_code != 200:
text = f'Errore nell\'ottenere le informazioni'
else:
response = response.json()
rUsername = response.get('username')
rEmail = response.get('email')
rFullName = response.get('fullName')
rType = response.get('typeAccount')
rLinksUsed = response.get('linksUsed')
rLinksMaxLimit = response.get('maxLimitLinks')
rLinksBlocked = response.get('rLinksBlocked')
rId = response["id"]
text = f'ID Account: {rId}\nUsername: {rUsername}\nEmail: {rEmail}\nNome: {rFullName}\nLink usati: {rLinksUsed}\nLink Massimi Creabili: {rLinksMaxLimit}\nLink bloccati: {rLinksBlocked}\nTipo account: {rType}'
else:
text = f'Account non trovato'
elif messageText == '/logout':
text = f'Logout effettuato\n'
setAuth(chat_id, f'0')
elif messageText == '/infolink':
text = f'Okei, ora imviami l\'ID del Link di cui vuoi visualizzare le informazioni\n'
setStatus(chat_id, f'2')
elif messageText == '/newlink':
text | |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks_dev/rolling.ipynb (unless otherwise specified).
__all__ = ['make_generic_rolling_features', 'make_generic_resampling_and_shift_features',
'create_rolling_resampled_features', 'make_generic_rolling_features',
'make_generic_resampling_and_shift_features', 'create_rolling_resampled_features']
# Cell
from functools import reduce, partial
import os
import datetime as dt
from tqdm import tqdm
from warnings import warn
import pandas as pd
import numpy as np
import numba
from dask import dataframe as dd
from dask import delayed
from dask.diagnostics import ProgressBar
# Cell
def _get_index_rolling_windows(rolling_obj):
'''
get positional indexes of rows of each rolling window
'''
if hasattr(rolling_obj, '_selection'):
previous_selection = getattr(rolling_obj, '_selection')
else:
previous_selection = None
INDEX_LIST = []
#define function to append values to global INDEX_LIST since rolling apply won't let return arrays
def f(x): INDEX_LIST.append(x.astype(int)); return 0
assert '__indexer__' not in rolling_obj.obj.columns, 'DataFrame should not contain any col with "__indexer__" name'
rolling_obj.obj = rolling_obj.obj.assign(__indexer__ = np.arange(len(rolling_obj.obj)), inplace = True)
rolling_obj._selection = '__indexer__'
rolling_obj.apply(f, raw = True)
rolling_obj.obj = rolling_obj.obj.drop(columns = ['__indexer__'])
delattr(rolling_obj, '_selection')
if not previous_selection is None:
setattr(rolling_obj, '_selection', previous_selection)
return INDEX_LIST
def _apply_custom_rolling(rolling_obj, func, raw = True, engine = 'numpy', *args, **kwargs):
    '''
    Apply `func` over every window of a rolling object, honoring any column
    selection on it. `engine` selects how windows are materialized:
    'numpy' / 'numba' pass ndarrays to func, 'pandas' passes DataFrames.
    '''
    engines = {
        'numpy':_rolling_apply_custom_agg_numpy,
        'pandas':_rolling_apply_custom_agg_pandas,
        'numba':_rolling_apply_custom_agg_numpy_jit
    }
    _rolling_apply = engines[engine]
    indexes = _get_index_rolling_windows(rolling_obj)
    # BUG FIX: the original computed the unselected result and then
    # unconditionally overwrote it with rolling_obj.obj[_selection] (missing
    # `else`), which raised when _selection was None.
    selection = getattr(rolling_obj, '_selection', None)
    if selection is None:
        values = _rolling_apply(rolling_obj.obj, indexes, func, *args, **kwargs)
    else:
        values = _rolling_apply(rolling_obj.obj[selection], indexes, func, *args, **kwargs)
    return values
def _rolling_apply_custom_agg_numpy_jit(df, indexes, func):
    '''
    applies some aggregation function over groups defined by index, with the
    inner loop compiled by numba (object mode, since func is a Python callable).
    groups are numpy arrays
    '''
    dfv = df.values
    # probe func on a single row to learn the output shape
    shape = np.array(func(dfv[:1])).shape
    result_array = np.empty((len(indexes), *shape))
    @numba.jit(forceobj=True)
    def _roll_apply(dfv, indexes, func, result_array):
        for i in np.arange(len(indexes)):
            data = dfv[indexes[i]]
            if len(data) > 0:
                result_array[i] = func(data)
            else:
                # BUG FIX: the original assigned np.empty(shape) to a discarded
                # local, leaving result_array[i] as uninitialized garbage for
                # empty windows; mark them NaN explicitly.
                result_array[i] = np.nan
        return result_array
    return _roll_apply(dfv, indexes, func, result_array)
def _rolling_apply_custom_agg_numpy(df, indexes, func, *args, **kwargs):
    '''
    applies some aggregation function over groups defined by index.
    groups are numpy arrays
    '''
    values = df.values
    out = [[] for _ in range(len(indexes))]
    for pos in tqdm(range(len(indexes))):
        window = values[indexes[pos]]
        # empty windows keep their [] placeholder
        if len(window) > 0:
            out[pos] = func(window, *args, **kwargs)
    return out
def _rolling_apply_custom_agg_pandas(df, indexes, func, *args, **kwargs):
    '''
    applies some aggregation function over groups defined by index.
    groups are pandas dataframes
    '''
    pieces = [[] for _ in range(len(indexes))]
    for pos in tqdm(range(len(indexes))):
        window = df.iloc[indexes[pos]]
        # empty windows keep their [] placeholder
        if len(window) > 0:
            pieces[pos] = func(window, *args, **kwargs)
    return pd.concat(pieces)
# Cell
def _make_rolling_groupby_object(df, group_columns, date_column):
'''
helping function to make computational graph creation faster
'''
groupby_object = df.set_index(date_column).groupby(group_columns)
return groupby_object
def make_generic_rolling_features(
    df,
    calculate_columns,
    group_columns,
    date_column,
    suffix = None,
    rolling_operation = 'mean',
    window = '60D',
    min_periods=None,
    center=False,
    win_type=None,
    on=None,
    axis=0,
    closed=None,
    **rolling_operation_kwargs
):
    '''
    make generic/custom rolling opeartion for a given column, grouped by customer, having Data de Emissao as date index
    if calculate cols is None, than use all cols
    Parameters
    ----------
    df: DataFrame
        DataFrame to make rolling features over
    calculate_columns: list of str
        list of columns to perform rolling_operation over
    group_columns: list of str
        list of columns passed to GroupBy operator prior to rolling
    date_column: str
        datetime column to roll over
    suffix: Str
        suffix for features names
    rolling_operation: Str of aggregation function, deafult = "mean"
        str representing groupby object method, such as mean, var, quantile ...
    window:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    min_periods:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    center:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    win_type:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    on:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    axis:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    closed:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    rolling_operation_kwargs:
        key word arguments passed to rolling_operation
    Returns
    -------
    DataFrame with the new calculated features
    '''
    # BUG FIX: the assert message lacked the f prefix, so {group_columns.__class__}
    # was printed literally instead of being interpolated.
    assert group_columns.__class__ in (set, tuple, list), f'group_columns type should be one of (tuple, list, set), not {group_columns.__class__}'
    if calculate_columns is None:
        calculate_columns = [i for i in df.columns if not i in [*group_columns, date_column]]
    keep_columns = [*group_columns, date_column, *calculate_columns]
    # accept either a raw DataFrame or a pre-built (pandas/dask) groupby object
    if not isinstance(df,(
        dd.groupby.DataFrameGroupBy,
        pd.core.groupby.generic.DataFrameGroupBy,
        pd.core.groupby.generic.SeriesGroupBy,
        dd.groupby.SeriesGroupBy
    )):
        df = _make_rolling_groupby_object(df, group_columns, date_column)
    if isinstance(df, (pd.core.groupby.generic.DataFrameGroupBy, pd.core.groupby.generic.SeriesGroupBy)):
        # pandas: resolve the named rolling aggregation and call it
        df = getattr(
            df[calculate_columns]
            .rolling(
                window = window,
                min_periods=min_periods,
                center=center,
                win_type=win_type,
                on=on,
                axis=axis,
                closed=closed
            ),
            rolling_operation,
        )(**rolling_operation_kwargs).reset_index()
    else: #syntax for dask groupby rolling
        df = df[calculate_columns].apply(
            lambda x: getattr(
                x.sort_index().rolling(
                    window = window,
                    min_periods=min_periods,
                    center=center,
                    win_type=win_type,
                    on=on,
                    axis=axis,
                    closed=closed
                ),
                rolling_operation,
            )(**rolling_operation_kwargs).reset_index()
            #meta = meta, #works only for float rolling
        ).reset_index().drop(columns = [f'level_{len(group_columns)}']) #drop unwanted "level_n" cols
    # rename feature columns, leaving the group/date key columns untouched
    if not suffix:
        df.columns = [
            f'{col}__rolling_{rolling_operation}_{window}_{str(rolling_operation_kwargs)}'
            if not col in (*group_columns, date_column) else col
            for col in df.columns
        ]
    else:
        df.columns = [
            f'{col}__rolling_{window}_{suffix}'
            if not col in (*group_columns, date_column) else col
            for col in df.columns
        ]
    return df
def _make_shift_resample_groupby_object(df, group_columns, date_column,freq, n_periods_shift):
groupby_object = (
df
.assign(**{date_column:df[date_column] + pd.Timedelta(n_periods_shift,freq)}) #shift
.set_index(date_column)
.groupby([*group_columns, pd.Grouper(freq = freq)])
)
return groupby_object
def make_generic_resampling_and_shift_features(
    df, calculate_columns, group_columns, date_column, freq = 'm',
    agg = 'last', n_periods_shift = 0, assert_frequency = False, suffix = '',**agg_kwargs
):
    '''
    makes generic resamples (aggregates by time frequency) on column.
    shifts one period up to avoid information leakage.
    Doing this through this function, although imposing some limitations to resampling periods, is much more efficient than
    pandas datetime-set_index + groupby + resampling.
    Parameters
    ----------
    df: DataFrame
        DataFrame to make rolling features over
    calculate_columns: list of str
        list of columns to perform rolling_operation over
    group_columns: list of str
        list of columns passed to GroupBy operator prior to rolling
    date_column: str
        datetime column to roll over
    freq: valid pandas freq str:
        frequency to resample data
    agg: Str of aggregation function, deafult = "last"
        str representing groupby object method, such as mean, var, last ...
    n_periods_shift: int
        number of periods to perform the shift opeartion. shifting is important after aggregation to avoid information leakage
        e.g. assuming you have the information of the end of the month in the beggining of the month.
    assert_frequency: bool, default = False
        resamples data to match freq, using foward fill method for
        missing values
    suffix: Str
        suffix for features names
    agg_kwargs:
        key word arguments passed to agg
    Returns
    -------
    DataFrame with the new calculated features
    '''
    if calculate_columns is None:
        calculate_columns = [i for i in df.columns if not i in [*group_columns, date_column]]
    # NOTE(review): keep_columns is computed but never used in this function.
    keep_columns = [*group_columns, date_column, *calculate_columns]
    # shift dates forward, then group by key columns + time bucket of `freq`
    df = (
        df
        .assign(**{date_column:df[date_column] + pd.Timedelta(n_periods_shift,freq)}) #shift
        .set_index(date_column)
        .groupby([*group_columns, pd.Grouper(freq = freq)])
    )
    # string agg -> named groupby method; callable agg -> apply
    if isinstance(agg, str):
        df = getattr(df[calculate_columns], agg)(**agg_kwargs)
    else:
        df = df[calculate_columns].apply(lambda x: agg(x,**agg_kwargs))
    # rename the aggregated feature columns
    if not suffix:
        df.columns = [f'{i}__{str(agg)}_{str(agg_kwargs)}' for i in df.columns]
    else:
        df.columns = [f'{i}__{suffix}' for i in df.columns]
    #create new shifted date_col
    #df.loc[:, date_column] = date_col_values
    if assert_frequency:
        # re-resample per group at `freq`, forward-filling gaps, then flatten
        # the resulting MultiIndex back into columns
        df = df.reset_index()
        df = df.set_index(date_column).groupby(group_columns).resample(freq).fillna(method = 'ffill')
        resetable_indexes = list(set(df.index.names) - set(df.columns))
        df = df.reset_index(level = resetable_indexes)
        df = df.reset_index(drop = True)
    return df
def create_rolling_resampled_features(
df,
calculate_columns,
group_columns,
date_column,
extra_columns = [],
n_periods_shift = 1,
rolling_first = True,
rolling_operation = 'mean',
window = '60D',
resample_freq = 'm',
resample_agg = 'last',
assert_frequency = False,
rolling_suffix = '',
resample_suffix = '',
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
rolling_operation_kwargs = {},
resample_agg_kwargs = {}
):
'''
calculates rolling features groupwise, than resamples according to resample period.
calculations can be done the other way arround if rolling_first is set to False
Parameters
----------
df: DataFrame
DataFrame to make rolling features over
calculate_columns: list of str
list of columns to perform rolling_operation over
group_columns: list of str
list of columns passed to GroupBy operator prior to rolling
date_column: str
datetime column to roll over
extra_columns: list of str
list of extra columns to be passed to the final dataframe without aggregation (takes the last values, assumes they're constant along groupby).
usefull to pass merge keys
n_periods_shift: int
number of periods to perform the shift opeartion. shifting is important after aggregation to avoid information leakage
e.g. assuming you have the information of the end of the month in the beggining of the month.
rolling_first: bool, deafult = True
whether to perform rolling before resampling, or the other way arround
rolling_operation: Str of aggregation function, deafult = "mean"
str representing groupby object | |
2.
Tam: The scaled time between the split and the end of ancient migration.
Ts: The scaled time between the end of ancient migration and present (in units of 2*Na generations).
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the pectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
###
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
#### Sum the two spectra in proportion O and 1-O
fs= ((1-Q)*fsnr + Q*fslr)
return fs
def AM2N2m(params, (n1,n2), pts):
    # NOTE: Python 2 tuple-parameter syntax; this module targets Python 2 + dadi.
    nu1, nu2, hrf, m12, m21, me12, me21, Tam, Ts, P, Q = params
    """
    Model of semi permeability with split, ancient migration with 2 migration rates, heterogenous effective population size (2 classes, shared by the two populations = background selection)

    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    me12: Effective migration from pop 2 to pop 1 in genomic islands.
    me21: Effective migration from pop 1 to pop 2 in genomic islands.
    Ts: The scaled time between the split and the ancient migration (in units of 2*Na generations).
    Tam: The scaled time between the ancient migration and present.
    P: The proportion of the genome evolving neutrally
    Q: The proportion of the genome with a reduced effective size due to selection at linked sites
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.

    NOTE(review): the neutral/normally-recombining spectra (phiN, phinr)
    integrate Tam WITH migration first, then Ts WITHOUT; the island spectrum
    (phiI) integrates Ts WITH effective migration first, then Tam WITHOUT.
    The epoch ordering is inconsistent across the four spectra -- TODO confirm
    against the intended model before relying on parameter estimates.

    NOTE(review): the final weights P + (1-P) + (1-Q) + Q sum to 2, so `fs`
    is the sum of two complete mixtures rather than one -- presumably matches
    the original publication's setup; verify.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)

    #### Calculate the neutral spectrum
    # phi for the equilibrium ancestral population
    phiN = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
    # We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
    phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
    phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))

    #### Calculate the genomic island spectrum
    # phi for the equilibrium ancestral population
    phiI = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
    # We set the population sizes after the split to nu1 and nu2 and the migration rates to me12 and me21
    phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=me12, m21=me21)
    # We keep the population sizes after the split to nu1 and nu2 and set the migration rate to zero
    phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))

    #### Calculate the spectrum in normally-recombining regions
    # phi for the equilibrium ancestral population
    phinr = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
    # We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
    phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
    phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))

    #### Spectrum of low-recombining regions
    # phi for the equilibrium ancestral population
    philr = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
    # We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
    philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
    # We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
    philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))

    #### Sum the spectra
    fs = (P*fsN+(1-P)*fsI+(1-Q)*fsnr+Q*fslr)
    return fs
def AM2NG(params, (n1,n2), pts):
nu1, nu2, b1, b2, hrf, m12, m21, Tam, Ts, Q = params
"""
Model with split, ancient migration, heterogenous effective population size (with 2 classes of loci shared by the two populations = Hill-Robertson effects)
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Tam: The scaled time between the split and the end of ancient migration.
Ts: The scaled time between the end of ancient migration and present (in units of 2*Na generations).
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the pectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We start the population size change after the split independantly in each population and set the migration rates to zero
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phinr = dadi.Integration.two_pops(phinr, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
###
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We start the population size change after the split independantly (bnu{1,2}_func) & integrate the hrf for low-recombining regions in each population and set the migration rates to zero
bnu1hrf_func = lambda t: (nu1 * b1**(t/Ts)) * hrf
bnu2hrf_func = lambda t: (nu2 * b1**(t/Ts)) * hrf
philr = dadi.Integration.two_pops(philr, xx, Ts, bnu1hrf_func, bnu2hrf_func, m12=0, m21=0)
###
| |
required, but the next branch is
# implied so we exit
wc_flag, wc_pos = None, 0
else:
wc_flag, wc_pos, wc_implicit = self._check(
keys,
branch["*"],
flags=branch["*"].get("__", flags),
i=i + 1,
explicit=explicit,
l=l,
)
# if debug:
# print("")
# print("KEYS (inner)", keys[:i], "pos", i, "flags", flags, "length", l, "expl", explicit, "impl", implicit)
# print("key", key, "flag", key_flag, "implicit", key_implicit, "pos", key_pos, "wc flag", wc_flag, "wc implicit", wc_implicit, "wc pos", wc_pos)
# explicit namespace match required but not found
if explicit and key_pos == 0 and wc_pos == 0:
return None, i, implicit
# RETURN wildcard path permission PASS-1
# wildcard path produced a permission flag
if wc_flag is not None and (not explicit or not wc_implicit):
# RETURN wildcard path permission PASS-1-CHECK-1
#
# we check if wildcard path length is deeper
# than exact match path length.
if key_pos < wc_pos:
# 1. wildcard permission is not implied or both wildcard
# and exact permission are implied
#
# 2. current branch permission is implied or an explicit
# path is required
if (not wc_implicit or key_implicit) and (implicit or explicit):
return wc_flag, wc_pos, wc_implicit
# RETURN wildcard path permission PASS-1-CHECK-2
#
# 1. exact key path has NOT produced a permission
#
# 2. current branch permission is implied or an explicit
# path is required
if key_flag is None and (implicit or explicit):
return wc_flag, wc_pos, wc_implicit
# RETURN exact path permission PASS-1
# exact key path produced a permission flag
if key_flag is not None and (not explicit or not key_implicit):
# RETURN exact key path permission PASS-1-CHECK-1
#
# if the exact path permission is not implied or the
# current permission is also implied
if not key_implicit or implicit:
return key_flag, key_pos, key_implicit
# RETURN exact key path permission PASS-1-CHECK-2
#
# if there are no flags on the current branch (first match)
if flags is None:
return key_flag, key_pos, key_implicit
# RETURN wildcard path permission PASS-2
# wildcard produced a permission flag, lets check against
# current branch
if wc_flag is not None and (not explicit or not wc_implicit):
# RETURN wildcard path permission PASS-2-CHECK-1
#
# if the wildcard path permission is not implied or the
# current permission is also implied
if not wc_implicit or implicit:
return wc_flag, wc_pos, wc_implicit
# RETURN wildcard path permission PASS-1-CHECK-2
#
# if there are no flags on the current branch (first match)
if flags is None:
return wc_flag, wc_pos, wc_implicit
# following neither wildard nor exact match produced
# a permission flag, return current branch permissions
return flags, i, implicit
def get_permissions(self, namespace, explicit=False):
"""
Returns the permissions level for the specified namespace
**Arguments**
- namespace (`str`): permissioning namespace
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`int`: permission mask
"""
if not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
keys = namespace.keys
p, pos, implicit = self._check(keys, self.index, explicit=explicit)
if not p or (explicit and implicit) or (explicit and pos != len(keys)):
p = 0
return p
def expandable(self, namespace):
"""
Returns whether or not the submitted namespace is expandable.
An expandable namespace is any namespace that contains "?"
keys.
**Arguments**
- namespace (`str`): permissioning namespace
**Returns**
- `bool`
"""
if not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
return "?" in namespace.keys
    def expand(
        self, namespace, explicit=False, index=None, path=None, length=0, exact=False
    ):
        """
        Expands "?" parts of a namespace into a list of namespaces

        **Arguments**

        - namespace (`str`): permissioning namespace

        **Keyword Arguments**

        - explicit (`bool=False`): only include namespaces whose index node
          carries an explicitly set permission (the "__" flag)
        - index (`dict=None`): index branch to search; defaults to the root
          index (internal recursion state)
        - path (`list=None`): keys matched so far (internal recursion state)
        - length (`int=0`): key length of the original namespace (internal
          recursion state)
        - exact (`bool=False`): when True, only namespaces whose key length
          matches the original namespace are returned

        **Returns**

        - `list`: list of unique expanded Namespace objects
        """
        if not isinstance(namespace, Namespace):
            namespace = Namespace(namespace)
        keys = namespace.keys
        # initialize recursion state on the outermost call
        if not index:
            index = self.index
        if not path:
            path = []
        if not length:
            length = len(keys)
        # NOTE(review): assumes keys is non-empty; the recursion below passes
        # keys[1:] -- presumably Namespace guards against empty values. TODO confirm.
        token = keys[0]
        result = []
        for k in list(index.keys()):
            # "_"-prefixed keys (e.g. "__") are index metadata, not namespace keys
            if k[0] == "_":
                continue
            if token == k or token == "?" or k == "*":
                # a concrete token is preserved over a "*" branch name,
                # unless the token itself is the "?" wildcard
                if k == "*" and token != "?":
                    _path = path + [token]
                else:
                    _path = path + [k]
                # collect this path if depth/explicitness constraints allow
                if (len(_path) == length or not exact) and (
                    index[k]["__"] or not explicit
                ):
                    _namespace = Namespace(_path)
                    if _namespace.value:
                        result.append(_namespace)
                # recurse into this branch with the remaining keys
                result += [
                    ns
                    for ns in self.expand(
                        keys[1:],
                        index=index[k],
                        path=_path,
                        length=length,
                        explicit=explicit,
                        exact=exact,
                    )
                ]
        # de-duplicate; result order is not guaranteed
        return list(set(result))
def check(self, namespace, level, explicit=False):
"""
Checks if the permset has permission to the specified namespace
at the specified level
**Arguments**
- namespace (`str`): permissioning namespace
- level (`int`): permission flag, `PERM_READ` for example
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`bool`: `True` if permissioned `False` if not
"""
if self.expandable(namespace):
for _namespace in self.expand(namespace):
if self.get_permissions(_namespace, explicit=explicit) & level != 0:
return True
return False
return (self.get_permissions(namespace, explicit=explicit) & level) != 0
def apply(self, data, path=None, applicator=None):
"""
Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
**Arguments**
- data (`dict`)
**Keyword Arguments**
- applicator (`Applicator=None`): allows you to specify the
applicator instance to use. If none is specified an instance
of `Applicator` will be used.
**Returns**
`dict`: cleaned data
"""
if applicator:
applicator.pset = self
else:
applicator = Applicator(self)
return applicator.apply(data, path=path)
class Applicator:
    """
    Handles application of permissions to a dataset contained
    in a dict

    Any data that is not permissioned to be read will be removed
    during application of permissions.
    """

    def __init__(self, pset):
        """
        **Arguments**

        - pset (`PermissionSet`): permission set used to decide what is kept
        """
        self.pset = pset
        # str(namespace) -> handler config dict (see `handler`)
        self.handlers = {}

    def handler(self, path, key=None, explicit=False, **kwargs):
        """
        Register a handler for a namespace path.

        **Arguments**

        - path (`str|Namespace`): namespace path the handler applies to

        **Keyword Arguments**

        - key (`callable=None`): callable(value, key) returning the key to
          use for permission lookups of container members
        - explicit (`bool=False`): require explicitly set permissions for
          this namespace during `apply`
        """
        if not isinstance(path, Namespace):
            path = Namespace(path, strip=False)
        handler = {"namespace": path, "key": key, "explicit": explicit}
        handler.update(**kwargs)
        self.handlers[str(path)] = handler

    def find_handler(self, path):
        """
        Return the first registered handler whose namespace matches `path`,
        or None when no handler matches.
        """
        handler = None
        if path and self.handlers:
            namespace = Namespace(path, strip=False)
            for _handler in list(self.handlers.values()):
                if namespace.match(_handler.get("namespace").keys, partial=False):
                    handler = _handler
                    break
        return handler

    def apply(self, data, path=None):
        """
        Apply permissions in this set to the provided data, effectively
        removing all keys from it are not permissioned to be viewed

        **Arguments**

        - data (`dict`)

        **Returns**

        `dict`: cleaned data
        """
        if path is None:
            path = []

        # non-dict payloads are returned untouched
        if not isinstance(data, dict):
            return data

        def _enumerate(value):
            # uniform (key, value) iteration over lists and dicts
            if isinstance(value, list):
                yield from enumerate(value)
            elif isinstance(value, dict):
                yield from list(value.items())

        def _set(container, key, value):
            # uniform assignment for lists and dicts
            if isinstance(container, list):
                container.append(value)
            else:
                container[key] = value

        def _apply(ramap, value, status=False, wc=False, path=None):
            # `wc` marks descent through a "*" branch; currently unused but
            # kept so existing call sites stay valid.
            # bug fix: `path` previously used a mutable default (`path=[]`);
            # replaced with the None-sentinel idiom.
            if path is None:
                path = []

            # leaf value: keep only if the current read status allows it
            if not isinstance(value, dict) and not isinstance(value, list):
                if status:
                    return value
                else:
                    return None

            status = ramap.get("__", status)

            handler = None
            key_handler = None
            if path and self.handlers:
                namespace = Namespace(path)
                for _handler in list(self.handlers.values()):
                    if namespace.match(_handler.get("namespace").keys, partial=False):
                        handler = _handler
                        key_handler = handler.get("key")
                        break

            if isinstance(value, list):
                if not key_handler:
                    key_handler = list_key_handler
                rv = []
            else:
                rv = {}

            for k, v in _enumerate(value):
                if key_handler:
                    k = key_handler(v, k)
                k = str(k)
                if isinstance(v, dict) or isinstance(v, list):
                    # containers recurse through an exact branch, then "*"
                    if k in ramap:
                        r = _apply(ramap[k], v, status=status, path=path + [k])
                        if r:
                            _set(rv, k, r)
                    elif "*" in ramap:
                        r = _apply(
                            ramap["*"], v, status=status, wc=True, path=path + [k]
                        )
                        if r:
                            _set(rv, k, r)
                    elif status:
                        _set(rv, k, v)
                else:
                    if k in ramap:
                        if ramap[k].get("__", True):
                            _set(rv, k, v)
                    elif "*" in ramap and ramap["*"].get("__", True):
                        _set(rv, k, v)
                    elif status:
                        _set(rv, k, v)

            return rv

        # loop through all the handlers that specify the `explicit` arguments
        # and temporarily add deny rules for those to the targeted permissionset
        tmpns = {}
        for ns, handler in list(self.handlers.items()):
            if handler.get("explicit"):
                p = self.pset.get_permissions(ns)
                if p & const.PERM_READ:
                    exists = False
                    for _ns in self.pset.namespaces:
                        if Namespace(_ns).match(Namespace(ns).keys, partial=False):
                            exists = True
                            break
                    if exists:
                        continue
                    tmpns[ns] = p
                    self.pset[ns] = const.PERM_DENY

        # apply permissions
        rv = _apply(self.pset.read_access_map, data)

        # remove temporarily added deny rules
        for ns, p in list(tmpns.items()):
            # NOTE(review): stored values are never None (only set when
            # p & PERM_READ), so the `del` branch appears unreachable;
            # kept for safety.
            if p is None:
                del self.pset[ns]
            else:
                self.pset[ns] = p

        return rv
class NamespaceKeyApplicator(Applicator):
"""
Applicator that looks for permission namespaces from
a specified field in the dict it is scanning
"""
# field name | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for color conversion functions.
Authors
-------
- the rgb2hsv test was written by <NAME>, 2009
- other tests written by <NAME>, 2009
:license: modified BSD
"""
from __future__ import division
import os.path
import numpy as np
from skimage._shared.testing import assert_equal, assert_almost_equal
from skimage._shared.testing import assert_array_almost_equal
from skimage._shared.testing import TestCase
from skimage import img_as_float, img_as_ubyte
from skimage.io import imread
from skimage.color import (rgb2hsv, hsv2rgb,
rgb2xyz, xyz2rgb,
rgb2hed, hed2rgb,
separate_stains,
combine_stains,
rgb2rgbcie, rgbcie2rgb,
convert_colorspace,
rgb2grey, gray2rgb,
xyz2lab, lab2xyz,
lab2rgb, rgb2lab,
xyz2luv, luv2xyz,
luv2rgb, rgb2luv,
lab2lch, lch2lab,
rgb2yuv, yuv2rgb,
rgb2yiq, yiq2rgb,
rgb2ypbpr, ypbpr2rgb,
rgb2ycbcr, ycbcr2rgb,
rgba2rgb,
guess_spatial_dimensions)
from skimage import data_dir
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
import colorsys
def test_guess_spatial_dimensions():
    """2-D -> 2, 3-D cube -> 3, RGB -> None, 4-D -> 3, 1-D -> ValueError."""
    cases = [
        (np.zeros((5, 5)), 2),
        (np.zeros((5, 5, 5)), 3),
        (np.zeros((5, 5, 3)), None),
        (np.zeros((5, 5, 5, 3)), 3),
    ]
    for image, expected in cases:
        assert_equal(guess_spatial_dimensions(image), expected)
    with testing.raises(ValueError):
        guess_spatial_dimensions(np.zeros((5,)))
class TestColorconv(TestCase):
img_rgb = imread(os.path.join(data_dir, 'color.png'))
img_grayscale = imread(os.path.join(data_dir, 'camera.png'))
img_rgba = np.array([[[0, 0.5, 1, 0],
[0, 0.5, 1, 1],
[0, 0.5, 1, 0.5]]]).astype(np.float)
colbars = np.array([[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0]]).astype(np.float)
colbars_array = np.swapaxes(colbars.reshape(3, 4, 2), 0, 2)
colbars_point75 = colbars * 0.75
colbars_point75_array = np.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2)
xyz_array = np.array([[[0.4124, 0.21260, 0.01930]], # red
[[0, 0, 0]], # black
[[.9505, 1., 1.089]], # white
[[.1805, .0722, .9505]], # blue
[[.07719, .15438, .02573]], # green
])
lab_array = np.array([[[53.233, 80.109, 67.220]], # red
[[0., 0., 0.]], # black
[[100.0, 0.005, -0.010]], # white
[[32.303, 79.197, -107.864]], # blue
[[46.229, -51.7, 49.898]], # green
])
luv_array = np.array([[[53.233, 175.053, 37.751]], # red
[[0., 0., 0.]], # black
[[100., 0.001, -0.017]], # white
[[32.303, -9.400, -130.358]], # blue
[[46.228, -43.774, 56.589]], # green
])
# RGBA to RGB
def test_rgba2rgb_conversion(self):
rgba = self.img_rgba
rgb = rgba2rgb(rgba)
expected = np.array([[[1, 1, 1],
[0, 0.5, 1],
[0.5, 0.75, 1]]]).astype(np.float)
self.assertEqual(rgb.shape, expected.shape)
assert_almost_equal(rgb, expected)
def test_rgba2rgb_error_grayscale(self):
self.assertRaises(ValueError, rgba2rgb, self.img_grayscale)
def test_rgba2rgb_error_rgb(self):
self.assertRaises(ValueError, rgba2rgb, self.img_rgb)
# RGB to HSV
def test_rgb2hsv_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
hsv = rgb2hsv(rgb).reshape(-1, 3)
# ground truth from colorsys
gt = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]
)
assert_almost_equal(hsv, gt)
def test_rgb2hsv_error_grayscale(self):
self.assertRaises(ValueError, rgb2hsv, self.img_grayscale)
def test_rgb2hsv_error_one_element(self):
self.assertRaises(ValueError, rgb2hsv, self.img_rgb[0, 0])
# HSV to RGB
def test_hsv2rgb_conversion(self):
rgb = self.img_rgb.astype("float32")[::16, ::16]
# create HSV image with colorsys
hsv = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]).reshape(rgb.shape)
# convert back to RGB and compare with original.
# relative precision for RGB -> HSV roundtrip is about 1e-6
assert_almost_equal(rgb, hsv2rgb(hsv), decimal=4)
def test_hsv2rgb_error_grayscale(self):
self.assertRaises(ValueError, hsv2rgb, self.img_grayscale)
def test_hsv2rgb_error_one_element(self):
self.assertRaises(ValueError, hsv2rgb, self.img_rgb[0, 0])
# RGB to XYZ
    def test_rgb2xyz_conversion(self):
        """rgb2xyz of the 8-color colorbar image matches precomputed values."""
        # Expected sRGB -> CIE XYZ for the 2x4 colorbar layout.
        gt = np.array([[[0.950456, 1. , 1.088754],
                        [0.538003, 0.787329, 1.06942 ],
                        [0.592876, 0.28484 , 0.969561],
                        [0.180423, 0.072169, 0.950227]],
                       [[0.770033, 0.927831, 0.138527],
                        [0.35758 , 0.71516 , 0.119193],
                        [0.412453, 0.212671, 0.019334],
                        [0. , 0. , 0. ]]])
        assert_almost_equal(rgb2xyz(self.colbars_array), gt)
# stop repeating the "raises" checks for all other functions that are
# implemented with color._convert()
def test_rgb2xyz_error_grayscale(self):
self.assertRaises(ValueError, rgb2xyz, self.img_grayscale)
def test_rgb2xyz_error_one_element(self):
self.assertRaises(ValueError, rgb2xyz, self.img_rgb[0, 0])
# XYZ to RGB
def test_xyz2rgb_conversion(self):
assert_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)),
self.colbars_array)
# RGB<->XYZ roundtrip on another image
def test_xyz_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(xyz2rgb(rgb2xyz(img_rgb)), img_rgb)
# RGB<->HED roundtrip with ubyte image
def test_hed_rgb_roundtrip(self):
img_rgb = img_as_ubyte(self.img_rgb)
with expected_warnings(['precision loss']):
new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
assert_equal(new, img_rgb)
# RGB<->HED roundtrip with float image
def test_hed_rgb_float_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = self.img_rgb
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_equal(img_as_ubyte(conv), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = img_as_float(self.img_rgb)
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_array_almost_equal(conv, img_rgb)
# RGB to RGB CIE
    def test_rgb2rgbcie_conversion(self):
        """rgb2rgbcie of the colorbar image matches precomputed values."""
        # Expected sRGB -> CIE RGB for the 2x4 colorbar layout.
        gt = np.array([[[ 0.1488856 , 0.18288098, 0.19277574],
                        [ 0.01163224, 0.16649536, 0.18948516],
                        [ 0.12259182, 0.03308008, 0.17298223],
                        [-0.01466154, 0.01669446, 0.16969164]],
                       [[ 0.16354714, 0.16618652, 0.0230841 ],
                        [ 0.02629378, 0.1498009 , 0.01979351],
                        [ 0.13725336, 0.01638562, 0.00329059],
                        [ 0. , 0. , 0. ]]])
        assert_almost_equal(rgb2rgbcie(self.colbars_array), gt)
# RGB CIE to RGB
def test_rgbcie2rgb_conversion(self):
# only roundtrip test, we checked rgb2rgbcie above already
assert_almost_equal(rgbcie2rgb(rgb2rgbcie(self.colbars_array)),
self.colbars_array)
def test_convert_colorspace(self):
colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr']
colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb, ycbcr2rgb, ypbpr2rgb]
colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz, rgb2ycbcr, rgb2ypbpr]
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', 'RGB'),
self.colbars_array)
for i, space in enumerate(colspaces):
gt = colfuncs_from[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, space, 'RGB'), gt)
gt = colfuncs_to[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', space), gt)
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'nokey', 'XYZ')
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'RGB', 'nokey')
def test_rgb2grey(self):
x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(np.float)
g = rgb2grey(x)
assert_array_almost_equal(g, 1)
assert_equal(g.shape, (1, 1))
def test_rgb2grey_contiguous(self):
x = np.random.rand(10, 10, 3)
assert rgb2grey(x).flags["C_CONTIGUOUS"]
assert rgb2grey(x[:5, :5]).flags["C_CONTIGUOUS"]
def test_rgb2grey_alpha(self):
x = np.random.rand(10, 10, 4)
assert rgb2grey(x).ndim == 2
def test_rgb2grey_on_grey(self):
rgb2grey(np.random.rand(5, 5))
# test matrices for xyz2lab and lab2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
    def test_xyz2lab(self):
        """xyz2lab matches stored reference arrays across illuminants/observers."""
        assert_array_almost_equal(xyz2lab(self.xyz_array),
                                  self.lab_array, decimal=3)

        # Test the conversion with the rest of the illuminants.
        for I in ["d50", "d55", "d65", "d75"]:
            for obs in ["2", "10"]:
                fname = "lab_array_{0}_{1}.npy".format(I, obs)
                lab_array_I_obs = np.load(
                    os.path.join(os.path.dirname(__file__), 'data', fname))
                assert_array_almost_equal(lab_array_I_obs,
                                          xyz2lab(self.xyz_array, I, obs),
                                          decimal=2)
        # Illuminants "a" and "e" only have 2-degree observer reference data.
        for I in ["a", "e"]:
            fname = "lab_array_{0}_2.npy".format(I)
            lab_array_I_obs = np.load(
                os.path.join(os.path.dirname(__file__), 'data', fname))
            assert_array_almost_equal(lab_array_I_obs,
                                      xyz2lab(self.xyz_array, I, "2"),
                                      decimal=2)
def test_lab2xyz(self):
assert_array_almost_equal(lab2xyz(self.lab_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "lab_array_{0}_{1}.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "lab_array_{0}_2.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
# And we include a call to test the exception handling in the code.
try:
xs = lab2xyz(lab_array_I_obs, "NaI", "2") # Not an illuminant
except ValueError:
pass
try:
xs = lab2xyz(lab_array_I_obs, "d50", "42") # Not a degree
except ValueError:
pass
    def test_rgb2lab_brucelindbloom(self):
        """
        Test the RGB->Lab conversion by comparing to the calculator on the
        authoritative Bruce Lindbloom
        [website](http://brucelindbloom.com/index.html?ColorCalculator.html).
        """
        # Obtained with D65 white point, sRGB model and gamma
        # (one Lab triple per colorbar color, white ... black).
        gt_for_colbars = np.array([
            [100,0,0],
            [97.1393, -21.5537, 94.4780],
            [91.1132, -48.0875, -14.1312],
            [87.7347, -86.1827, 83.1793],
            [60.3242, 98.2343, -60.8249],
            [53.2408, 80.0925, 67.2032],
            [32.2970, 79.1875, -107.8602],
            [0,0,0]]).T
        # reshape to match the colbars_array image layout
        gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
        assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2)
def test_lab_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(lab2rgb(rgb2lab(img_rgb)), img_rgb)
# test matrices for xyz2luv and luv2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
    def test_xyz2luv(self):
        """xyz2luv matches stored reference arrays across illuminants/observers."""
        assert_array_almost_equal(xyz2luv(self.xyz_array),
                                  self.luv_array, decimal=3)

        # Test the conversion with the rest of the illuminants.
        for I in ["d50", "d55", "d65", "d75"]:
            for obs in ["2", "10"]:
                fname = "luv_array_{0}_{1}.npy".format(I, obs)
                luv_array_I_obs = np.load(
                    os.path.join(os.path.dirname(__file__), 'data', fname))
                assert_array_almost_equal(luv_array_I_obs,
                                          xyz2luv(self.xyz_array, I, obs),
                                          decimal=2)
        # Illuminants "a" and "e" only have 2-degree observer reference data.
        for I in ["a", "e"]:
            fname = "luv_array_{0}_2.npy".format(I)
            luv_array_I_obs = np.load(
                os.path.join(os.path.dirname(__file__), 'data', fname))
            assert_array_almost_equal(luv_array_I_obs,
                                      xyz2luv(self.xyz_array, I, "2"),
                                      decimal=2)
def test_luv2xyz(self):
assert_array_almost_equal(luv2xyz(self.luv_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "luv_array_{0}_{1}.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "luv_array_{0}_2.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
    def test_rgb2luv_brucelindbloom(self):
        """
        Test the RGB->Luv conversion by comparing to the calculator on the
        authoritative Bruce Lindbloom
        [website](http://brucelindbloom.com/index.html?ColorCalculator.html).
        """
        # Obtained with D65 white point, sRGB model and gamma
        # (one Luv triple per colorbar color, white ... black).
        gt_for_colbars = np.array([
            [100, 0, 0],
            [97.1393, 7.7056, 106.7866],
            [91.1132, -70.4773, -15.2042],
            [87.7347, -83.0776, 107.3985],
            [60.3242, 84.0714, -108.6834],
            [53.2408, 175.0151, 37.7564],
            [32.2970, -9.4054, -130.3423],
            [0, 0, 0]]).T
        # reshape to match the colbars_array image layout
        gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
        assert_array_almost_equal(rgb2luv(self.colbars_array),
                                  gt_array, decimal=2)
def test_luv_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
    def test_lab_rgb_outlier(self):
        """lab2rgb clips out-of-gamut Lab colors to the expected RGB values."""
        lab_array = np.ones((3, 1, 3))
        lab_array[0] = [50, -12, 85]
        lab_array[1] = [50, 12, -85]
        lab_array[2] = [90, -4, -47]
        # expected RGB after gamut clipping
        rgb_array = np.array([[[0.501, 0.481, 0]],
                              [[0, 0.482, 1.]],
                              [[0.578, 0.914, 1.]],
                              ])
        assert_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3)
def test_lab_full_gamut(self):
a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100))
L = np.ones(a.shape)
lab = np.dstack((L, a, b))
for value in [0, | |
"""pipelinerunner.py unit tests."""
import logging
import pytest
from unittest.mock import call, patch
from pypyr.cache.loadercache import pypeloader_cache
from pypyr.cache.parsercache import contextparser_cache
from pypyr.cache.pipelinecache import pipeline_cache
from pypyr.context import Context
from pypyr.errors import (ContextError,
KeyNotInContextError,
PyModuleNotFoundError,
Stop,
StopPipeline,
StopStepGroup)
import pypyr.moduleloader
import pypyr.pipelinerunner
from tests.common.utils import DeepCopyMagicMock
# ------------------------- parser mocks -------------------------------------#
from tests.common.utils import patch_logger
def mock_parser(args):
    """Arbitrary mock function to execute instead of get_parsed_context."""
    parsed = {'key1': 'created in mock parser', 'key2': args}
    return Context(parsed)
def mock_parser_none(args):
    """Mock get_parsed_context by always returning None, ignoring args."""
    return None
# ------------------------- parser mocks -------------------------------------#
# ------------------------- get_parsed_context--------------------------------#
def test_get_parsed_context_no_parser():
    """On get_parsed_context return empty Context when no parser specified."""
    result = pypyr.pipelinerunner.get_parsed_context({}, None)
    # no context_parser key -> an empty, but valid, Context object
    assert isinstance(result, Context)
    assert len(result) == 0
def test_get_parsed_context_parser_not_found():
    """On get_parsed_context raise if parser module specified but not found."""
    pipeline_def = {'context_parser': 'unlikelyblahmodulenameherexxssz'}
    with pytest.raises(PyModuleNotFoundError):
        pypyr.pipelinerunner.get_parsed_context(pipeline_def, None)
@patch('pypyr.moduleloader.get_module')
def test_get_parsed_context_parser_returns_none(mocked_moduleloader):
    """On get_parsed_context return empty Context when parser returns None."""
    # bug fix (test isolation): clear the parser cache so the mocked module
    # loader is actually hit -- the sibling tests do the same; without it a
    # previously cached parser can bypass the mock and break
    # assert_called_once_with depending on test execution order.
    contextparser_cache.clear()
    mocked_moduleloader.return_value.get_parsed_context = mock_parser_none

    context = pypyr.pipelinerunner.get_parsed_context(
        {'context_parser': 'specifiedparserhere'}, ['in arg here'])

    mocked_moduleloader.assert_called_once_with('specifiedparserhere')

    assert isinstance(context, Context)
    assert len(context) == 0
@patch('pypyr.moduleloader.get_module')
def test_get_parsed_context_parser_pass(mocked_moduleloader):
    """On get_parsed_context pass arg param and returns context."""
    # clear the parser cache so the mocked module loader is actually hit
    contextparser_cache.clear()
    mocked_moduleloader.return_value.get_parsed_context = mock_parser

    context = pypyr.pipelinerunner.get_parsed_context(
        {'context_parser': 'specifiedparserhere'}, 'in arg here')

    mocked_moduleloader.assert_called_once_with('specifiedparserhere')

    assert isinstance(context, Context)
    expected = {'key1': 'created in mock parser', 'key2': 'in arg here'}
    assert len(context) == len(expected)
    for key, value in expected.items():
        assert context[key] == value
@patch('pypyr.moduleloader.get_module', return_value=3)
def test_get_parser_context_signature_wrong(mocked_moduleloader):
    """Raise when parser found but no get_parsed_context attr."""
    contextparser_cache.clear()
    with pytest.raises(AttributeError) as err_info:
        pypyr.pipelinerunner.get_parsed_context(
            {'context_parser': 'specifiedparserhere'}, 'in arg here')

    expected = "'int' object has no attribute 'get_parsed_context'"
    assert str(err_info.value) == expected
# ------------------------- get_parsed_context--------------------------------#
# ------------------------- main ---------------------------------------------#
@patch('pypyr.log.logger.set_up_notify_log_level')
@patch('pypyr.pipelinerunner.load_and_run_pipeline')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='arb/dir')
def test_main_pass(mocked_get_mocked_work_dir,
                   mocked_set_work_dir,
                   mocked_run_pipeline,
                   mocked_set_up_notify):
    """Main initializes and runs pipelines.

    Note: patch decorators apply bottom-up, so the bottom-most patch binds
    to the first parameter and the top-most to the last.
    """
    # ensure no previously cached pipeline interferes with this run
    pipeline_cache.clear()
    pypyr.pipelinerunner.main(pipeline_name='arb pipe',
                              pipeline_context_input='arb context input',
                              working_dir='arb/dir',
                              groups=['g'],
                              success_group='sg',
                              failure_group='fg')

    mocked_set_up_notify.assert_called_once()
    mocked_set_work_dir.assert_called_once_with('arb/dir')
    # main() forwards all run arguments except working_dir
    mocked_run_pipeline.assert_called_once_with(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        groups=['g'],
        success_group='sg',
        failure_group='fg')
@patch('pypyr.pipelinerunner.load_and_run_pipeline',
       side_effect=ContextError('arb'))
@patch('pypyr.moduleloader.set_working_directory')
def test_main_fail(mock_set_work_dir, mock_run_pipeline):
    """Propagate unhandled pipeline error out of main."""
    pipeline_cache.clear()
    with pytest.raises(ContextError) as err_info:
        pypyr.pipelinerunner.main(pipeline_name='arb pipe',
                                  pipeline_context_input='arb context input',
                                  working_dir='arb/dir')
    assert str(err_info.value) == 'arb'
    mock_set_work_dir.assert_called_once_with('arb/dir')
    # Unspecified groups default to None on the pass-through call.
    mock_run_pipeline.assert_called_once_with(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        groups=None,
        success_group=None,
        failure_group=None)
# ------------------------- main ---------------------------------------------#
# ------------------------- prepare_context - --------------------------------#
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context())
def test_prepare_context_empty_parse(mock_get_parsed_context):
    """Leave existing context untouched when parsed context is empty."""
    ctx = Context({'c1': 'cv1', 'c2': 'cv2'})
    pypyr.pipelinerunner.prepare_context(pipeline='pipe def',
                                         context_in_args='arb context input',
                                         context=ctx)
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # Empty parsed context merges nothing in.
    assert ctx == {'c1': 'cv1', 'c2': 'cv2'}
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'a': 'av1', 'c1': 'new value from parsed'}))
def test_prepare_context_with_parse_merge(mock_get_parsed_context):
    """Merge parsed context over existing context, parsed values winning."""
    ctx = Context({'c1': 'cv1', 'c2': 'cv2'})
    pypyr.pipelinerunner.prepare_context(pipeline='pipe def',
                                         context_in_args='arb context input',
                                         context=ctx)
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # c1 overridden by parsed value, a added, c2 untouched.
    assert ctx == {'a': 'av1', 'c1': 'new value from parsed', 'c2': 'cv2'}
# ------------------------- prepare_context - --------------------------------#
# ------------------------- run_pipeline -------------------------------------#
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'a': 'b'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='arb/dir')
def test_load_and_run_pipeline_pass(mock_get_work_dir,
                                    mock_set_work_dir,
                                    mock_get_pipe_def,
                                    mock_get_parsed_context,
                                    mock_steps_runner):
    """Pass correct params through all collaborators on the happy path."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    with patch('pypyr.context.Context') as mock_context:
        mock_context.return_value = Context()
        pypyr.pipelinerunner.load_and_run_pipeline(
            pipeline_name='arb pipe',
            pipeline_context_input='arb context input')
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='arb/dir')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # The freshly created context instance must get working_dir set on it.
    assert mock_context.return_value.working_dir == 'arb/dir'
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'a': 'b'})
    # Happy path: default step groups ran; failure group never invoked.
    runner = mock_steps_runner.return_value
    runner.run_step_groups.assert_called_once_with(groups=['steps'],
                                                   success_group='on_success',
                                                   failure_group='on_failure')
    runner.run_failure_step_group.assert_not_called()
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context())
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='arb/dir')
def test_load_and_run_pipeline_pass_skip_parse_context(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Skip context parsing entirely when parse_input is False."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        parse_input=False)
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='arb/dir')
    # parse_input=False means the context parser must never run.
    mock_get_parsed_context.assert_not_called()
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={})
    # Normal run: step groups executed, failure group never invoked.
    runner = mock_steps_runner.return_value
    runner.run_step_groups.assert_called_once_with(groups=['steps'],
                                                   success_group='on_success',
                                                   failure_group='on_failure')
    runner.run_failure_step_group.assert_not_called()
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context')
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='arb/dir')
def test_load_and_run_pipeline_parse_context_error(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Run on_failure with empty Context when context parsing raises."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    mock_get_parsed_context.side_effect = ContextError
    with pytest.raises(ContextError):
        pypyr.pipelinerunner.load_and_run_pipeline(
            pipeline_name='arb pipe',
            pipeline_context_input='arb context input')
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='arb/dir')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context=Context())
    # Parse failed before any steps: only the failure group may run.
    runner = mock_steps_runner.return_value
    runner.run_step_groups.assert_not_called()
    runner.run_failure_step_group.assert_called_once_with('on_failure')
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context())
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='arb/dir')
def test_load_and_run_pipeline_steps_error_raises(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Re-raise the error when the steps group itself fails."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    # Make the step-group run blow up with KeyNotInContextError.
    mock_steps_runner.return_value.run_step_groups.side_effect = (
        KeyNotInContextError)
    with pytest.raises(KeyNotInContextError):
        pypyr.pipelinerunner.load_and_run_pipeline(
            pipeline_name='arb pipe',
            pipeline_context_input='arb context input')
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='arb/dir')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'1': 'context 1', '2': 'context2'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='from/context')
def test_load_and_run_pipeline_with_existing_context_pass(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Reuse an existing context and its working_dir through the run."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    existing_context = Context({'2': 'original', '3': 'new'})
    existing_context.working_dir = 'from/context'
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        context=existing_context)
    # working_dir on the supplied context must survive untouched.
    assert existing_context.working_dir == 'from/context'
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='from/context')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    # Parsed values win on key clash ('2'); untouched keys persist ('3').
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'1': 'context 1', '2': 'context2'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='from/context')
def test_load_and_run_pipeline_with_group_specified(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Run only the explicitly specified groups, with no default handlers."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    existing_context = Context({'2': 'original', '3': 'new'})
    existing_context.working_dir = 'from/context'
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        context=existing_context,
        groups=['arb1', 'arb2'])
    assert existing_context.working_dir == 'from/context'
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='from/context')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # Explicit groups suppress the default success/failure groups.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['arb1', 'arb2'],
        success_group=None,
        failure_group=None
    )
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'1': 'context 1', '2': 'context2'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='from/context')
def test_load_and_run_pipeline_with_success_group_specified(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Honor an explicit success_group while groups stay at default."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    existing_context = Context({'2': 'original', '3': 'new'})
    existing_context.working_dir = 'from/context'
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        context=existing_context,
        success_group='arb1')
    assert existing_context.working_dir == 'from/context'
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='from/context')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # Default 'steps' group; custom success group; no failure group.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='arb1',
        failure_group=None
    )
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'1': 'context 1', '2': 'context2'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='from/context')
def test_load_and_run_pipeline_with_failure_group_specified(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Honor an explicit failure_group while groups stay at default."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    existing_context = Context({'2': 'original', '3': 'new'})
    existing_context.working_dir = 'from/context'
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        context=existing_context,
        failure_group='arb1')
    assert existing_context.working_dir == 'from/context'
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='from/context')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # Default 'steps' group; no success group; custom failure group.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group=None,
        failure_group='arb1'
    )
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context',
       return_value=Context({'1': 'context 1', '2': 'context2'}))
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
@patch('pypyr.moduleloader.set_working_directory')
@patch('pypyr.moduleloader.get_working_directory', return_value='from/context')
def test_load_and_run_pipeline_with_group_and_failure_group_specified(
        mock_get_work_dir,
        mock_set_work_dir,
        mock_get_pipe_def,
        mock_get_parsed_context,
        mock_steps_runner):
    """Honor explicit groups plus an explicit failure_group together."""
    pipeline_cache.clear()
    pypeloader_cache.clear()
    existing_context = Context({'2': 'original', '3': 'new'})
    existing_context.working_dir = 'from/context'
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
        context=existing_context,
        groups=['arb1'],
        failure_group='arb2')
    assert existing_context.working_dir == 'from/context'
    mock_set_work_dir.assert_not_called()
    mock_get_pipe_def.assert_called_once_with(pipeline_name='arb pipe',
                                              working_dir='from/context')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='pipe def',
        context_in_args='arb context input')
    # Custom group list; success suppressed; custom failure group.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['arb1'],
        success_group=None,
        failure_group='arb2'
    )
    mock_steps_runner.assert_called_once_with(pipeline_definition='pipe def',
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context')
def test_run_pipeline_parse_context_error_failure(
        mock_get_parsed_context,
        mock_steps_runner):
    """Run the custom failure group, then re-raise the parse error."""
    mock_get_parsed_context.side_effect = ValueError('arb')
    runner = mock_steps_runner.return_value
    ctx = pypyr.context.Context()
    with pytest.raises(ValueError) as err_info:
        pypyr.pipelinerunner.run_pipeline(
            pipeline='arb pipe',
            context=ctx,
            pipeline_context_input='arb context input',
            groups=['gr'],
            success_group='sg',
            failure_group='fg')
    assert str(err_info.value) == 'arb'
    mock_get_parsed_context.assert_called_once_with(
        pipeline='arb pipe',
        context_in_args='arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_definition='arb pipe',
                                              context=ctx)
    # Parse failed before any steps: only the failure group may run.
    runner.run_step_groups.assert_not_called()
    runner.run_failure_step_group.assert_called_once_with('fg')
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context')
def test_run_pipeline_parse_context_error_failure_stop(
        mock_get_parsed_context,
        mock_steps_runner):
    """Let Stop raised by the failure group override the parse error."""
    mock_get_parsed_context.side_effect = ValueError('arb')
    runner = mock_steps_runner.return_value
    runner.run_failure_step_group.side_effect = Stop()
    ctx = pypyr.context.Context()
    with pytest.raises(Stop):
        pypyr.pipelinerunner.run_pipeline(
            pipeline='arb pipe',
            context=ctx,
            pipeline_context_input='arb context input')
    mock_get_parsed_context.assert_called_once_with(
        pipeline='arb pipe',
        context_in_args='arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_definition='arb pipe',
                                              context=ctx)
    # Parse failed before any steps: only the failure group may run.
    runner.run_step_groups.assert_not_called()
    runner.run_failure_step_group.assert_called_once_with('on_failure')
@patch('pypyr.pipelinerunner.StepsRunner', autospec=True)
@patch('pypyr.pipelinerunner.get_parsed_context')
def test_run_pipeline_parse_context_error_failure_stopstepgroup(
        mock_get_parsed_context,
        mock_steps_runner):
    """Swallow StopStepGroup from on_failure and surface the parse error."""
    mock_get_parsed_context.side_effect = ValueError('arb')
    runner = mock_steps_runner.return_value
    runner.run_failure_step_group.side_effect = StopStepGroup()
    ctx = pypyr.context.Context()
    with pytest.raises(ValueError) as err_info:
        pypyr.pipelinerunner.run_pipeline(
            pipeline='arb pipe',
            context=ctx,
            pipeline_context_input='arb context input')
    # The original parse error wins over the swallowed StopStepGroup.
    assert str(err_info.value) == 'arb'
    mock_get_parsed_context.assert_called_once_with(
        pipeline='arb pipe',
        context_in_args='arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_definition='arb pipe',
                                              context=ctx)
    runner.run_step_groups.assert_not_called()
    runner.run_failure_step_group.assert_called_once_with('on_failure')
# ------------------------- run_pipeline -------------------------------------#
# ------------------------- loader -------------------------------------------#
def test_arbitrary_loader_module_not_found():
    """Raise PyModuleNotFoundError when the loader module does not exist."""
    with pytest.raises(PyModuleNotFoundError):
        pipeline_cache.clear()
        pypyr.moduleloader.set_working_directory('arb/dir')
        pypyr.pipelinerunner.load_and_run_pipeline(
            pipeline_name='arb pipe',
            pipeline_context_input='arb context input',
            loader='not_found_loader')
def test_loader_no_get_pipeline_definition():
    """Raise and log when loader module lacks get_pipeline_definition."""
    import sys
    this_module = sys.modules[__name__]
    pypyr.moduleloader.set_working_directory('arb/dir')
    with patch_logger(
            'pypyr.cache.loadercache',
            logging.ERROR) as mock_logger_error:
        with pytest.raises(AttributeError) as err_info:
            # Use this very test module as the (invalid) loader.
            pypyr.pipelinerunner.load_and_run_pipeline(
                pipeline_name='arb pipe',
                pipeline_context_input='arb context input',
                loader=__name__)
        assert str(err_info.value) == (f"module '{__name__}' "
                                       "has no attribute "
                                       "'get_pipeline_definition'")
    mock_logger_error.assert_called_once_with(
        f"The pipeline loader {this_module} doesn't have a "
        "get_pipeline_definition(pipeline_name, working_dir) function."
    )
@patch('pypyr.pipelinerunner.run_pipeline')
@patch('pypyr.pypeloaders.fileloader.get_pipeline_definition',
       return_value='pipe def')
def test_empty_loader_set_up_to_default(mock_get_pipeline_definition,
                                        mock_run_pipeline):
    """Fall back to pypyr.pypeloaders.fileloader when no loader given."""
    pypyr.moduleloader.set_working_directory('arb/dir')
    pipeline_cache.clear()
    pypeloader_cache.clear()
    pypyr.pipelinerunner.load_and_run_pipeline(
        pipeline_name='arb pipe',
        pipeline_context_input='arb context input',
    )
    # Default fileloader fetched the definition from the working dir.
    mock_get_pipeline_definition.assert_called_once_with(
        pipeline_name='arb pipe',
        working_dir='arb/dir'
    )
    mock_run_pipeline.assert_called_once_with(
        context={},
        parse_input=True,
        pipeline='pipe def',
        pipeline_context_input='arb context input',
        groups=None,
        success_group=None,
        failure_group=None
    )
@patch('pypyr.pipelinerunner.run_pipeline')
def test_arb_loader(mock_run_pipeline):
"""Test loader set up."""
pypyr.moduleloader.set_working_directory('tests')
pipeline_cache.clear()
pypyr.pipelinerunner.load_and_run_pipeline(
pipeline_name='arb pipe',
pipeline_context_input='arb context input',
loader='arbpack.arbloader',
groups=None,
success_group=None,
failure_group=None
)
mock_run_pipeline.assert_called_once_with(
context={},
parse_input=True,
pipeline={'pipeline_name': 'arb pipe',
'working_dir': | |
from abc import ABC
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from data import denormalize, normalize
from utils import load_data
class RelationEncoder(nn.Module):
    """Two-layer MLP with ReLU after each layer, for encoding relations."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RelationEncoder, self).__init__()
        # input -> hidden -> output; ReLU after every linear layer.
        layers = [
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map [n_relations, input_size] -> [n_relations, output_size]."""
        return self.model(x)
class ParticleEncoder(nn.Module):
    """Two-layer MLP with ReLU after each layer, for encoding particles."""

    def __init__(self, input_size, hidden_size, output_size):
        super(ParticleEncoder, self).__init__()
        # input -> hidden -> output; ReLU after every linear layer.
        layers = [
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map [n_particles, input_size] -> [n_particles, output_size]."""
        return self.model(x)
class Propagator(nn.Module):
    """Single linear layer + ReLU; optionally adds a residual before ReLU."""

    def __init__(self, input_size, output_size, residual=False):
        super(Propagator, self).__init__()
        self.residual = residual
        self.linear = nn.Linear(input_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x, res=None):
        """Map [n_relations/n_particles, input_size] -> [..., output_size].

        When residual is enabled, `res` is added before the ReLU.
        """
        pre_activation = self.linear(x)
        if self.residual:
            pre_activation = pre_activation + res
        return self.relu(pre_activation)
class ParticlePredictor(nn.Module):
    """MLP head: linear -> ReLU -> linear, no activation on the output."""

    def __init__(self, input_size, hidden_size, output_size):
        super(ParticlePredictor, self).__init__()
        self.linear_0 = nn.Linear(input_size, hidden_size)
        self.linear_1 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Map [n_particles, input_size] -> [n_particles, output_size]."""
        hidden = self.relu(self.linear_0(x))
        return self.linear_1(hidden)
class l2_norm_layer(nn.Module):
    """Normalize each row of a B x D tensor to (near) unit L2 norm."""

    def __init__(self):
        super(l2_norm_layer, self).__init__()

    def forward(self, x):
        """Row-normalize x; the 1e-10 term guards against division by zero.

        :param x: B x D
        :return: B x D with each row divided by its L2 norm.
        """
        row_norms = torch.sqrt((x ** 2).sum(1) + 1e-10)
        return x / row_norms[:, None]
class PropagationNetwork(nn.Module):
    """Graph propagation network over particles and pairwise relations.

    Encodes node and edge features, runs `pstep` rounds of message passing
    (relation propagation -> aggregation -> particle propagation), then
    predicts a per-particle output of size `output_dim`.
    """
    def __init__(self, args, input_particle_dim=None, input_relation_dim=None, output_dim=None, action=True, tanh=False,
                 residual=False, use_gpu=False):
        """Build encoders, propagators and the prediction head.

        :param args: config namespace providing attr_dim, state_dim,
            action_dim, relation_dim, nf_particle, nf_relation, nf_effect.
        :param input_particle_dim: per-particle input size; derived from
            args (attr + state [+ action]) when None.
        :param input_relation_dim: per-relation input size; derived from
            args (relation + state) when None.
        :param output_dim: prediction size; defaults to args.state_dim.
        :param action: whether actions are concatenated into node input.
        :param tanh: wrap the predictor in Tanh to bound its output.
        :param residual: use a residual connection in the particle propagator.
        :param use_gpu: stored flag; not used directly in this class.
        """
        super(PropagationNetwork, self).__init__()
        self.args = args
        self.action = action
        # Derive any unspecified dimensions from the args config.
        if input_particle_dim is None:
            input_particle_dim = args.attr_dim + args.state_dim
            input_particle_dim += args.action_dim if action else 0
        if input_relation_dim is None:
            input_relation_dim = args.relation_dim + args.state_dim
        if output_dim is None:
            output_dim = args.state_dim
        nf_particle = args.nf_particle
        nf_relation = args.nf_relation
        nf_effect = args.nf_effect
        self.nf_effect = args.nf_effect
        self.use_gpu = use_gpu
        self.residual = residual
        # (1) state
        self.obj_encoder = ParticleEncoder(input_particle_dim, nf_particle, nf_effect)
        # (1) state receiver (2) state_diff
        self.relation_encoder = RelationEncoder(input_relation_dim, nf_relation, nf_relation)
        # (1) relation encode (2) sender effect (3) receiver effect
        self.relation_propagator = Propagator(nf_relation + 2 * nf_effect, nf_effect)
        # (1) particle encode (2) particle effect
        self.particle_propagator = Propagator(2 * nf_effect, nf_effect, self.residual)
        # rigid predictor
        # (1) particle encode (2) set particle effect
        self.particle_predictor = ParticlePredictor(nf_effect, nf_effect, output_dim)
        if tanh:
            # Bound predictions to (-1, 1) by appending a Tanh.
            self.particle_predictor = nn.Sequential(
                self.particle_predictor, nn.Tanh()
            )
    def forward(self, attrs, states, actions, rel_attrs, pstep):
        """Run pstep rounds of message passing and predict per-particle output.

        :param attrs: B x N x attr_dim
        :param states: B x N x state_dim
        :param actions: B x N x action_dim (ignored unless self.action)
        :param rel_attrs: B x N x N x relation_dim
        :param pstep: number of propagation rounds (typically 1 or 2)
        :return: B x N x output_dim per-particle prediction.
        """
        B, N = attrs.size(0), attrs.size(1)
        '''encode node'''
        # Node input is attrs + states (+ actions when enabled), along feature dim.
        obj_input_list = [attrs, states]
        if self.action:
            obj_input_list += [actions]
        tmp = torch.cat(obj_input_list, 2)
        # Flatten batch and particle dims for the MLP, then restore.
        obj_encode = self.obj_encoder(tmp.reshape(tmp.size(0) * tmp.size(1), tmp.size(2))).reshape(B, N, -1)
        '''encode edge'''
        # Pairwise state difference: entry [i, j] is state_i - state_j.
        rel_states = states[:, :, None, :] - states[:, None, :, :]
        receiver_attr = attrs[:, :, None, :].repeat(1, 1, N, 1)
        sender_attr = attrs[:, None, :, :].repeat(1, N, 1, 1)
        tmp = torch.cat([rel_attrs, rel_states, receiver_attr, sender_attr], 3)
        rel_encode = self.relation_encoder(tmp.reshape(B * N * N, -1)).reshape(B, N, N, -1)
        for i in range(pstep):
            '''calculate relation effect'''
            receiver_code = obj_encode[:, :, None, :].repeat(1, 1, N, 1)
            sender_code = obj_encode[:, None, :, :].repeat(1, N, 1, 1)
            tmp = torch.cat([rel_encode, receiver_code, sender_code], 3)
            rel_effect = self.relation_propagator(tmp.reshape(B * N * N, -1)).reshape(B, N, N, -1)
            '''aggregating relation effect'''
            # Sum incoming edge effects over senders (dim 2) per receiver.
            rel_agg_effect = rel_effect.sum(2)
            '''calc particle effect'''
            tmp = torch.cat([obj_encode, rel_agg_effect], 2)
            # obj_encode is updated in place across rounds of propagation.
            obj_encode = self.particle_propagator(tmp.reshape(B * N, -1)).reshape(B, N, -1)
        obj_prediction = self.particle_predictor(obj_encode.reshape(B * N, -1)).reshape(B, N, -1)
        return obj_prediction
# ======================================================================================================================
class CompositionalKoopmanOperators(nn.Module, ABC):
def __init__(self, args, residual=False, use_gpu=False):
super(CompositionalKoopmanOperators, self).__init__()
self.args = args
self.stat = load_data(['attrs', 'states', 'actions'], args.stat_path)
g_dim = args.g_dim
self.nf_effect = args.nf_effect
self.use_gpu = use_gpu
self.residual = residual
''' state '''
# we should not include action in state encoder
input_particle_dim = args.attr_dim + args.state_dim
input_relation_dim = args.state_dim + args.relation_dim + args.attr_dim * 2
# print('state_encoder', 'node', input_particle_dim, 'edge', input_relation_dim)
self.state_encoder = PropagationNetwork(
args, input_particle_dim=input_particle_dim, input_relation_dim=input_relation_dim,
output_dim=g_dim, action=False, tanh=True, # use tanh to enforce the shape of the code space
residual=residual, use_gpu=use_gpu)
# the state for decoding phase is replaced with code of g_dim
input_particle_dim = args.attr_dim + args.g_dim
input_relation_dim = args.g_dim + args.relation_dim + args.attr_dim * 2
# print('state_decoder', 'node', input_particle_dim, 'edge', input_relation_dim)
self.state_decoder = PropagationNetwork(
args, input_particle_dim=input_particle_dim, input_relation_dim=input_relation_dim,
output_dim=args.state_dim, action=False, tanh=False,
residual=residual, use_gpu=use_gpu)
''' dynamical system coefficient: A and B '''
self.A = None
self.B = None
if args.fit_type == 'structured':
self.system_identify = self.fit
self.simulate = self.rollout
self.step = self.linear_forward
if args.fit_type == 'unstructured':
self.system_identify = self.fit_unstructured
self.simulate = self.rollout_unstructured
self.step = self.linear_forward_unstructured
elif args.fit_type == 'diagonal':
self.system_identify = self.fit_diagonal
self.simulate = self.rollout_diagonal
self.step = self.linear_forward_diagonal
def to_s(self, attrs, gcodes, rel_attrs, pstep):
""" state decoder """
if self.args.env in ['Soft', 'Swim']:
states = self.state_decoder(attrs=attrs, states=gcodes, actions=None, rel_attrs=rel_attrs, pstep=pstep)
return regularize_state_Soft(states, rel_attrs, self.stat)
return self.state_decoder(attrs=attrs, states=gcodes, actions=None, rel_attrs=rel_attrs, pstep=pstep)
def to_g(self, attrs, states, rel_attrs, pstep):
""" state encoder """
return self.state_encoder(attrs=attrs, states=states, actions=None, rel_attrs=rel_attrs, pstep=pstep)
@staticmethod
def get_aug(G, rel_attrs):
"""
:param G: B x T x N x D
:param rel_attrs: B x N x N x R
:return:
"""
B, T, N, D = G.size()
R = rel_attrs.size(-1)
sumG_list = []
for i in range(R):
''' B x T x N x N '''
adj = rel_attrs[:, :, :, i][:, None, :, :].repeat(1, T, 1, 1)
sumG = torch.bmm(
adj.reshape(B * T, N, N),
G.reshape(B * T, N, D)
).reshape(B, T, N, D)
sumG_list.append(sumG)
augG = torch.cat(sumG_list, 3)
return augG
# structured A
    def fit(self, G, H, U, rel_attrs, I_factor):
        """Fit structured (block-shared) linear dynamics H = augG @ A + augU @ B.

        Solves a reweighted least-squares problem per batch element via a
        pseudo-inverse (self.batch_pinv, defined elsewhere in this class) and
        stores the resulting operators on self.A / self.B.

        :param G: B x T x N x D
        :param H: B x T x N x D
        :param U: B x T x N x a_dim
        :param rel_attrs: B x N x N x R (relation_dim) rel_attrs[i,j] ==> receiver i, sender j
        :param I_factor: scalar regularization factor passed to batch_pinv
        :return:
            A: B x R D x D
            B: B x R a_dim x D
            fit_err: scalar RMS residual of the fit
        s.t.
            H = augG @ A + augU @ B
        """
        ''' B x R: sqrt(# of appearance of block matrices of the same type)'''
        rel_weights = torch.sqrt(rel_attrs.sum(1).sum(1))
        # Guard against division by zero for relation types that never occur.
        rel_weights = torch.clamp(rel_weights, min=1e-8)
        bs, T, N, D = G.size()
        R = rel_attrs.size(-1)
        a_dim = U.size(3)
        ''' B x T x N x R D '''
        augG = self.get_aug(G, rel_attrs)
        ''' B x T x N x R a_dim'''
        augU = self.get_aug(U, rel_attrs)
        # Downweight each relation type by its frequency so common types
        # don't dominate the least-squares fit.
        augG_reweight = augG.reshape(bs, T, N, R, D) / rel_weights[:, None, None, :, None]
        augU_reweight = augU.reshape(bs, T, N, R, a_dim) / rel_weights[:, None, None, :, None]
        ''' B x TN x R(D + a_dim)'''
        GU_reweight = torch.cat([augG_reweight.reshape(bs, T * N, R * D),
                                 augU_reweight.reshape(bs, T * N, R * a_dim)], 2)
        '''B x (R * D + R * a_dim) x D'''
        # Least-squares solve: [A; B] = pinv([augG augU]) @ H.
        AB_reweight = torch.bmm(
            self.batch_pinv(GU_reweight, I_factor),
            H.reshape(bs, T * N, D)
        )
        # Undo the reweighting so A/B apply to the unweighted augmentation.
        self.A = AB_reweight[:, :R * D].reshape(bs, R, D, D) / rel_weights[:, :, None, None]
        self.B = AB_reweight[:, R * D:].reshape(bs, R, a_dim, D) / rel_weights[:, :, None, None]
        self.A = self.A.reshape(bs, R * D, D)
        self.B = self.B.reshape(bs, R * a_dim, D)
        # RMS residual of the (reweighted) fit, for monitoring.
        fit_err = H.reshape(bs, T * N, D) - torch.bmm(GU_reweight, AB_reweight)
        fit_err = torch.sqrt((fit_err ** 2).mean())
        return self.A, self.B, fit_err
def linear_forward(self, g, u, rel_attrs):
"""
:param g: B x N x D
:param u: B x N x a_dim
:param rel_attrs: B x N x N x R
:return:
"""
''' B x N x R D '''
aug_g = self.get_aug(G=g[:, None, :, :], rel_attrs=rel_attrs)[:, 0]
''' B x N x R a_dim'''
aug_u = self.get_aug(G=u[:, None, :, :], rel_attrs=rel_attrs)[:, 0]
new_g = torch.bmm(aug_g, self.A) + torch.bmm(aug_u, self.B)
return new_g
def rollout(self, g, u_seq, T, rel_attrs):
"""
:param g: B x N x D
:param u_seq: B x T x N x a_dim
:param rel_attrs: B x N x N x R
:param T:
:return:
"""
g_list = []
for t in range(T):
g = self.linear_forward(g, u_seq[:, t], rel_attrs)
g_list.append(g[:, None, :, :])
return torch.cat(g_list, 1)
# unstructured large A
def fit_unstructured(self, G, H, U, I_factor, rel_attrs=None):
"""
:param G: B x T x N x D
:param H: B x T x N | |
:type years: int.
:param months: The number of months to add.
:type months: int.
:param weeks: The number of weeks to add.
:type weeks: int.
:param days: The number of days to add.
:type days: int.
:param hours: The number of hours to add.
:type hours: int.
:param minutes: The number of minutes to add.
:type minutes: int.
:param seconds: The number of seconds to add.
:type seconds: int.
:param milliseconds: The number of milliseconds to add.
:type milliseconds: int.
:param microseconds: The number of microseconds to add.
:type microseconds: int.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
"""
return _add_time(now(utc), years=years, months=months, weeks=weeks,
days=days, hours=hours, minutes=minutes, seconds=seconds,
milliseconds=milliseconds, microseconds=microseconds)
def how_many_leap_days(from_date, to_date):
    """Get the number of leap days between two dates.

    Integer arguments are interpreted as years and promoted to January 1
    of that year. ``datetime.datetime`` values are reduced to their date
    component so both endpoints compare as plain dates.

    :param from_date: A datetime object or a year. If only a year is
        specified, will use January 1.
    :type from_date: datetime.datetime, datetime.date
    :param to_date: A datetime object or a year. If only a year is
        specified, will use January 1.
    :type to_date: datetime.datetime, datetime.date
    :returns: int -- the number of leap days.
    :raises: TypeError, ValueError
    .. versionchanged:: 0.4.0
       ``TypeError`` is now raised
       ``ValueError`` is now raised
    .. versionadded:: 0.3.0
    """
    # Promote bare years to January 1 of that year.
    if isinstance(from_date, int):
        from_date = datetime.date(from_date, 1, 1)
    if isinstance(to_date, int):
        to_date = datetime.date(to_date, 1, 1)
    # Validate both endpoints (from_date first, matching the public API).
    for candidate in (from_date, to_date):
        if not _is_date_type(candidate):
            message = "'{0}' object is not a valid date or time."
            raise TypeError(message.format(type(candidate).__name__))
    # Both endpoints must be the same type for the comparison below, and
    # both dates and datetimes pass the validation above, so reduce any
    # datetime to its date component.
    if isinstance(from_date, datetime.datetime):
        from_date = from_date.date()
    if isinstance(to_date, datetime.datetime):
        to_date = to_date.date()
    if from_date > to_date:
        message = ("The value of 'from_date' must be before the value of "
                   "'to_date'.")
        raise ValueError(message)
    # leapdays() counts leap days using January 1 of each year, so the
    # total must be corrected for endpoints that fall after February 28
    # in a leap year: a late from_date already passed its leap day
    # (remove one); a late to_date includes its own leap day (add one).
    def _past_feb_28(d):
        return d.month > 2 or (d.month == 2 and d.day > 28)

    count = calendar.leapdays(from_date.year, to_date.year)
    if calendar.isleap(from_date.year) and _past_feb_28(from_date):
        count -= 1
    if calendar.isleap(to_date.year) and _past_feb_28(to_date):
        count += 1
    return count
def is_5_oclock():
    # Easter egg: returns a `datetime.timedelta` with the time remaining
    # until 5pm local time. Between 5pm and midnight the result is
    # negative -- remember a timedelta is negative when its `days`
    # attribute is negative; `seconds` and `microseconds` stay positive.
    # Exactly at 5 o'clock every field is 0.
    #
    # This is inherently a *local time* question, so the module-wide
    # force-UTC switch is temporarily lifted and then restored.
    forced = _FORCE_UTC
    if forced:
        unset_utc()
    # try/finally guarantees the UTC flag comes back even if now()
    # raises (it shouldn't, but better safe than sorry).
    try:
        current = now()
    finally:
        if forced:
            set_utc()
    quitting_time = datetime.time(17)
    return datetime.datetime.combine(current.date(), quitting_time) - current
def is_timezone_aware(value):
    """Check if a datetime is time zone aware.

    `is_timezone_aware()` is the inverse of `is_timezone_naive()`.

    :param value: A valid datetime object.
    :type value: datetime.datetime, datetime.time
    :returns: bool -- if the object is time zone aware.
    :raises: TypeError
    .. versionchanged:: 0.4.0
       ``TypeError`` is raised
    .. versionadded:: 0.3.0
    """
    # Guard clause: anything without a tzinfo attribute cannot be a time.
    if not hasattr(value, 'tzinfo'):
        message = "'{0}' object is not a valid time."
        raise TypeError(message.format(type(value).__name__))
    # Aware means a tzinfo is attached *and* it yields a UTC offset.
    tz = value.tzinfo
    return tz is not None and tz.utcoffset(value) is not None
def is_timezone_naive(value):
    """Check if a datetime is time zone naive.

    `is_timezone_naive()` is the inverse of `is_timezone_aware()`.

    :param value: A valid datetime object.
    :type value: datetime.datetime, datetime.time
    :returns: bool -- if the object is time zone naive.
    :raises: TypeError
    .. versionchanged:: 0.4.0
       ``TypeError`` is now raised
    .. versionadded:: 0.3.0
    """
    # Guard clause: anything without a tzinfo attribute cannot be a time.
    if not hasattr(value, 'tzinfo'):
        message = "'{0}' object is not a valid time."
        raise TypeError(message.format(type(value).__name__))
    # Naive means no tzinfo, or a tzinfo that yields no UTC offset.
    tz = value.tzinfo
    return tz is None or tz.utcoffset(value) is None
def now(utc=False):
    """Get a datetime representing the current date and time.

    By default ``now()`` will return the datetime in the system's local
    time. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
    has been called, the datetime will be based on UTC instead.

    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the current datetime.
    """
    # The module-wide force-UTC switch takes precedence over the argument.
    use_utc = _FORCE_UTC or utc
    return datetime.datetime.utcnow() if use_utc else datetime.datetime.now()
def past(years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0,
         milliseconds=0, microseconds=0, utc=False):
    """Get a datetime in the past.
    ``past()`` accepts the all of the parameters of
    ``datetime.timedelta``, plus includes the parameters ``years`` and
    ``months``. ``years`` and ``months`` will subtract their respective
    units of time from the datetime.
    By default ``past()`` will return the datetime in the system's local
    time. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
    has been called, the datetime will be based on UTC instead.
    :param years: The number of years to subtract.
    :type years: int.
    :param months: The number of months to subtract.
    :type months: int.
    :param weeks: The number of weeks to subtract.
    :type weeks: int.
    :param days: The number of days to subtract.
    :type days: int.
    :param hours: The number of hours to subtract.
    :type hours: int.
    :param minutes: The number of minutes to subtract.
    :type minutes: int.
    :param seconds: The number of seconds to subtract.
    :type seconds: int.
    :param milliseconds: The number of milliseconds to subtract.
    :type milliseconds: int.
    :param microseconds: The number of microseconds to subtract.
    :type microseconds: int.
    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the calculated datetime.
    """
    # BUGFIX: milliseconds and microseconds were previously forwarded
    # without negation, so they were *added* to the current time even
    # though this function (and its docstring) subtracts every other
    # unit. All units are now negated consistently.
    return _add_time(now(utc), years=-years, months=-months, weeks=-weeks,
                     days=-days, hours=-hours, minutes=-minutes,
                     seconds=-seconds, milliseconds=-milliseconds,
                     microseconds=-microseconds)
def set_utc():
    """Force all module datetimes to UTC.

    While set, the ``utc`` parameter of the other functions is ignored;
    the global switch wins. Undo with ``unset_utc()``.
    """
    # Module-level switch; the pylint W0603 warning here is expected.
    global _FORCE_UTC
    _FORCE_UTC = True
def shift(value, from_tz=None, to_tz=None, utc=False):
"""Convert a datetime from one time zone to another.
``value`` will be converted from its time zone (when it is time zone
aware) or the time zone specified by ``from_tz`` (when it is time
zone naive) to the time zone specified by ``to_tz``. These values
can either be strings containing the name of the time zone (see
``pytz.all_timezones`` for a list of all supported values) or a
``datetime.tzinfo`` object.
If no value is provided for either ``from_tz`` (when ``value`` is
time zone naive) or ``to_tz``, the current system time zone will be
used. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
has been called, however, UTC will be used instead.
:param value: A datetime object.
:type value: datetime.datetime, datetime.time.
:param from_tz: The time zone to shift from.
:type from_tz: datetime.tzinfo, str.
:param to_tz: The time zone to shift to.
:type to_tz: datetime.tzinfo, str.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
:raises: TypeError
.. versionchanged:: 0.4.0
``TypeError`` is now raised
"""
if not hasattr(value, 'tzinfo'):
message = "'{0}' object is not a valid time."
raise TypeError(message.format(type(value).__name__))
# Check for a from timezone
# If the datetime is time zone aware, its time zone should be used. If it's
# naive, from_tz must be supplied.
if is_timezone_aware(value):
from_tz = value.tzinfo
else:
if not from_tz:
if _FORCE_UTC or utc:
from_tz = pytz.UTC
| |
0].plot(tc_tout, tc_est_profile[:, 4], 'c13', linestyle='-.', label='$TC Sim$')
p[1, 0].set_title('$v_N$')
p[1, 0].set_ylabel('$(m/s)$')
p[1, 0].legend(loc='best', prop={'size': 10})
# 3.2.b For East Velocity
p[1, 1].plot(tin, true_profile[:, 5], 'c19', label='$Orig$')
p[1, 1].hold('on')
p[1, 1].plot(lc_tout, lc_est_profile[:, 5], 'c5', linestyle='--', label='$LC Sim$')
p[1, 1].plot(tc_tout, tc_est_profile[:, 5], 'c14', linestyle='-.', label='$TC Sim$')
p[1, 1].set_title('$v_E$')
p[1, 1].legend(loc='best', prop={'size': 10})
# 3.2.c For Down Velocity
p[1, 2].plot(tin, true_profile[:, 6], 'c19', label='$Orig$')
p[1, 2].hold('on')
p[1, 2].plot(lc_tout, lc_est_profile[:, 6], 'c6', linestyle='--', label='$LC Sim$')
p[1, 2].plot(tc_tout, tc_est_profile[:, 6], 'c15', linestyle='-.', label='$TC Sim$')
p[1, 2].set_title('$v_E$')
p[1, 2].legend(loc='best', prop={'size': 10})
# 3.3 For Attitude:
# 3.3.a For Roll Angle
p[2, 0].plot(tin, r2d * true_profile[:, 7], 'c19', label='$Orig$')
p[2, 0].hold('on')
p[2, 0].plot(lc_tout, r2d * lc_est_profile[:, 7], 'c7', linestyle='--', label='$LC Sim$')
p[2, 0].plot(tc_tout, r2d * tc_est_profile[:, 7], 'c16', linestyle='-.', label='$TC Sim$')
p[2, 0].set_title('$Roll$'+' '+'$Angle$')
p[2, 0].set_ylabel('$(deg)$')
p[2, 0].set_xlabel('$t (mn)$')
p[2, 0].legend(loc='best', prop={'size': 10})
# 3.3.b For Pitch Angle
p[2, 1].plot(tin, r2d * true_profile[:, 8], 'c19', label='$Orig$')
p[2, 1].hold('on')
p[2, 1].plot(lc_tout, r2d * lc_est_profile[:, 8], 'c8', linestyle='--', label='$LC Sim$')
p[2, 1].plot(tc_tout, r2d * tc_est_profile[:, 8], 'c17', linestyle='-.', label='$TC Sim$')
p[2, 1].set_title('$Pitch$'+' '+'$Angle$')
p[2, 1].set_xlabel('$t (mn)$')
p[2, 1].legend(loc='best', prop={'size': 10})
# 3.3.c For Yaw Angle
p[2, 2].plot(tin, r2d * true_profile[:, 9], 'c19', label='$Orig$')
p[2, 2].hold('on')
p[2, 2].plot(lc_tout, r2d * lc_est_profile[:, 9], 'c9', linestyle='--', label='$LC Sim$')
p[2, 2].plot(tc_tout, r2d * tc_est_profile[:, 9], 'c18', linestyle='-.', label='$TC Sim$')
p[2, 2].set_title('$Yaw$'+' '+'$Angle$')
p[2, 2].set_xlabel('$t (mn)$')
p[2, 2].legend(loc='best', prop={'size': 10})
plt.tight_layout()
return
# End of plotting dual profiles
'''
------------------------------------
4. Plot Errors for Single Simulation
------------------------------------
'''
def plot_single_error(errors, kf_sd):
    """Plot state-estimation errors for a single simulation run.

    Draws a 3x3 figure: row 0 holds NED position errors, row 1 NED
    velocity errors, and row 2 roll/pitch/yaw attitude errors. Each
    panel overlays the error trace with its +/-3-sigma Kalman-filter
    standard-deviation envelope.

    :param errors: output error history; column 0 is time [s], columns
        1-9 are the position, velocity and attitude error channels.
    :param kf_sd: Kalman-filter standard-deviation history with the same
        column layout (column 0 is time [s]).
    """
    # BUGFIX: Axes.hold() was deprecated in matplotlib 2.0 and removed in
    # 3.0, so the old per-axes hold('on') calls crash on any modern
    # matplotlib. Overlaying repeated plot() calls is the default
    # behavior, so the calls are simply dropped.
    nsig = 3.0  # Number of sigmas (standard deviation)
    tkf = kf_sd[:, 0] / 60.0  # Kalman Filter updating time history [min]
    ter = errors[:, 0] / 60.0  # Output errors updating time history [min]
    perfig, per = plt.subplots(3, 3, sharex=True, figsize=(12, 8))
    # 1. For Position Errors:
    [r_ns, r_ew] = radii_of_curv(kf_sd[0, 1])
    # 1.1. For North Position Error
    # NOTE(review): the envelope scales (sd - sd[0]) by the meridian
    # radius plus initial altitude, apparently converting latitude
    # standard deviation to metres -- confirm the sd[0] offset is intended.
    per[0, 0].plot(ter, errors[:, 1], 'c1', label=r'$\delta$' + '$r_N$')
    per[0, 0].plot(tkf, nsig * (kf_sd[:, 1] - kf_sd[0, 1]) * (r_ns + kf_sd[0, 3]), 'k', label=r'$3\sigma_N$')
    per[0, 0].plot(tkf, -nsig * (kf_sd[:, 1] - kf_sd[0, 1]) * (r_ns + kf_sd[0, 3]), 'k')
    per[0, 0].set_title(r'$\delta$' + '$r_N$')
    per[0, 0].set_ylabel('$(m)$')
    # 1.2. For East Position Error
    per[0, 1].plot(ter, errors[:, 2], 'c2', label=r'$\delta$' + '$r_E$')
    per[0, 1].plot(tkf, nsig * (kf_sd[:, 2] - kf_sd[0, 2]) * (r_ns + kf_sd[0, 3]) * np.cos(kf_sd[0, 1]), 'k',
                   label=r'$3\sigma_E$')
    per[0, 1].plot(tkf, -nsig * (kf_sd[:, 2] - kf_sd[0, 2]) * (r_ns + kf_sd[0, 3]) * np.cos(kf_sd[0, 1]), 'k')
    per[0, 1].set_title(r'$\delta$' + '$r_E$')
    # 1.3. For Down Position Error
    per[0, 2].plot(ter, errors[:, 3], 'c3', label=r'$\delta$' + '$r_D$')
    per[0, 2].plot(tkf, nsig * kf_sd[:, 3], 'k', label=r'$3\sigma_D$')
    per[0, 2].plot(tkf, -nsig * kf_sd[:, 3], 'k')
    per[0, 2].set_title(r'$\delta$' + '$r_D$')
    per[0, 2].legend(loc='best', prop={'size': 10})
    # 2. For Velocity Errors:
    # 2.1. For North Velocity Error
    per[1, 0].plot(ter, errors[:, 4], 'c4', label=r'$\delta$' + '$v_N$')
    per[1, 0].plot(tkf, nsig * kf_sd[:, 4], 'k', label=r'$3\sigma_{v_N}$')
    per[1, 0].plot(tkf, -nsig * kf_sd[:, 4], 'k')
    per[1, 0].set_title(r'$\delta$' + '$v_N$')
    per[1, 0].set_ylabel('$(m/s)$')
    # 2.2. For East Velocity Error
    per[1, 1].plot(ter, errors[:, 5], 'c5', label=r'$\delta$' + '$v_E$')
    per[1, 1].plot(tkf, nsig * kf_sd[:, 5], 'k', label=r'$3\sigma_{v_E}$')
    per[1, 1].plot(tkf, -nsig * kf_sd[:, 5], 'k')
    per[1, 1].set_title(r'$\delta$' + '$v_E$')
    # 2.3. For Down Velocity Error
    per[1, 2].plot(ter, errors[:, 6], 'c6', label=r'$\delta$' + '$v_D$')
    per[1, 2].plot(tkf, nsig * kf_sd[:, 6], 'k', label=r'$3\sigma_{v_D}$')
    per[1, 2].plot(tkf, -nsig * kf_sd[:, 6], 'k')
    per[1, 2].set_title(r'$\delta$' + '$v_D$')
    # 3. For Attitude Errors (converted to degrees via r2d):
    # 3.1. For Roll Angle Error
    per[2, 0].plot(ter, r2d * errors[:, 7], 'c7', label=r'$\delta_\phi$')
    per[2, 0].plot(tkf, nsig * r2d * kf_sd[:, 7], 'k', label=r'$3\sigma_\phi$')
    per[2, 0].plot(tkf, -nsig * r2d * kf_sd[:, 7], 'k')
    per[2, 0].set_title(r'$\delta\phi$')
    per[2, 0].set_ylabel('$(deg)$')
    per[2, 0].set_xlabel('$t (mn)$')
    # 3.2. For Pitch Angle Error
    per[2, 1].plot(ter, r2d * errors[:, 8], 'c8', label=r'$\delta_\theta$')
    per[2, 1].plot(tkf, nsig * r2d * kf_sd[:, 8], 'k', label=r'$3\sigma_\theta$')
    per[2, 1].plot(tkf, -nsig * r2d * kf_sd[:, 8], 'k')
    per[2, 1].set_title(r'$\delta\theta$')
    per[2, 1].set_xlabel('$t (mn)$')
    # 3.3. For Yaw Angle Error
    per[2, 2].plot(ter, r2d * errors[:, 9], 'c9', label=r'$\delta_\psi$')
    per[2, 2].plot(tkf, nsig * r2d * kf_sd[:, 9], 'k', label=r'$3\sigma_\psi$')
    per[2, 2].plot(tkf, -nsig * r2d * kf_sd[:, 9], 'k')
    per[2, 2].set_title(r'$\delta\psi$')
    per[2, 2].set_xlabel('$t (mn)$')
    # perfig.suptitle("Estimation Error Profile over Time")
    plt.tight_layout()
    return
# End of Plotting Errors for Single Simulation
'''
----------------------------------
5. Plot Errors for Dual Simulation
----------------------------------
'''
def plot_dual_error(lc_errors, lc_kf_sd, tc_errors, tc_kf_sd):
nsig = 3.0 # Number of sigma (standard deviation)
tkf = lc_kf_sd[:, 0] / 60.0 # Kalman Filter updating time history
ter = lc_errors[:, 0] / 60.0 # Output errors updating time history
# A. Loosely Coupled Errors
lc_perfig, lc_per = plt.subplots(3, 3, sharex=True, figsize=(12, 8))
# 1. For Position Errors:
[lc_r_ns, lc_r_ew] = radii_of_curv(lc_kf_sd[0, 1])
# 1.1. For North Position Error
lc_per[0, 0].plot(ter, lc_errors[:, 1], 'c1', label=r'$\delta$' + '$r_N$')
lc_per[0, 0].hold('on')
lc_per[0, 0].plot(tkf, nsig * (lc_kf_sd[:, 1] - lc_kf_sd[0, 1]) * (lc_r_ns + lc_kf_sd[0, 3]), 'k',
label=r'$3\sigma_N$')
lc_per[0, 0].plot(tkf, -nsig * (lc_kf_sd[:, 1] - lc_kf_sd[0, 1]) * (lc_r_ns + lc_kf_sd[0, 3]), 'k')
lc_per[0, 0].set_title('$LC$' + ' ' + r'$\delta$' + '$r_N$')
lc_per[0, 0].set_ylabel('$(m)$')
# 1.2. For East Position Error
lc_per[0, 1].plot(ter, lc_errors[:, 2], 'c2', label=r'$\delta$' + '$r_E$')
lc_per[0, 1].hold('on')
lc_per[0, 1].plot(tkf, nsig * (lc_kf_sd[:, 2] - lc_kf_sd[0, 2]) * (lc_r_ns + lc_kf_sd[0, 3]) *
np.cos(lc_kf_sd[0, 1]), 'k', label=r'$3\sigma_E$')
lc_per[0, 1].plot(tkf, -nsig * (lc_kf_sd[:, 2] - lc_kf_sd[0, 2]) * (lc_r_ns + lc_kf_sd[0, 3]) *
np.cos(lc_kf_sd[0, 1]), 'k')
lc_per[0, 1].set_title('$LC$' + ' ' + r'$\delta$' + '$r_E$')
# 1.3. For Down Position Error
lc_per[0, 2].plot(ter, lc_errors[:, 3], 'c3', label=r'$\delta$' + '$r_D$')
lc_per[0, 2].hold('on')
lc_per[0, 2].plot(tkf, nsig * lc_kf_sd[:, 3], 'k', label=r'$3\sigma_D$')
lc_per[0, 2].plot(tkf, -nsig * lc_kf_sd[:, 3], 'k')
lc_per[0, 2].set_title('$LC$' + ' ' + r'$\delta$' + '$r_D$')
lc_per[0, 2].legend(loc='best', prop={'size': 10})
# 2. For Velocity Errors:
# 2.1. For North Velocity Error
lc_per[1, 0].plot(ter, lc_errors[:, 4], 'c4', label=r'$\delta$' + '$v_N$')
lc_per[1, 0].hold('on')
lc_per[1, 0].plot(tkf, nsig * lc_kf_sd[:, 4], 'k', label=r'$3\sigma_{v_N}$')
lc_per[1, 0].plot(tkf, -nsig * lc_kf_sd[:, 4], 'k')
lc_per[1, 0].set_title('$LC$' + ' ' + r'$\delta$' + '$v_N$')
lc_per[1, 0].set_ylabel('$(m/s)$')
# 2.2. For East Velocity Error
lc_per[1, 1].plot(ter, lc_errors[:, 5], 'c5', label=r'$\delta$' + '$v_E$')
lc_per[1, 1].hold('on')
lc_per[1, 1].plot(tkf, nsig * lc_kf_sd[:, 5], 'k', label=r'$3\sigma_{v_E}$')
lc_per[1, 1].plot(tkf, -nsig * lc_kf_sd[:, 5], 'k')
lc_per[1, 1].set_title('$LC$' + ' ' + r'$\delta$' + '$v_E$')
# 3.3. For Down Velocity Error
lc_per[1, 2].plot(ter, lc_errors[:, 6], 'c6', label=r'$\delta$' + '$v_D$')
lc_per[1, 2].hold('on')
lc_per[1, 2].plot(tkf, nsig * lc_kf_sd[:, 6], 'k', label=r'$3\sigma_{v_D}$')
lc_per[1, 2].plot(tkf, -nsig * lc_kf_sd[:, 6], 'k')
lc_per[1, 2].set_title('$LC$' + ' ' + r'$\delta$' + '$v_D$')
# 3. For Attitude Errors:
# 3.1. For Roll Angle Error
lc_per[2, 0].plot(ter, r2d * lc_errors[:, 7], 'c7', label=r'$\delta_\phi$')
lc_per[2, 0].hold('on')
lc_per[2, 0].plot(tkf, nsig * r2d * lc_kf_sd[:, 7], 'k', label=r'$3\sigma_\phi$')
lc_per[2, 0].plot(tkf, -nsig * r2d * lc_kf_sd[:, 7], 'k')
lc_per[2, 0].set_title('$LC$' + ' ' + r'$\delta\phi$')
lc_per[2, 0].set_ylabel('$(deg)$')
lc_per[2, 0].set_xlabel('$t (mn)$')
# 3.2. For Pitch Angle Error
lc_per[2, 1].plot(ter, r2d * lc_errors[:, 8], 'c8', label=r'$\delta_\theta$')
lc_per[2, 1].hold('on')
lc_per[2, 1].plot(tkf, nsig * r2d * lc_kf_sd[:, 8], 'k', label=r'$3\sigma_\theta$')
lc_per[2, 1].plot(tkf, -nsig * r2d * lc_kf_sd[:, 8], 'k')
lc_per[2, 1].set_title('$LC$' + ' ' + r'$\delta\theta$')
lc_per[2, 1].set_xlabel('$t (mn)$')
# 3.3. For Yaw Angle Error
lc_per[2, 2].plot(ter, r2d * lc_errors[:, 9], 'c9', label=r'$\delta_\psi$')
lc_per[2, 2].hold('on')
lc_per[2, 2].plot(tkf, nsig * r2d * lc_kf_sd[:, 9], 'k', label=r'$3\sigma_\psi$')
lc_per[2, 2].plot(tkf, -nsig * r2d * lc_kf_sd[:, 9], 'k')
lc_per[2, 2].set_title('$LC$' + ' ' + r'$\delta\psi$')
lc_per[2, 2].set_xlabel('$t (mn)$')
# lc_perfig.suptitle("Estimation Error Profile over Time")
plt.tight_layout()
# B. Tightly Coupled Errors
tc_perfig, tc_per = plt.subplots(3, 3, sharex=True, figsize=(12, 8))
# 1. For Position Errors:
[tc_r_ns, tc_r_ew] = radii_of_curv(tc_kf_sd[0, 1])
# 1.1. For North Position Error
tc_per[0, 0].plot(ter, tc_errors[:, 1], 'c10', label=r'$\delta$' + '$r_N$')
tc_per[0, 0].hold('on')
tc_per[0, 0].plot(tkf, nsig * (tc_kf_sd[:, 1] - tc_kf_sd[0, 1]) * (tc_r_ns + tc_kf_sd[0, 3]), 'k',
label=r'$3\sigma_N$')
tc_per[0, 0].plot(tkf, -nsig * (tc_kf_sd[:, 1] - tc_kf_sd[0, 1]) * (tc_r_ns + tc_kf_sd[0, 3]), 'k')
tc_per[0, 0].set_title('$TC$' + ' ' | |
from __future__ import print_function
from ROOT import TStyle, kWhite, kTRUE
from ROOT import gROOT, gStyle
from ROOT import kGray, kAzure, kMagenta, kOrange, kWhite
from ROOT import kRed, kBlue, kGreen, kPink, kYellow
from ROOT import TLine, TLatex, TColor
from collections import namedtuple, OrderedDict
from math import sin, cos, tan, atan, exp, pi
from array import array
from Validation.Geometry.plot_utils import Plot_params
# Plot bookkeeping: each key maps to the Plot_params describing that
# histogram (index, axis titles, axis ranges, z-title and draw options).
plots = {}
plots['x_vs_eta'] = Plot_params(10, '#eta', 'x/X_{0}', 0.0, 2.575, -4.0, 4.0, '', 0, 0., 0., 0, 1)
plots['x_vs_phi'] = Plot_params(20, '#varphi [rad]', 'x/X_{0}', 0.0, 6.2, -4.0, 4.0, '', 0, 0., 0., 0, 1)
plots['x_vs_R'] = Plot_params(40, 'R [cm]', 'x/X_{0}', 0.0, 70.0, 0.0, 1200.0, '', 0, 0., 0., 0, 1)
plots['l_vs_eta'] = Plot_params(10010, '#eta', 'x/#lambda_{I}', 0.0, 0.73, -4.0, 4.0, '', 0, 0., 0., 0, 1)
plots['l_vs_phi'] = Plot_params(10020, '#varphi [rad]', 'x/#lambda_{I}', 0.0, 1.2, -4.0, 4.0, '', 0, 0., 0., 0, 1)
plots['l_vs_R'] = Plot_params(10040, 'R [cm]', 'x/#lambda_{I}', 0.0, 7.5, 0.0, 1200.0, '', 0, 0., 0., 0, 1)
plots['x_vs_eta_vs_phi'] = Plot_params(30, '#eta', '#varphi', 0., 0., 0., 0., 'x/X_{0}', 0, -1., -1., 0, 1)
plots['l_vs_eta_vs_phi'] = Plot_params(10030, '#eta', '#varphi', 0., 0., 0., 0., 'x/#lambda_{I}', 0, -1, -1, 0, 1)
plots['x_vs_z_vs_Rsum'] = Plot_params(50, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '#Sigmax/X_{0}', 1, -1., -1., 0, 0)
plots['x_vs_z_vs_Rsumcos'] = Plot_params(52, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '#Sigmax/X_{0}', 1, -1., -1., 0, 0)
#plots['x_vs_z_vs_R'] = Plot_params(60, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '1/X_{0}', 1, -1., -1., 0, 0)
plots['x_vs_z_vs_Rloc'] = Plot_params(70, 'z [mm]', 'R [mm]', 0., 0., 0., 0., 'x/X_{0}', 1, -1., -1., 0, 0)
plots['x_vs_z_vs_Rloccos'] = Plot_params(72, 'z [mm]', 'R [mm]', 0., 0., 0., 0., 'x/X_{0}', 1, -1., -1., 0, 0)
plots['l_vs_z_vs_Rsum'] = Plot_params(10050, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '#Sigmax/#lambda_{I}', 1, -1., -1., 0, 0)
plots['l_vs_z_vs_Rsumcos'] = Plot_params(10052, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '#Sigmax/#lambda_{I}', 1, -1., -1., 0, 0)
plots['l_vs_z_vs_R'] = Plot_params(10060, 'z [mm]', 'R [mm]', 0., 0., 0., 0., '1/#lambda_{I}', 1, -1., -1., 0, 0)
plots['l_vs_z_vs_Rloc'] = Plot_params(10070, 'z [mm]', 'R [mm]', 0., 0., 0., 0., 'x/#lambda_{I}', 1, -1., -1., 0, 0)
plots['l_vs_z_vs_Rloccos'] = Plot_params(10072, 'z [mm]', 'R [mm]', 0., 0., 0., 0., 'x/#lambda_{I}', 1, -1., -1., 0, 0)
# NOTE(review): the two ratio plots reuse indices 10 and 20 from
# x_vs_eta/x_vs_phi -- presumably the ratio is built from those same
# histograms; confirm before renumbering.
plots['x_over_l_vs_eta'] = Plot_params(10, '#eta', '(x/X_{0})/(x/#lambda_{I})', 0., 0., 0., 0., '', 0, -1, -1, 0, 0)
plots['x_over_l_vs_phi'] = Plot_params(20, '#varphi [rad]', '(x/X_{0})/(x/#lambda_{I})', 0., 0., 0., 0., '', 0, -1, -1, 0, 0)
# Conversion name from the label (key) to the components in CMSSW/Geometry
_LABELS2COMPS = {'HGCal': 'HGCal',
'HGCalEE': 'HGCalEE',
'HGCalHE': ['HGCalHEsil', 'HGCalHEmix']
}
# Compounds are used to stick together different part of the HGCal
# detector, so that their cumulative material description can be
# derived. The key name can be generic, while the names in the
# associated list must be such that an appropriate material
# description file, in ROOT format, is present while producing the
# cumulative plot. A missing element will invalidate the full
# procedure.
COMPOUNDS = OrderedDict([
    ("HGCal", ["HGCal"]),
    ("HGCalEE", ["HGCalEE"]),
    ("HGCalHE", ["HGCalHEsil", "HGCalHEmix"]),
])
# The DETECTORS must be the single component of the HGCal for which
# the user can ask for the corresponding material description.
# Each detector label maps to the ROOT colour used to draw it.
DETECTORS = OrderedDict([
    ("HGCal", kAzure-5),
    ("HGCalEE", kAzure-9),
    ("HGCalHE", kOrange-2),
])
# sDETS are the label of the HGCal elements in the Reconstruction
# geometry. They are all used to derive the reconstruction material
# profile to be compared to the one obtained directly from the
# simulation. A missing key in the real reconstruction geometry is not
# a problem, since this will imply that the corresponding plotting
# routine will skip that missing part. For this reason this map can be
# made as inclusive as possible with respect to the many
# reconstruction geometries in CMSSW.
sDETS = OrderedDict([
    ("HGCalEE", kRed),
    ("HGCalHEsil", kBlue),
    ("HGCalHEmix", kGreen),
])
#sDETS[""] = kYellow
#sDETS[""] = kOrange
#sDETS[""] = kPink
# hist_label_to_num contains the logical names of the HGCal detector
# that holds material. They are therefore not aware of which detector
# they belong to, but they are stored in specific plots in all the
# mat*root files produced. The numbering of the plots is identical
# across all files.
# Each value is [histogram index, ROOT colour, legend label].
hist_label_to_num = OrderedDict([
    ('COP', [100, 2, 'Copper']),
    ('SCI', [200, 3, 'Scintillator']),
    ('CAB', [300, 4, 'Cables']),
    ('MNE', [400, 5, 'HGC_G10-FR4']),
    ('SIL', [500, 6, 'Silicon']),
    ('OTH', [600, 7, 'Other']),
    ('AIR', [700, 8, 'Air']),
    ('SST', [800, 9, 'Stainless Steel']),
    ('WCU', [900, 28, 'WCu']),
    ('LEA', [1000, 12, 'Lead']),
    ('EPX', [1100, 46, 'Epoxy']),
    ('KAP', [1200, 49, 'Kapton']),
    ('ALU', [1300, 33, 'Aluminium']),
])
def TwikiPrintout(plotname, label, zoom):
"""The plots in the twiki are already too much and to avoid mistakes
we will try to automatize the procedure
"""
#Twiki will strip out spaces
label = label.replace(" ", "_")
zoomstring = ""
if zoom == "all":
zoomstring = ""
zoomtitle = "in all HGCal"
zoomdir = "%s/" % label
elif zoom == "zplus":
zoomstring = "_ZplusZoom"
zoomtitle = "in Z+ endcap of HGCal"
zoomdir = "%s/ZPlusZoom/" % label
elif zoom == "zminus":
zoomstring = "_ZminusZoom"
zoomtitle = "in Z- endcap of HGCal"
zoomdir = "%s/ZMinusZoom/" % label
else :
print("WRONG OPTION")
#Here for the hide button
if plotname == "x_vs_z_vs_Rsum":
print("%%TWISTY{ mode=\"div\" showlink=\"Click to see the %s plots %s \" hidelink=\"Hide %s %s\" showimgright=\"%%ICONURLPATH{toggleopen-small}%%\" hideimgright=\"%%ICONURLPATH{toggleclose-small}%%\"}%%" % (label,zoomtitle, label, zoomtitle))
if "Rsum" in plotname and "x_vs" in plotname and not "cos" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the mean value of the material budget in units of radiation length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the accumulated material budget as seen by the track, as the track travels throughout the detector.[[http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.pdf][Click to enlarge plot]] |" % (plotname,label,zoomstring,zoomdir,plotname,label,zoomstring, label, zoomtitle,zoomdir,plotname,label,zoomstring))
if "Rsum" in plotname and "l_vs" in plotname and not "cos" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the mean value of the material budget in units of interaction length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the accumulated material budget as seen by the track, as the track travels throughout the detector.[[http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.pdf][Click to enlarge plot]] |" % (plotname,label,zoomstring,zoomdir,plotname,label,zoomstring, label, zoomtitle,zoomdir,plotname,label,zoomstring))
if "Rsumcos" in plotname and "x_vs" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the mean value of the material budget in units of radiation length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the orthogonal accumulated material budget, that is cos(theta) what the track sees.[[http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.pdf][Click to enlarge plot]] |" % (plotname,label,zoomstring,zoomdir,plotname,label,zoomstring, label, zoomtitle,zoomdir,plotname,label,zoomstring))
if "Rsumcos" in plotname and "l_vs" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the mean value of the material budget in units of interaction length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the orthogonal accumulated material budget, that is cos(theta) what the track sees.[[http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.pdf][Click to enlarge plot]] |" % (plotname,label,zoomstring,zoomdir,plotname,label,zoomstring, label, zoomtitle,zoomdir,plotname,label,zoomstring))
if "Rloc" in plotname and "x_vs" in plotname and not "cos" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the local mean value of the material budget in units of radiation length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the local material budget as seen by the track, as the track travels throughout the detector.[[http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.pdf][Click to enlarge plot]] |" % (plotname,label,zoomstring,zoomdir,plotname,label,zoomstring, label, zoomtitle,zoomdir,plotname,label,zoomstring))
if "Rloc" in plotname and "l_vs" in plotname and not "cos" in plotname:
print("| <img alt=\"HGCal_%s%s%s.png\" height=\"300\" width=\"550\" src=\"http://apsallid.web.cern.ch/apsallid/HGCalMaterial/%sHGCal_%s%s%s.png\" /> | The plot on the left shows the 2D profile histogram for *%s* %s that displays the local mean value of the material budget in units of interaction length in each R-z cell. R-z cell is 1 cm x 1 mm. The plot depicts the local material budget as seen by | |
in NIC_WHOIS.keys():
raise ASNRegistryError(
'ASN registry %r is not known.' % ret['asn_registry']
)
ret['asn'] = temp[0].strip(' \n')
ret['asn_cidr'] = temp[2].strip(' \n')
ret['asn_country_code'] = temp[3].strip(' \n').upper()
ret['asn_date'] = temp[5].strip(' \n')
return ret
except (socket.timeout, socket.error):
if retry_count > 0:
return self.get_asn_whois(retry_count - 1)
else:
raise ASNLookupError(
'ASN lookup failed for %r.' % self.address_str
)
except ASNRegistryError:
raise
except:
raise ASNLookupError(
'ASN lookup failed for %r.' % self.address_str
)
def get_whois(self, asn_registry='arin', retry_count=3, server=None,
              port=43, extra_blacklist=None):
    """
    The function for retrieving whois or rwhois information for an IP
    address via any port. Defaults to port 43 (WHOIS).
    Args:
        asn_registry: The NIC to run the query against.
        retry_count: The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered.
        server: An optional server to connect to. If provided, asn_registry
            will be ignored.
        port: The network port to connect on.
        extra_blacklist: A list of blacklisted whois servers in addition to
            the global BLACKLIST.
    Returns:
        String: The raw whois data.
    Raises:
        BlacklistError: Raised if the whois server provided is in the
            global BLACKLIST or extra_blacklist.
        WhoisLookupError: The whois lookup failed.
    """
    try:
        extra_bl = extra_blacklist if extra_blacklist else []
        # BUGFIX: the blacklist test must check membership *within* each
        # blacklist. The old code `server in (BLACKLIST, extra_bl)`
        # compared the server to the list objects themselves, so no
        # server was ever rejected.
        if server is not None and (server in BLACKLIST or
                                   server in extra_bl):
            raise BlacklistError(
                'The server %r is blacklisted.' % server
            )
        if server is None:
            server = NIC_WHOIS[asn_registry]['server']
        # Create the connection for the whois query.
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(self.timeout)
        conn.connect((server, port))
        # Prep the query ('n + ' asks ARIN for all network matches).
        query = self.address_str + '\r\n'
        if asn_registry == 'arin':
            query = 'n + %s' % query
        # Query the whois server, and store the results.
        conn.send(query.encode())
        response = ''
        while True:
            d = conn.recv(4096).decode('ascii', 'ignore')
            response += d
            if not d:
                break
        conn.close()
        if 'Query rate limit exceeded' in response:
            # BUGFIX: the retry used to pass `port` into the `server`
            # positional slot and dropped the original server/blacklist.
            # Also decrement retry_count so a persistent rate limit
            # cannot recurse forever.
            if retry_count > 0:
                sleep(1)
                return self.get_whois(asn_registry, retry_count - 1,
                                      server, port, extra_blacklist)
            raise WhoisLookupError(
                'Whois lookup failed for %r.' % self.address_str
            )
        elif 'error 501' in response or 'error 230' in response:
            # Converted to WhoisLookupError by the generic handler below.
            raise ValueError
        return str(response)
    except (socket.timeout, socket.error):
        if retry_count > 0:
            # BUGFIX: preserve server/port/extra_blacklist on retry.
            return self.get_whois(asn_registry, retry_count - 1,
                                  server, port, extra_blacklist)
        else:
            raise WhoisLookupError(
                'Whois lookup failed for %r.' % self.address_str
            )
    except BlacklistError:
        # BUGFIX: re-raise the documented exception instead of letting
        # the generic handler mask it as a WhoisLookupError.
        raise
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # propagate.
        raise WhoisLookupError(
            'Whois lookup failed for %r.' % self.address_str
        )
def get_rws(self, url=None, retry_count=3):
    """
    The function for retrieving Whois-RWS information for an IP address
    via HTTP (Whois-RWS).
    Args:
        url: The URL to retrieve.
        retry_count: The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered.
    Returns:
        Dictionary: The whois data in Json format.
    Raises:
        WhoisLookupError: The whois RWS lookup failed.
    """
    try:
        # Create the connection for the whois query; ask for JSON.
        conn = Request(url, headers={'Accept': 'application/json'})
        data = self.opener.open(conn, timeout=self.timeout)
        try:
            # Early Python 3 responses exposed readall(); fall back to
            # read() everywhere else.
            d = json.loads(data.readall().decode())
        except AttributeError:
            d = json.loads(data.read().decode('ascii', 'ignore'))
        return d
    except (socket.timeout, socket.error):
        if retry_count > 0:
            return self.get_rws(url, retry_count - 1)
        else:
            raise WhoisLookupError('Whois RWS lookup failed for %r.' %
                                   url)
    except Exception:
        # BUGFIX: narrowed from a bare except so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        raise WhoisLookupError('Whois RWS lookup failed for %r.' % url)
def get_host(self, retry_count=3):
"""
The function for retrieving host information for an IP address.
Args:
retry_count: The number of times to retry in case socket errors,
timeouts, connection resets, etc. are encountered.
Returns:
Tuple: hostname, aliaslist, ipaddrlist
Raises:
HostLookupError: The host lookup failed.
"""
try:
default_timeout_set = False
if not socket.getdefaulttimeout():
socket.setdefaulttimeout(self.timeout)
default_timeout_set = True
ret = socket.gethostbyaddr(self.address_str)
if default_timeout_set:
socket.setdefaulttimeout(None)
return ret
except (socket.timeout, socket.error):
if retry_count > 0:
return self.get_host(retry_count - 1)
else:
raise HostLookupError(
'Host lookup failed for %r.' % self.address_str
)
except:
raise HostLookupError(
'Host lookup failed for %r.' % self.address_str
)
def _parse_fields(self, response, fields_dict, net_start=None,
net_end=None, dt_format=None):
"""
The function for parsing whois fields from a data input.
Args:
response: The response from the whois/rwhois server.
fields_dict: The dictionary of fields -> regex search values.
net_start: The starting point of the network (if parsing multiple
networks).
net_end: The ending point of the network (if parsing multiple
networks).
dt_format: The format of datetime fields if known.
Returns:
Dictionary: A dictionary of fields provided in fields_dict.
"""
ret = {}
for field in fields_dict:
pattern = re.compile(
str(fields_dict[field]),
re.DOTALL
)
if net_start is not None:
match = pattern.finditer(response, net_end, net_start)
elif net_end is not None:
match = pattern.finditer(response, net_end)
else:
match = pattern.finditer(response)
values = []
sub_section_end = None
for m in match:
if sub_section_end:
if field not in (
'abuse_emails',
'tech_emails',
'misc_emails'
) and (sub_section_end != (m.start() - 1)):
break
try:
values.append(m.group('val').strip())
except AttributeError:
values.append(m.group('val2').strip())
sub_section_end = m.end()
if len(values) > 0:
value = None
try:
if field == 'country':
value = values[0].upper()
elif field in ['created', 'updated'] and dt_format:
value = datetime.strptime(
values[0],
str(dt_format)).isoformat('T')
else:
values = unique_everseen(values)
value = '\n'.join(values)
except ValueError:
pass
ret[field] = value
return ret
def lookup(self, inc_raw=False, retry_count=3, get_referral=False,
extra_blacklist=None):
"""
The function for retrieving and parsing whois information for an IP
address via port 43 (WHOIS).
Args:
inc_raw: Boolean for whether to include the raw whois results in
the returned dictionary.
retry_count: The number of times to retry in case socket errors,
timeouts, connection resets, etc. are encountered.
get_referral: Boolean for whether to retrieve referral whois
information, if available.
extra_blacklist: A list of blacklisted whois servers in addition to
the global BLACKLIST.
Returns:
Dictionary: A dictionary containing the following keys:
query (String) - The IP address.
asn (String) - The Autonomous System Number.
asn_date (String) - The ASN Allocation date.
asn_registry (String) - The assigned ASN registry.
asn_cidr (String) - The assigned ASN CIDR.
asn_country_code (String) - The assigned ASN country code.
nets (List) - Dictionaries containing network information
which consists of the fields listed in the NIC_WHOIS
dictionary. Certain IPs have more granular network
listings, hence the need for a list object.
raw (String) - Raw whois results if the inc_raw parameter
is True.
referral (Dictionary) - Dictionary containing referral
whois information if get_referral is True and the
server isn't blacklisted. Consists of fields listed
in the RWHOIS dictionary. Additional referral server
informaion is added in the server and port keys.
raw_referral (String) - Raw referral whois results if the
inc_raw parameter is True.
"""
#Initialize the whois response.
response = None
#Attempt to resolve ASN info via Cymru. DNS is faster, try that first.
try:
asn_data = self.get_asn_dns()
except (ASNLookupError, ASNRegistryError):
try:
asn_data = self.get_asn_whois(retry_count)
except (ASNLookupError, ASNRegistryError):
#Lets attempt to get the ASN registry information from ARIN.
response = self.get_whois('arin', retry_count)
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None
}
matched = False
for match in re.finditer(
r'^ReferralServer:[^\S\n]+(.+)$',
response,
re.MULTILINE
):
matched = True
try:
referral = match.group(1)
referral = referral.replace(':43', '')
asn_data['asn_registry'] = ASN_REFERRALS[referral]
except KeyError:
raise ASNRegistryError('ASN registry lookup failed.')
break
if not matched:
asn_data['asn_registry'] = 'arin'
#Create the return dictionary.
results = {
'query': self.address_str,
'nets': [],
'raw': None,
'referral': None,
'raw_referral': None
}
#Add the ASN information to the return dictionary.
results.update(asn_data)
#The referral server and port. Only used if get_referral is True.
referral_server = None
referral_port = 0
#Only fetch the response if we haven't already.
if response is None or results['asn_registry'] is not 'arin':
#Retrieve the whois data.
response = self.get_whois(results['asn_registry'], retry_count,
extra_blacklist=extra_blacklist)
if get_referral:
#Search for a referral server.
for match in re.finditer(
r'^ReferralServer:[^\S\n]+(.+:[0-9]+)$',
response,
re.MULTILINE
):
try:
temp = match.group(1)
if 'rwhois://' not in temp:
raise ValueError
temp = temp.replace('rwhois://', '').split(':')
if int(temp[1]) > 65535:
raise ValueError
referral_server = temp[0]
referral_port = int(temp[1])
except (ValueError, KeyError):
continue
break
#Retrieve the referral whois data.
if get_referral and referral_server:
response_ref = self.get_whois(None, retry_count, referral_server,
referral_port, extra_blacklist)
if inc_raw:
results['raw_referral'] = response_ref
temp_rnet = self._parse_fields(
response_ref,
RWHOIS['fields']
)
#Add the networks to the return dictionary.
results['referral'] = temp_rnet
#If inc_raw parameter is True, add the response to return dictionary.
if inc_raw:
results['raw'] = response
nets = []
if results['asn_registry'] == 'arin':
#Find the first NetRange value.
pattern = re.compile(
r'^NetRange:[^\S\n]+(.+)$',
re.MULTILINE
)
temp = pattern.search(response)
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
#Iterate through all of the networks found, storing the CIDR value
#and the start | |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
# Bootstrap the notebook environment: if setup.py is not already present,
# clone the kaggle_runner repo (with submodules), rsync its contents into
# the current directory, then install the package in editable mode.
# NOTE(review): the command runs through the shell (shell=True) with a
# fixed string, so there is no untrusted input here; check=True aborts the
# notebook on any failure.
import subprocess
subprocess.run('[ -f setup.py ] || (git clone https://github.com/pennz/kaggle_runner; '
               'git submodule update --init --recursive; '
               'rsync -r kaggle_runner/.* .; '
               'rsync -r kaggle_runner/* .;); '
               'python3 -m pip install -e .', shell=True, check=True)
# -
# Materialize "runner.sh": the remote-side entry script. It takes
# USER/REPO/BRANCH/PHASE/ENABLE_RVS/SERVER/PORT as positional args,
# installs tooling, clones the code, and dispatches on PHASE
# (dev/data/test/run). The bash text is written verbatim.
with open("runner.sh", "w") as f:
    f.write(
        r"""#!/bin/bash -x
export PS4='Line ${LINENO}: ' # for debug
NC=ncat
USER=$1
shift
REPO=$1
shift
BRANCH=$1
shift
PHASE=$1
shift
ENABLE_RVS=$1
shift
SERVER=$1
shift
PORT=$1
shift
ORIG_PORT=23454
CHECK_PORT=$((ORIG_PORT + 1))
pip install --upgrade pip
conda install -y -c eumetsat expect & # https://askubuntu.com/questions/1047900/unbuffer-stopped-working-months-ago
apt update && apt install -y netcat nmap screen time locales >/dev/null 2>&1
apt install -y mosh iproute2 fish tig ctags htop tree pv tmux psmisc >/dev/null 2>&1 &
conda init bash
cat >> ~/.bashrc << EOF
conda activate base # as my dotfiles will fiddle with conda
export SERVER=$SERVER
export CHECK_PORT=$CHECK_PORT
EOF
source rpt # rvs IDE env setup
export SERVER=$SERVER
export CHECK_PORT=$CHECK_PORT
wait_ncat() {
wait_for_ncat=$1
while [ $wait_for_ncat -gt 0 ]; do
wait_for_ncat=$((wait_for_ncat - 1))
which ncat >/dev/null && return 0
done
}
wait_ncat 60
which $NC >/dev/null || NC=nc
export NC
if [ "x${ENABLE_RVS}" = x1 ]; then
if [ -z $(pgrep -f 'jupyter-notebook') ]; then
bash ./rvs.sh $SERVER $PORT 2>&1 &
else
screen -d -m bash -c "{ echo [REMOTE]: rvs log below.; bash -x ./rvs.sh $SERVER $PORT 2>&1; } | $NC --send-only --no-shutdown -w 120s -i $((3600 * 2))s $SERVER $CHECK_PORT"
fi
fi &
pip install ripdb pydicom parse pytest-logger python_logging_rabbitmq coverage &
# python3 -m pip install pyvim neovim msgpack==1.0.0 &
# python -m pip install pyvim neovim msgpack==1.0.0 & # for vim
SRC_WORK_FOLDER=/kaggle/working
[ -d ${SRC_WORK_FOLDER} ] || mkdir -p ${SRC_WORK_FOLDER}
cd ${SRC_WORK_FOLDER}
if [ -d ${REPO} ]; then rm -rf ${REPO}; fi
# get code
{
mvdir() {
[[ "$2"/"$1" -ef "${PWD}" ]] || {
rm -rf "$2"/"$1" &&
mkdir "$2"/"$1"
}
bash -c "mv ""$1""/*"" $2""/""$1"
}
export -f mvdir
if [ ! -d ${REPO} ]; then
git clone --single-branch --branch ${BRANCH} --depth=1 \
https://github.com/${USER}/${REPO}.git ${REPO} && pushd ${REPO} &&
sed -i 's/git@\(.*\):\(.*\)/https:\/\/\1\/\2/' .gitmodules &&
sed -i 's/git@\(.*\):\(.*\)/https:\/\/\1\/\2/' .git/config &&
git submodule update --init --recursive
find . -maxdepth 1 -name ".??*" -o -name "??*" -type f | xargs -I{} mv {} $OLDPWD
find . -maxdepth 1 -name ".??*" -o -name "??*" -type d | xargs -I{} bash -x -c "mvdir {} $OLDPWD"
popd
fi
pip install -e . &
make install_dep >/dev/null
}
USE_AMQP=true
export USE_AMQP
conda init bash
source ~/.bashrc
conda activate base
if [ x"${PHASE}" = x"dev" ]; then
export PS4='[Remote]: Line ${LINENO}: '
(
echo "MOSHing"
make mosh
) &
make toxic | if [ $USE_AMQP -eq true ]; then cat -; else $NC --send-only -w 120s -i $((60 * 5))s $SERVER $CHECK_PORT; fi &
wait # not exit, when dev
fi
if [ x"${PHASE}" = x"data" ]; then
bash ./rvs.sh $SERVER $PORT >/dev/null & # just keep one rvs incase
make dataset
fi
if [ x"${PHASE}" = x"test" ]; then
bash ./rvs.sh $SERVER $PORT >/dev/null & # just keep one rvs incase
#make test
fi
if [ x"${PHASE}" = x"run" ]; then
#pip install kaggle_runner
bash ./rvs.sh $SERVER $PORT >/dev/null & make m & # just keep one rvs incase
make toxic | if [ $USE_AMQP -eq true ]; then cat -; else $NC --send-only -w 120s -i $((60 * 5))s $SERVER $CHECK_PORT; fi
# basically the reverse of the calling path
pkill make & pkill -f "mosh" & pkill sleep & pkill -f "rvs.sh" & pkill ncat &
# python main.py "$@"
fi
"""
    )
# Materialize "rvs.sh": the reverse-shell keep-alive loop. It probes
# $SERVER:$CHECK_PORT, (re)spawns an ncat-backed PTY back to the listener
# with exponential backoff, and exits when /tmp/rvs_return or a zero exit
# marker file appears. Written verbatim.
with open("rvs.sh", "w") as f:
    f.write(
        r"""#!/bin/bash -x
export PS4='Line ${LINENO}: ' # for debug
NC=${NC:-ncat}
type $NC || ( echo >&2 "$NC cannot be found. Exit."; exit 1;)
# https://stackoverflow.com/questions/57877451/retrieving-output-and-exit-code-of-a-coprocess
# coproc { sleep 30 && echo "Output" && exit 3; }
# Saving the coprocess's PID for later, as COPROC_PID apparently unsets when its finished
# COPROC_PID_backup=$COPROC_PID
#
# Retrieving the coprocess's output
# output=$(cat <&$COPROC)
#
# Retrieving the coprocess's exit code
# wait $COPROC_PID_backup
#
# Echoing out the results
# echo $?
# echo $output
echo BASH NOW: $BASHPID
PID_FILE_PATH=/tmp/nc.pid
EXIT_FILE_PATH=/tmp/rvs_exit.$BASHPID.pid
test -f $EXIT_FILE_PATH && rm $EXIT_FILE_PATH
SERVER=$1
shift
PORT=$1
shift
ORIG_PORT=23454
CHECK_PORT=$((ORIG_PORT + 1))
check_exit_status() {
[ -f /tmp/rvs_return ] && return 0
if [ -f $EXIT_FILE_PATH ] && [ x"$(cat $EXIT_FILE_PATH)" = x0 ]; then
return 0
fi
return 1 # not ok
}
connect_setup() {
connect_again_flag=1
sleep_time=5
while [ ${connect_again_flag} -eq 1 ]; do
check_exit_status && return 0
$NC -w ${1}s -i 1800s $SERVER $PORT -c "echo $(date) started connection; echo $HOSTNAME; python -c 'import pty; pty.spawn([\"/bin/bash\", \"-li\"])'"
RSRET=$?
echo $RSRET > $EXIT_FILE_PATH
(/bin/ss -lpants | grep "ESTAB.*$PORT") || >&2 echo "\"$NC -w ${1}s -i 1800s $SERVER $PORT\" return with code $RSRET"
if [ x"$RSRET" = x"0" ]; then
[ -f /tmp/rvs_exit ] && return 0
return 255 # just do not return
fi
[ $RSRET -eq 0 ] && connect_again_flag=0
[ $RSRET -eq 1 ] && sleep ${sleep_time} && sleep_time=$((sleep_time + sleep_time))
done
# exit, will cause rvs script exit, beside, RSRET not 0, mean connection loss
# thing
RSRET=1 # just never exit
echo $RSRET > $EXIT_FILE_PATH && return $RSRET
}
connect_again() {
# pkill -f "nc.*$PORT" # no need now, our listen server can accept multiple
# connection now
connect_setup $1
}
WAIT_LIMIT=2048
INIT_WAIT=8
port_connect_status=0
wait_time=$INIT_WAIT
floatToInt() {
parsed=$(printf "%.0f" "$@")
[ ! $? -eq 0 ] && parsed=0
echo $parsed
} 2> /dev/null
while true; do
check_exit_status && exit 0
# if find that server cannot be connected, we try to restart our reverse connect again
nc_time=$($(which time) -f "%e" $NC -zw $wait_time $SERVER $CHECK_PORT 2>&1 > /dev/null)
nc_ret=$?
nc_time=$(echo $nc_time | awk '{print $NF}')
nc_time=$(floatToInt $nc_time)
if [ ${nc_ret} -eq 0 ]; then
# recover connection, need to connect_again too. For 1st time, will try to connect
# no connection last time, have connction now
if [ $port_connect_status -eq 0 ]; then
echo "recover connection, reset wait_time and try to reconnect"
wait_time=$INIT_WAIT
# previous connection is lost, we wait for longer to setup connection
check_exit_status || wait_time=15
connect_again $wait_time &
else
wait_time=$((wait_time + wait_time)) # double wait, network fine
if [ $wait_time -gt ${WAIT_LIMIT} ]; then wait_time=${WAIT_LIMIT}; fi
fi
port_connect_status=1
else
if [ $port_connect_status -eq 1 ]; then
echo "found connection loss, reset wait_time and try to reconnect"
wait_time=$INIT_WAIT
check_exit_status || wait_time=15 # previous connection is lost
connect_again $wait_time &
else # no connection all the time? we still try to connect...
wait_time=$((wait_time + wait_time))
if [ $wait_time -gt ${WAIT_LIMIT} ]; then wait_time=${WAIT_LIMIT}; fi
connect_again $wait_time &
fi
port_connect_status=0
fi
sleep $((wait_time - nc_time)) # check every XX seconds
echo $hostname $HOSTNAME
done
wait # wait for any background
# https://medium.com/@6c2e6e2e/spawning-interactive-reverse-shells-with-tty-a7e50c44940e
# In reverse shell
# $ python -c 'import pty; pty.spawn("/bin/bash")'
# Ctrl-Z
#
# In Attacker console
# $ stty raw -echo
# $ fg
#
# In reverse shell
# $ reset
# $ export SHELL=bash
# $ export TERM=xterm-256color
# $ stty rows <num> columns <cols>
"""
    )
# Materialize "rpt": a one-shot environment setup script sourced by
# runner.sh. It installs the author's dotfiles/fzf/vim-plug on first run
# and appends shell/profile configuration. Written verbatim.
with open("rpt", "w") as f:
    f.write(
        r"""#!/bin/bash
[ -d ~/.fzf ] || {
git clone --depth=1 https://github.com/pennz/dotfiles
rsync -r dotfiles/.* ~
rsync -r dotfiles/* ~
pushd ~
git submodule update --init
.fzf/install --all
curl -fLo ~/.config/nvim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# vim -u ~/.vimrc_back "+call plug#begin()" +PlugInstall +qa &
# ( sleep 60; nvim -Vnvim_log -u ~/.vimrc_back "+call plug#begin()" +PlugInstall +checkhealth +qa )&
ln -s .shrc_customised.macos .shrc_customised
echo "alias gdrive='gdrive --service-account a.json'" >> ~/.bash_aliases
echo "unalias vim" >> ~/.bash_aliases
popd
cat >> ~/.profile << EOF
export SHELL=/bin/bash
export TERM=screen-256color
stty intr ^\c susp ^\x eof ^\f echo opost
# https://unix.stackexchange.com/questions/343088/what-is-the-equivalent-of-stty-echo-for-zsh
# unsetopt ZLE # for zsh
# for ourside stty raw isig -echo icrnl time 3 echoprt opost eof ^\p
color_my_prompt () {
local __user_and_host="\[\033[01;32m\]\u@\h"
local __cur_location="\[\033[01;34m\]\w"
local __git_branch_color="\[\033[31m\]"
# local __git_branch="\`ruby -e \"print (%x{git branch 2> /dev/null}.grep(/^\*/).first || '').gsub(/^\* (.+)$/, '(\1) ')\"\`"
local __git_branch='`git branch 2> /dev/null | grep -e ^* | ${SED:-sed} -E s/^\\\\\*\ \(.+\)$/\(\\\\\1\)\ /`'
local __prompt_tail="\[\033[35m\]$"
local __last_color="\[\033[00m\]"
export PS1="$__user_and_host $__cur_location $__git_branch_color$__git_branch$__prompt_tail$__last_color "
}
ENV=/root/.bashrc
PYTHONWARNINGS=ignore:::pip._internal.cli.base_command
MPLBACKEND=module://ipykernel.pylab.backend_inline
PS4="$HOSTNAME: "'${LINENO}: '
_=/usr/bin/env
PWD=/kaggle/working
cd $PWD
OLDPWD=/root
# color_my_prompt
locale-gen
echo "#" $(grep 'cpu ' /proc/stat >/dev/null;sleep 0.1;grep 'cpu ' /proc/stat | awk -v RS="" '{print "CPU: "($13-$2+$15-$4)*100/($13-$2+$15-$4+$16-$5)"%"}') "Mem: "$(awk '/MemTotal/{t=$2}/MemAvailable/{a=$2}END{print 100-100*a/t"%"}' /proc/meminfo) "Uptime: "$(uptime | awk '{print $1 " " $2 " " $3}')
echo "#" TPU_NAME=$TPU_NAME
nvidia-smi
conda activate base
EOF
}
"""
    )
with open("gdrive_setup", "w") as f:
f.write(
r"""#!/bin/bash
wget https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-x64
chmod +x gdrive-linux-x64
cp gdrive-linux-x64 /bin/gdrive
mkdir ~/.gdrive
# auth file
cat > ~/.gdrive/a.json << EOF
NO_PASS
EOF
gdrive --service-account a.json list # just test
SRC_WORK_FOLDER=/kaggle/input
[ -d ${SRC_WORK_FOLDER} ] || {
mkdir -p ${SRC_WORK_FOLDER}
cd ${SRC_WORK_FOLDER}
gdrive --service-account a.json download | |
# Some entries do start with a space, but it is not *all* spaces
+ b"(?P<key>[^\x1b]*[^ \x1b][^\x1b]*)")
else:
# takes care of (1) in the function doc
selected_regex = re.compile(
# This won't work when we have multi line values in key/values
# :/
highlight_string.encode('utf-8')
+ b"\x1b\[(?P<row>[0-9]+);(?P<column_key>[0-9]+)H"
# Some entries do start with a space, but it is not *all*
# spaces; they might finish with a string of spaces, but
# definitely in a escape sequence
+ b"(?P<key>[^\x1b]*[^ \x1b][^\x1b]*) *\x1b")
if isinstance(entry_string, str):
# convert to bytes
entry_string = entry_string.encode('utf-8')
entry_regex = re.compile(entry_string)
seen_entries = collections.defaultdict(int)
last_seen_entry = None
last_seen_entry_count = 0
# we have either a bug or a feature, not clear and we miss the
# first line that has the first entry selected; so the first time
# we just go back and go fo
first_scroll = True
for _ in range(max_scrolls):
if first_scroll != True:
if _direction:
# FIXME: use get_key() -- how do we pass the terminal encoding?
target.console_tx("\x1b[A") # press arrow up
else:
target.console_tx("\x1b[B") # press arrow down
else:
if _direction:
# FIXME: use get_key() -- how do we pass the terminal encoding?
target.console_tx("\x1b[B") # press arrow up
else:
target.console_tx("\x1b[A") # press arrow down
first_scroll = False
skips = 4
while skips > 0:
skips -= 1
target.report_info(
# FIXME: make column a part of the BIOS menu profile
"%s: waiting for highlighted entry on column %s"
% (name, column_key), dlevel = 1)
# read until we receive something that looks like an entry
# selected
try:
r = target.expect(selected_regex, name = name,
# don't report much, it's kinda
# useless and we can get super huge
# lines? FIXME: make it a config option
#report = 0
# if we don't get it in ten seconds,
# bad--fail quick so we retry
timeout = timeout)
except tcfl.tc.failed_e as e:
# FIXME: use _timeout_e from target_c's expect(), make
# it more official
if 'timed out' not in str(e):
raise
target.report_info("%s: timed out, trying again %d"
% (name, skips), dlevel = 1)
# sometimes these are caused by bad serial lines,
# with key characters missed, so we just try to
# scroll up and try again; we don't try to go up and
# down because it confuses the state machine.
skips += 0.5
if _direction:
# FIXME: use get_key() -- how do we pass the terminal encoding?
target.console_tx("\x1b[B") # press arrow up
else:
target.console_tx("\x1b[A") # press arrow down
continue
# the key always matches spaces all the way to the end, so it
# needs to be stripped
key = r[name]['groupdict']['key'].strip()
key_at_column = int(r[name]['groupdict']['column_key'])
# entries are always on column four (FIXME: BIOS profile)
if key_at_column == column_key:
break
# this might be another false negative we hit (like the
# drawing of parts at the bottom in highlight), so let's retry
target.report_info("%s: found non-interesting entry '%s' @%s"
% (name, key, column_key))
continue
else:
target.report_info(
"%s: didn't find an interesting entry after four tries"
% name, dlevel = 1)
return None
target.report_info("%s: found highlighted entry '%s' @%s"
% (name, key, column_key), dlevel = 1)
seen_entries[key] += 1
if all(seen_count > 3 for seen_count in seen_entries.values()):
target.report_info("%s: scrolled twice through all entries;"
" did not find '%s'"
% (name, entry_string), dlevel = 1)
return None
if last_seen_entry == key:
last_seen_entry_count += 1
else:
last_seen_entry = key
last_seen_entry_count = 1
if last_seen_entry_count > 2:
# make sure this count is lower then the one above for
# seen_entries; we might not have seen all the entries in
# the menu and have a limited count to make a judgement on
# maybe this is a menu that does not wrap around, flip the
# direction
_direction = not _direction
m = entry_regex.search(key)
if m:
target.report_info("%s: highlighted entry found" % name)
r[name]['groupdict'].update(m.groupdict())
return r[name]['groupdict']
target.report_info("%s: found highlighted entry '%s'; not '%s'"
"--scrolling"
% (name, key, entry_string), dlevel = 1)
return None
def menu_dig_to(
        target, entries,
        # FIXME: move to BIOS profile
        canary_end_menu_redrawn = "F10=Save Changes and Exit",
        highlight_string = normal_white_fg_black_bg,
        dig_last = True,
        level = "top"):
    """Dig all the way down a list of nested menus

    Given a nested menu hierarchy, select each entry from the given
    list to navigate into the hierarchy.

    For example, given the hierarchy::

      level1
        level11
        level12
          level121
        level14
          level142
            level1422
              level14221

    the list of entries *[ "level1", "level14", "level142",
    "level1422", "level14221" ]* would take control of the serial
    console to select each menu entry on the way down the hierarchy.

    :param tcfl.tc.target_c target: target on which to operate (uses
      the default console)

    :param list(str) entries: list of the menu and submenu entries
      that have to be selected.

      An item can also be a tuple with two or three entries::

        ( ENTRYNAME, MENUTITLE [, HASVALUE] )

      This is used when the menu title for a submenu will be
      different than the entry name; *MENUTITLE* can be *None* if we
      know it is going to be the same and we need to only specify
      *HASVALUE*.

      *HASVALUE* is a boolean that indicates the entry is a key/value
      entry (see :func:`menu_scroll_to_entry`).

    :param str canary_end_menu_redrawn: (optional) string that is
      printed when the whole menu has been refreshed (and thus marks
      the end of the menu).

    :param str highlight_string: (optional) sequence that prefixes an
      entry being highlighted; defaults to ANSI normal (non bold),
      white foreground, black background.

    :param bool dig_last: (optional; default *True*) select the last
      menu entry once highlighted.

    :param str level: (optional; default *top*) name of the top level menu

    :returns: ordered dict of entry name -> match data for each entry
      that was found and selected.
    """
    assert isinstance(target, tcfl.tc.target_c)
    commonl.assert_list_of_types(entries, "entries", "entry",
                                 ( str, tuple))
    assert isinstance(canary_end_menu_redrawn, str)
    assert isinstance(highlight_string, str)
    assert isinstance(dig_last, bool)
    assert isinstance(level, str)

    cnt = 0
    rs = collections.OrderedDict()
    entries_len = len(entries)
    menu_name = [ level ]
    _menu_name = " > ".join(menu_name)
    while cnt < entries_len:
        # Ok, time to scroll till the next one
        entry_next = entries[cnt]
        has_value = False
        if isinstance(entry_next, tuple):
            if len(entry_next) > 2:
                has_value = entry_next[2]
            # BUG FIX: read the tuple's title *before* overwriting
            # entry_next -- the original did `entry_next = entry_next[0]`
            # first and then indexed `entry_next[1]` into the resulting
            # string, yielding the entry name's second character as the
            # menu title.
            menu_title = entry_next[1]
            entry_next = entry_next[0]
            if menu_title is None:
                # per the docstring, MENUTITLE may be None to mean "same
                # as the entry name"
                menu_title = entry_next
        else:
            menu_title = entry_next
        cnt += 1	# important this is here for later
        r = menu_scroll_to_entry(
            target, entry_next,
            has_value = has_value, direction = "down",
            highlight_string = highlight_string)
        if not r:
            raise tcfl.tc.error_e(
                "BIOS:%s: can't find entry '%s'" % (_menu_name, entry_next))
        rs[entry_next] = r
        # select every entry on the way down; the last one only if
        # dig_last says so
        if cnt < entries_len or cnt == entries_len and dig_last == True:
            target.report_info("BIOS: %s: selecting menu entry '%s'"
                               % (_menu_name, entry_next))
            entry_select(target)

            # Wait for the submenu title
            #
            # - \x1b is the escape char
            # - ^[XX;YYH means place next string at (X, Y)
            # - The main menu label is placed in row 2, any column (ANSI
            #   ^[02;YH) and it is prefixed by a space; note the row
            #   number is in %02d format (prefixing zero).
            menu_name.append(menu_title)
            _menu_name = " > ".join(menu_name)
            submenu_header_expect(
                target, menu_title,
                canary_end_menu_redrawn = canary_end_menu_redrawn,
                menu_name = _menu_name)
        if cnt == entries_len:
            # We got there, done!
            return rs

    raise tcfl.tc.error_e(
        "BIOS:%s: we never got to the entry after %d tries"
        % (level, cnt))
def submenu_header_expect(
target, menu_title,
# FIXME: move to BIOS profile
canary_end_menu_redrawn = "^v=Move Highlight",
menu_name = None):
"""
Wait for a submenu header to show up
When a submenu or dialog box is printed, it is prefixed by a
header like::
/------------------------------------\\
| |
| Submenu title |
| |
\------------------------------------/
wider or narrower or depending on the dialog or full width (for a
submenu).
This function waits for the header to show up.
:param tcfl.tc.target_c target: target on which to operate (uses
the default console)
:param str menu_title: string that is expected to be printed in
the menu/dialog.
:param str canary_end_menu_redrawn: (optional) string that is
printed when the whole menu has been | |
<gh_stars>1-10
# Type: module
# String form: <module 'WindAlpha.analysis' from '/opt/conda/lib/python3.5/site-packages/WindAlpha/analysis.py'>
# File: /opt/conda/lib/python3.5/site-packages/WindAlpha/analysis.py
# Source:
# -*- coding: utf-8 -*-
from __future__ import division
from collections import OrderedDict
from .util import *
from .data_type import *
from .metrics import return_perf_metrics
from sklearn.linear_model import LinearRegression
from tqdm import tqdm_notebook
from .get_data import *
from scipy.stats import pearsonr
from dateutil.parser import parse
# from numba import jit
# 准备分析用的原始数据
def prepare_raw_data(stock_code,
                     ind_codes,
                     start_date,
                     end_date,
                     period='M',
                     is_index=True,
                     include_st=False,
                     include_suspend=False,
                     include_new_stock=False,
                     ipo_days=60):
    """
    Pull the raw factor / next-period-return panel from the Wind API.

    :param stock_code: str or list -- an index/sector code (e.g.
        '000300.SH') when is_index is True, otherwise an explicit list of
        stock codes
    :param ind_codes: str or list -- factor code(s) to fetch
    :param start_date: str, 'YYYY-MM-DD' start of the sample
    :param end_date: str, 'YYYY-MM-DD' end of the sample
    :param period: str, sampling period: 'D'/'W'/'M'/'Q'/'Y' (default 'M')
    :param is_index: bool, whether stock_code names an index/sector
        whose constituents are resolved each period (default True)
    :param include_st: bool, keep ST stocks (default False)
    :param include_suspend: bool, keep stocks suspended on the period
        date (default False)
    :param include_new_stock: bool, keep recently listed stocks
        (default False)
    :param ipo_days: int, when include_new_stock is False, drop stocks
        listed within this many days of the period date (default 60)
    :return: pd.DataFrame with a (date, codes) MultiIndex: one column per
        factor, the market-cap column and NEXT_RET (next-period return)
    :raises ValueError: if start_date is not earlier than end_date
    """
    dict_df = OrderedDict()
    if isinstance(ind_codes, str):
        ind_codes = [ind_codes]
    ind_codes = [i.upper() for i in ind_codes]

    # trading-day grid for the requested sampling period
    if start_date < end_date:
        dates_data = tdays(start_date, end_date, period=period)
    else:
        raise ValueError("start_date must be earlier than end_date")
    dates = dates_data.Data[0]
    dates = [dt.strftime("%Y-%m-%d") for dt in dates]

    # indicator codes fetched besides the factors themselves
    data_codes = [CAP_CODES, "TRADE_STATUS", "IPO_DATE"]
    data_codes.extend(ind_codes)
    sub_cols = ind_codes.copy()
    sub_cols.extend([CAP_CODES, "NEXT_RET"])

    stock_codes = stock_code
    terms_len = len(dates) - 1
    with tqdm_notebook(total=terms_len) as pbar:
        for i in range(terms_len):
            cur_date = dates[i]
            next_date = dates[i + 1]
            pbar.set_description('提取数据')
            pbar.set_description('{}'.format(cur_date))
            # resolve the index/sector constituents as of cur_date
            if is_index:
                if stock_code[-2:] in ['SH', 'SZ', 'WI']:
                    stock_codes = wset("sectorconstituent", "date="+cur_date+";windcode="+stock_code).Data[1]
                else:
                    stock_codes = wset("sectorconstituent", "date=" + cur_date + ";sectorid=" + stock_code).Data[1]
            # factors + trade status (TRADE_STATUS) + listing date
            # (IPO_DATE), then next-period return from adjusted closes
            _, df_raw = wss(stock_codes, ",".join(data_codes), tradeDate=cur_date, usedf=True)
            _, close = wss(stock_codes, "CLOSE", tradeDate=cur_date, priceAdj="F", cycle="1", usedf=True)
            _, close_next = wss(stock_codes, "CLOSE", tradeDate=next_date, priceAdj="F", cycle="1", usedf=True)
            df_raw["NEXT_RET"] = (close_next-close)/close
            # drop stocks that listed within ipo_days of cur_date
            if not include_new_stock:
                date_least = tdaysoffset(-ipo_days, cur_date, "").Data[0][0]
                df_raw = df_raw[df_raw['IPO_DATE'] <= date_least]
            # drop stocks suspended on cur_date
            if not include_suspend:
                df_raw = df_raw[df_raw['TRADE_STATUS'] == u'交易']
            # drop ST stocks
            if not include_st:
                # BUG FIX: the ST constituent list was fetched for the
                # hard-coded date 2018-07-13; query it for cur_date so the
                # filter matches the rebalancing day being processed.
                _, df_st = wset("sectorconstituent",
                                "date=" + cur_date + ";sectorId=1000006526000000",
                                usedf=True)
                # set membership instead of a list scan per code
                st_codes = set(df_st['wind_code'].tolist())
                not_st_lst = [code for code in df_raw.index if code not in st_codes]
                df_raw = df_raw.loc[not_st_lst]
            df_raw_ind = df_raw[sub_cols]
            dict_df[cur_date] = df_raw_ind.dropna()
            pbar.update(1)
            if i == terms_len-1:
                pbar.set_description('完成')
    df_res = pd.concat(dict_df.values(), keys=dict_df.keys())
    df_res.index.names = ['date', 'codes']
    return df_res
def process_raw_data(raw_ind_ret, extreme_num=3, extreme_method='mad', scale_method='normal', funcs=None):
    """
    Clean the raw factor panel period by period.

    :param raw_ind_ret: pd.DataFrame, raw factor panel with a
        (date, codes) MultiIndex as returned by prepare_raw_data
    :param extreme_num: int, width of the outlier window (e.g. 3 -> beyond
        3 standard deviations counts as extreme when extreme_method='std')
    :param extreme_method: str or falsy, 'mad' (median absolute deviation)
        or 'std' (standard deviation); falsy skips outlier clipping
    :param scale_method: str or falsy, 'normal' (plain standardization) or
        'cap' (market-cap-weighted mean); falsy skips standardization
    :param funcs: list or None, extra per-period processing callables
        applied after the built-in steps (default None)
    :return: pd.DataFrame, processed panel (the raw copy is returned
        unchanged when every step is disabled)
    :raises ValueError: on an unrecognized extreme_method or scale_method
    """
    from functools import partial

    raw_data = raw_ind_ret.copy()
    if extreme_method and extreme_method not in ['mad', 'std']:
        raise ValueError("extreme_method must be one of ['mad','std'] or False")
    if scale_method and scale_method not in ['normal', 'cap']:
        # BUG FIX: this message previously (incorrectly) said
        # "extreme_method must be one of ['normal','cap'] or False"
        raise ValueError("scale_method must be one of ['normal','cap'] or False")

    p_extreme_process = partial(extreme_process, num=extreme_num, method=extreme_method)
    p_scale_process = partial(scale_process, method=scale_method)
    all_funcs = [p_extreme_process, p_scale_process]
    if not funcs:
        funcs = []
    all_funcs.extend(funcs)
    # drop the built-in steps that were disabled via a falsy method
    if not extreme_method:
        all_funcs.remove(p_extreme_process)
    if not scale_method:
        all_funcs.remove(p_scale_process)

    if all_funcs:
        # apply each cleaning step cross-sectionally within each date
        for func in all_funcs:
            raw_data = raw_data.groupby(level=0).apply(func)
    else:
        print("未定义任何数据处理函数,返回原始数据!")
    return raw_data
def ic_analysis(ind_ret_data, ic_method='rank'):
    """
    Run an information-coefficient (IC) analysis on processed factor data.

    :param ind_ret_data: pd.DataFrame, processed factor data shaped like the
        output of prepare_raw_data.
    :param ic_method: str, how the IC is computed: 'rank' (rank correlation),
        'normal' (value correlation) or 'risk_adj' (risk-adjusted IC).
    :return: ICAnalysis with ic_series (per-factor IC time series),
        ic_decay (12-period IC decay per factor) and ic_stats (summary stats:
        mean, std and IR of the IC per factor).
    """
    series = get_ic_series(ind_ret_data, ic_method)
    decay = get_ic_decay(ind_ret_data, ic_method)
    # Summary statistics per factor (outer index level is the factor name).
    by_factor = series.groupby(level=0)
    mean_ic = by_factor.apply(lambda df: df['ic'].mean())
    std_ic = by_factor.apply(lambda df: df['ic'].std())
    stats_frame = pd.DataFrame({
        'IC_mean': mean_ic,
        'IC_std': std_ic,
        'IC_IR': mean_ic / std_ic,
    })
    result = ICAnalysis()
    result.ic_series = series
    result.ic_decay = decay
    result.ic_stats = stats_frame
    return result
def get_ic_series(ind_ret_data, ic_method='rank'):
    """
    Compute the IC time series of each factor.

    :param ind_ret_data: pd.DataFrame, processed factor data shaped like the
        output of prepare_raw_data (MultiIndex of (date, code)).
    :param ic_method: str, 'rank' (rank-based IC), 'normal' (value-based IC)
        or 'risk_adj' (risk-adjusted IC).
    :return: pd.DataFrame with 'ic' and 'p_value' columns, outer-indexed by
        factor name and inner-indexed by date.
    """
    ind_names = get_ind_names(ind_ret_data)
    dict_ic = OrderedDict()
    def _ic(frame, ic_method):
        # NOTE: reads the loop variable `ind` from the enclosing scope (the
        # per-factor loop below); it is resolved at call time, so each call
        # uses the factor currently being processed.
        cov = None
        if ic_method == 'risk_adj':
            # Build a ~30-trading-day daily-return covariance matrix for this
            # date's stocks (data pulled from the Wind API).
            dt = frame.index[0][0]
            codes = frame.index.get_level_values(1).tolist()
            st_dt = tdaysoffset(-30, dt).Data[0][0]
            st_dt = st_dt.strftime("%Y-%m-%d")
            dts =[i.strftime("%Y-%m-%d") for i in tdays(st_dt, dt).Data[0]]
            chg_list = []
            for tt in dts:
                temp = wss(codes, "pct_chg", tradeDate=tt, cycle="D", usedf=True)[1]
                temp.columns = [tt]
                chg_list.append(temp)
            df_ret = pd.concat(chg_list, axis=1)
            cov = df_ret.T.cov()
        return info_coeff(frame[ind], frame['NEXT_RET'], ic_method, cov)
    for ind in ind_names:
        # info_coeff returns an (ic, p_value) pair per date.
        ts_ic = ind_ret_data.groupby(level=0).apply(lambda frame: _ic(frame, ic_method))
        ic = ts_ic.map(lambda i: i[0])
        p_value = ts_ic.map(lambda i: i[1])
        dict_ic[ind] = pd.DataFrame({'ic': ic, "p_value": p_value})
    df_ic = pd.concat(dict_ic.values(), keys=dict_ic.keys())
    return df_ic
def get_ic_decay(ind_ret_data, ic_method='rank'):
    """
    Compute the IC decay of each factor over up to 12 forward periods.

    :param ind_ret_data: pd.DataFrame, processed factor data shaped like the
        output of prepare_raw_data (MultiIndex of (date, code)).
    :param ic_method: str, 'rank' (rank-based IC), 'normal' (value-based IC)
        or 'risk_adj' (risk-adjusted IC).
    :return: pd.DataFrame indexed by LAG0..LAG{k-1}, one column per factor,
        each value being the time-averaged IC at that forward lag.
    """
    # Fetch forward-adjusted close prices for every stock at every analysis date.
    temp_price_lst = []
    all_codes = list(set(ind_ret_data.index.get_level_values(1).tolist()))
    all_dates = sorted(set(ind_ret_data.index.get_level_values(0).tolist()))
    for dt in all_dates:
        _, df = wss(all_codes, "CLOSE", tradeDate=dt, priceAdj="F", cycle="1", usedf=True)
        df.columns = [dt]
        temp_price_lst.append(df)
    df_all_price = pd.concat(temp_price_lst, axis=1)
    # IC between the factor at date t and the log return from t to t+k,
    # for k = 1..lag, averaged over all dates.
    grouped = ind_ret_data.groupby(level=0)
    lag = min(len(grouped), 12)
    rets = OrderedDict()
    for ind in get_ind_names(ind_ret_data):
        rets[ind] = OrderedDict()
        for (dt, frame) in grouped:
            if dt == all_dates[-1]:
                continue  # no forward return available for the last date
            rets[ind][dt] = []
            frame = frame.reset_index(level=0, drop=True)
            base_ind = frame[ind]              # factor values at date dt
            base_codes = frame.index.tolist()  # stocks analyzed at date dt
            # Bug fix: .ix was removed from pandas; .loc is the label-based
            # equivalent for these (row labels, column label) lookups.
            base_close = df_all_price.loc[base_codes, dt]
            dt_idx = all_dates.index(dt)
            for idx in range(dt_idx + 1, dt_idx + 1 + lag):
                if idx < len(all_dates):
                    lag_dt = all_dates[idx]
                    lag_close = df_all_price.loc[base_codes, lag_dt]
                    lag_ret = np.log(lag_close / base_close)
                    (ic, pvalue) = info_coeff(base_ind, lag_ret, ic_method, cov=None)
                    rets[ind][dt].append(ic)
            # Dates near the end of the sample have fewer forward periods;
            # pad with NaN so every row has `lag` entries.
            rets[ind][dt].extend([np.nan] * (lag - len(rets[ind][dt])))
    df_dict = OrderedDict((k, pd.DataFrame(v).T) for k, v in rets.items())
    df_ic_dec = pd.concat(df_dict.values(), keys=df_dict.keys())
    res = df_ic_dec.groupby(level=0).mean()
    res.columns = ["LAG" + str(i) for i in range(0, lag)]
    return res.T
def add_group(ind_ret_data,
ind_name=None,
group_num=5,
direction='ascending',
industry_neu=False,
industry_type='sw',
industry_level=1):
"""
根据因子数值添加分组
:param ind_ret_data: pd.DataFrame, 处理后的因子数据,结构如prepare_raw_data返回的数据
:param ind_name: str, 需要分子的因子名
:param group_num: int or float, 当为大于等于2的整数时,对股票平均分组;
当为(0,0.5)之间的浮点数,对股票分为3组,前group_num%为G01,后group_num%为G02
中间为G03
:param direction: str, 设置所有因子的排序方向,'ascending'表示因子值越大分数越高,'descending'表示因子值越小分数越高;
:return: pd.DataFrame, 如下,只返回进行分组的因子的数据(还包括市值及下期收益率数据),新增一列为GROUP,G01为得分最高的一组
TECH_AD20 MKT_CAP_ASHARE NEXT_RET GROUP
-------------------------------------------------------------------
2016-01-29 000001.SZ 0.587290 1.180405e+11 -0.044997 G02
000009.SZ -0.379785 1.765258e+10 -0.076252 G04
000027.SZ 0.271688 9.300787e+09 -0.004648 G03
000039.SZ -1.161553 1.663698e+10 0.015784 G05
000046.SZ 0.387505 4.545337e+10 -0.030490 G03
000060.SZ -0.489812 1.865929e+10 0.239582 G04
000061.SZ 0.152975 2.417196e+10 -0.150774 G03
000063.SZ 0.335387 4.857702e+10 -0.047118 G03
000069.SZ 0.332492 2.102879e+10 0.007758 G03
000100.SZ 0.995547 3.257033e+10 0.029934 G01
-------------------------------------------------------------------
2016-02-28 601985.SH 0.483055 2.754828e+10 -0.040351 G02
601988.SH 0.839193 6.786650e+11 -0.015650 G01
601989.SH 0.471473 1.111591e+11 -0.059918 G02
601991.SH 0.821821 4.017733e+10 -0.032873 G01
601992.SH 0.882625 2.371200e+10 0.067242 G01
601998.SH 0.653886 1.770737e+11 -0.031110 G02
603000.SH -0.831474 1.639740e+10 0.046121 G05
603288.SH -0.290027 7.404761e+09 -0.057672 G04
603885.SH -1.433724 3.283040e+09 -0.127513 G05
603993.SH 0.781285 4.145194e+10 0.006231 G01
"""
dict_ascending = {
'ascending': True,
'descending': False
}
ascending = dict_ascending[direction]
def __add_group(frame):
num = group_num
ind_name = get_ind_names(frame)[0]
rnk = frame[ind_name].rank(
ascending=ascending)
rnk += rnk.isnull().sum()
rnk = rnk.fillna(0)
if num > 1:
labels = ['G{:0>2}'.format(i) for i in range(1, num + 1)]
num = int(num)
category = pd.cut(-rnk, bins=num, labels=labels).astype(str)
category.name = 'GROUP'
new_frame = frame.join(category)
elif num < 0.50 and num > 0:
percentile_up = np.percentile(frame[ind_name], 100 * (1 - num))
percentile_low = np.percentile(frame[ind_name], 100 * num)
new_frame = frame.copy()
new_frame['GROUP'] = ['G02'] * len(frame[ind_name])
new_frame['GROUP'][frame[ind_name] >= percentile_up] = 'G01'
new_frame['GROUP'][frame[ind_name] <= percentile_low] = 'G03'
else:
raise ValueError('num must be int greater than 1 or float in (0,0.5)')
return new_frame
def __add_group_neu(frame):
new_frame = frame.groupby('INDUSTRY').apply(lambda x: __add_group(x))
return new_frame
if ind_name:
ind_name = [ind_name]
else:
ind_names = get_ind_names(ind_ret_data)
if len(ind_names) == 1:
ind_name = ind_names
else:
raise ValueError('must specify ind_name')
column = ind_name + [i for i in [CAP_CODES, 'NEXT_RET'] if i in ind_ret_data.columns]
ind_ret_data = ind_ret_data[column]
if not industry_neu:
if isinstance(ind_ret_data.index, pd.MultiIndex):
return ind_ret_data.groupby(level=0).apply(__add_group)
elif isinstance(ind_ret_data.index, pd.Index) and not isinstance(ind_ret_data.index, pd.MultiIndex):
return __add_group(ind_ret_data)
else:
if isinstance(ind_ret_data.index, pd.MultiIndex):
stocks = sorted(ind_ret_data.index.get_level_values(1).unique())
_, industries = w.wss(stocks, "industry_" + industry_type, "industryType=" + str(industry_level),
usedf=True)
industries.columns = ['INDUSTRY']
ind_ret_data = ind_ret_data.groupby(level=0).apply(
lambda frame: pd.concat([frame.reset_index(level=0, drop=True), industries], join='inner', axis=1))
return ind_ret_data.groupby(level=0).apply(__add_group_neu)
elif isinstance(ind_ret_data.index, pd.Index) and not isinstance(ind_ret_data.index, pd.MultiIndex):
stocks = sorted(ind_ret_data.index.tolist())
_, industries = w.wss(stocks, "industry_" + industry_type, "industryType=" + str(industry_level),
usedf=True)
industries.columns = ['INDUSTRY']
ind_ret_data = pd.concat([ind_ret_data, industries], | |
'''
Copyright (C) 2020-2021 <NAME> <<EMAIL>>
Released under the Apache-2.0 License.
'''
import os, sys, re
import functools
import torch as th
import collections
from tqdm import tqdm
import pylab as lab
import traceback
import math
import statistics
from scipy import stats
import numpy as np
import random
from .utils import IMmean, IMstd, renorm, denorm, xdnorm, chw2hwc
from termcolor import cprint, colored
def rank_attack(model, attack, loader, *, dconf, device, verbose=False):
    '''
    Generic attack driver for embedding / ranking models.

    Parses the `attack` specification string, runs RankPGD on every batch
    drawn from `loader`, accumulates per-batch statistics and prints an
    overall summary at the end.

    :param model: ranking model exposing compute_embedding().
    :param attack: specification string, one of the forms
        'Q{+,-}:PGD-M<k>' (query attack), 'SPQ{+,-}:PGD-M<k>'
        (semantics-preserving query attack), 'F:PGD-M<k>' (full-order
        attack), 'SPO:PGD-M<k>' (semantics-preserving full-order attack).
    :param loader: iterable of (images, labels) batches.
    :param dconf: configuration dict; must contain 'metric' ('C' or 'E') and
        'epsilon'; may contain 'normimg' and 'TRANSFER' (black-box transfer
        attack settings).
    :param device: torch device used for the main model.
    :param verbose: print per-batch diagnostics when True.
    '''
    # >> pre-process the options
    normimg = dconf.get('normimg', False)
    if dconf.get('metric', None) is None:
        raise ValueError('dconf parameter misses the "metric" key')
    # Embeddings of the whole candidate set; the cosine ('C') metric works on
    # L2-normalized vectors.
    candidates = model.compute_embedding(loader, device=device,
        l2norm=(True if dconf['metric']=='C' else False))
    if dconf.get('TRANSFER', None) is None:
        dconf['TRANSFER'] = None
    else:
        # Pre-compute candidate embeddings of the transfer (black-box) model.
        candidates_trans = dconf['TRANSFER']['model'].compute_embedding(
            loader, device=dconf['TRANSFER']['device'],
            l2norm=(True if 'C' in dconf['TRANSFER']['transfer'] else False))
        dconf['TRANSFER']['candidates'] = candidates_trans
    ruthless = int(os.getenv('RUTHLESS', -1))  # maxiter for attack
    # >> dispatch: attacking
    print('>>> Candidate Set:', candidates[0].shape, candidates[1].shape)
    correct_orig, correct_adv, total = 0, 0, 0
    rankup, embshift, prankgt, prank_trans = [], [], [], []
    for N, (images, labels) in tqdm(enumerate(loader), total=len(loader)):
        #if N < random.randint(0, 60502//2): continue # picking sample for vis
        #if N < 14676//2: continue
        if (ruthless > 0) and (N >= ruthless):
            break
        if verbose: cprint('\n'+'\u2500'*64, 'cyan')
        # Dispatch on the attack spec; the regex captures the polarity (+/-)
        # and the number of candidates M where applicable.
        if re.match('^Q.?:PGD-M\d+$', attack) is not None:
            # Query attack (QA)
            regroup = re.match('^Q(.?):PGD-M(\d+)$', attack).groups()
            pm = str(regroup[0]) # + / -
            assert(pm in ['+', '-'])
            M = int(regroup[1]) # m, num of candidates
            assert(M > 0)
            xr, r, out, loss, count = RankPGD(model, images, labels,
                candidates, eps=dconf['epsilon'], verbose=verbose,
                device=device, loader=loader, metric=dconf['metric'],
                normimg=normimg, atype='QA', M=M, pm=pm,
                transfer=dconf['TRANSFER'])
        elif re.match('^SPQ.?:PGD-M\d+$', attack) is not None:
            # Semantics-preserving query attack (SPQA)
            regroup = re.match('^SPQ(.?):PGD-M(\d+)$', attack).groups()
            pm = str(regroup[0]) # + / -
            assert(pm in ['+', '-'])
            M = int(regroup[1]) # m, num of candidates
            assert(M > 0)
            xr, r, out, loss, count = RankPGD(model, images, labels,
                candidates, eps=dconf['epsilon'], verbose=verbose,
                device=device, loader=loader, metric=dconf['metric'],
                normimg=normimg, atype='SPQA', M=M, pm=pm,
                transfer=dconf['TRANSFER'])
        elif re.match('^F:PGD-M\d+$', attack) is not None:
            # Full-order attack (FOA); needs at least two candidates to order.
            regroup = re.match('^F:PGD-M(\d+)$', attack).groups()
            M = int(regroup[0]) # m, num of candidates
            assert(M > 1)
            xr, r, out, loss, count = RankPGD(model, images, labels,
                candidates, eps=dconf['epsilon'], verbose=verbose,
                device=device, loader=loader, metric=dconf['metric'],
                normimg=normimg, atype='FOA', M=M, pm=None,
                transfer=dconf['TRANSFER'])
        elif re.match('^SPO:PGD-M\d+$', attack) is not None:
            # Semantics-preserving full-order attack (SPFOA)
            regroup = re.match('^SPO:PGD-M(\d+)$', attack).groups()
            M = int(regroup[0]) # m, num of candidates
            xr, r, out, loss, count = RankPGD(model, images, labels,
                candidates, eps=dconf['epsilon'], verbose=verbose,
                device=device, loader=loader, metric=dconf['metric'],
                normimg=normimg, atype='SPFOA', M=M, pm=None,
                transfer=dconf['TRANSFER'])
        else:
            raise ValueError(f"Attack {attack} unsupported.")
        # `count` layout (as consumed here): count[0][0] original correct,
        # count[1][0] adversarial correct, count[1][1..4] rank-up, embedding
        # shift, prank-gt, prank-transfer for this batch.
        correct_orig += count[0][0]
        correct_adv += count[1][0]
        total += len(labels)
        rankup.append(count[1][1])
        embshift.append(count[1][2])
        prankgt.append(count[1][3])
        prank_trans.append(count[1][4])
        if N*images.shape[0] > 10000: break # XXX: N=10000 for speed
    total = max(1,total)  # guard against division by zero when no batch ran
    # >> report overall attacking result on the test dataset
    cprint('\u2500'*64, 'cyan')
    if int(os.getenv('IAP', 0)) > 0:
        # IAP mode re-purposes the four statistic lists, hence the labels.
        cprint(' '.join([f'Summary[{attack} \u03B5={dconf["epsilon"]}]:',
            'white-box=', '%.3f'%statistics.mean(rankup), # abuse var
            'black-box=', '%.3f'%statistics.mean(embshift), # abuse var
            'white-box-orig=', '%.3f'%statistics.mean(prankgt), # abuse var
            'black-box-orig=', '%.3f'%statistics.mean(prank_trans), # abuse var
            ]), 'cyan')
    else:
        cprint(' '.join([f'Summary[{attack} \u03B5={dconf["epsilon"]}]:',
            'baseline=', '%.3f'%(100.*(correct_orig/total)),
            'adv=', '%.3f'%(100.*(correct_adv/total)),
            'advReduce=', '%.3f'%(100.*(correct_orig - correct_adv) / total),
            'rankUp=', '%.3f'%statistics.mean(rankup),
            'embShift=', '%.3f'%statistics.mean(embshift),
            'prankgt=', '%.3f'%statistics.mean(prankgt),
            'prank_trans=', '%.3f'%statistics.mean(prank_trans),
            ]), 'cyan')
    cprint('\u2500'*64, 'cyan')
class LossFactory(object):
    '''
    Factory of loss functions used in all ranking attacks.

    Losses are available both as static methods and through the factory
    interface: LossFactory('QA+')(...) dispatches to the corresponding
    static method with `pm` pre-bound.
    '''
    @staticmethod
    def RankLossEmbShift(repv: th.Tensor, repv_orig: th.Tensor, *, metric: str):
        '''
        Embedding-shift loss: the (negated) distance between adversarial and
        original embeddings, so gradient descent maximizes the shift.

        :param repv: current (adversarial) embeddings, one row per sample.
        :param repv_orig: original embeddings, same shape as repv.
        :param metric: 'C' cosine distance or 'E' euclidean distance.
        '''
        if metric == 'C':
            # Diagonal of the pairwise cosine-distance matrix = per-sample shift.
            distance = 1 - th.mm(repv, repv_orig.t())
            loss = -distance.trace() # gradient ascent on trace, i.e. diag.sum
        elif metric == 'E':
            distance = th.nn.functional.pairwise_distance(repv, repv_orig, p=2)
            loss = -distance.sum()
        else:
            # Bug fix: an unknown metric previously fell through to a
            # NameError on `loss`; fail loudly and consistently instead.
            raise ValueError(metric)
        return loss
    @staticmethod
    def RankLossQueryAttack(qs: th.Tensor, Cs: th.Tensor, Xs: th.Tensor, *, metric: str, pm: str,
            dist: th.Tensor = None, cidx: th.Tensor = None):
        '''
        Loss for the pure query attack: raise ('+') or lower ('-') the rank
        of the chosen candidates with respect to each query.

        :param qs: [NIter, D] query embeddings.
        :param Cs: [NIter, M, D] chosen candidate embeddings per query.
        :param Xs: [NX, D] embeddings of the whole candidate set.
        :param metric: 'C' cosine or 'E' euclidean.
        :param pm: '+' or '-' attack polarity.
        :param dist: optional per-query distance rows, for rank reporting.
        :param cidx: optional chosen-candidate indices, for rank reporting.
        :return: (mean hinge loss, mean rank of the chosen candidates or None)
        '''
        assert(qs.shape[1] == Cs.shape[2] == Xs.shape[1])
        NIter, M, D, NX = qs.shape[0], Cs.shape[1], Cs.shape[2], Xs.shape[0]
        DO_RANK = (dist is not None) and (cidx is not None)
        losses, ranks = [], []
        for i in range(NIter):
            #== compute the pairwise loss
            q = qs[i].view(1, D)
            C = Cs[i, :, :].view(M, D)
            if metric == 'C':
                A = (1 - th.mm(q, C.t())).expand(NX, M)
                B = (1 - th.mm(Xs, q.t())).expand(NX, M)
            elif metric == 'E':
                A = (C - q).norm(2, dim=1).expand(NX, M)
                B = (Xs - q).norm(2, dim=1).view(NX, 1).expand(NX, M)
            else:
                # Bug fix: previously an unknown metric left A/B undefined.
                raise ValueError(metric)
            #== hinge between candidate distances (A) and whole-set distances (B)
            if '+' == pm:
                loss = (A-B).clamp(min=0.).mean()
            elif '-' == pm:
                loss = (-A+B).clamp(min=0.).mean()
            else:
                # Bug fix: previously an unknown pm silently reused the loss
                # from the previous iteration (or raised NameError on i == 0).
                raise ValueError(pm)
            losses.append(loss)
            #== compute the (mean normalized) rank of the chosen candidates
            if DO_RANK:
                ranks.append(th.mean(dist[i].flatten().argsort().argsort()
                    [cidx[i,:].flatten()].float()).item())
        loss = th.stack(losses).mean()
        rank = statistics.mean(ranks) if DO_RANK else None
        return loss, rank
    @staticmethod
    def RankLossFullOrderM2Attack(qs: th.Tensor, ps: th.Tensor, ns: th.Tensor, *, metric: str):
        '''
        Loss for the M=2 full-order attack: push each query's distance to the
        first candidate (ps) below its distance to the second one (ns).

        :param qs: [Batch, D] query embeddings.
        :param ps: [Batch, D] candidates that should rank first.
        :param ns: [Batch, D] candidates that should rank second.
        :param metric: 'C' cosine or 'E' euclidean.
        :return: (hinge loss, fraction of pairs already in the wanted order)
        '''
        assert(qs.shape[0] == ps.shape[0] == ns.shape[0])
        assert(qs.shape[1] == ps.shape[1] == ns.shape[1])
        Batch = qs.shape[0]
        if metric == 'C':
            dist1 = 1 - th.nn.functional.cosine_similarity(qs, ps, dim=1)
            dist2 = 1 - th.nn.functional.cosine_similarity(qs, ns, dim=1)
        elif metric == 'E':
            dist1 = th.nn.functional.pairwise_distance(qs, ps, p=2)
            dist2 = th.nn.functional.pairwise_distance(qs, ns, p=2)
        else:
            raise ValueError(metric)
        loss = (dist1 - dist2).clamp(min=0.).mean()
        acc = (dist1 <= dist2).sum().item() / Batch
        return loss, acc
    @staticmethod
    def RankLossFullOrderMXAttack(qs: th.Tensor, Cs: th.Tensor, *, metric: str):
        '''
        Loss for the general (M > 2) full-order attack: enforce the given
        candidate order per query and report Kendall's tau agreement.

        :param qs: [NIter, D] query embeddings.
        :param Cs: [NIter, M, D] candidates in the desired order.
        :param metric: 'C' cosine or 'E' euclidean.
            (Bug fix: the parameter was declared `metric=str`, i.e. with the
            `str` type as its *default value* rather than as an annotation.)
        :return: (mean pairwise hinge loss, mean Kendall tau over queries)
        '''
        assert(qs.shape[1] == Cs.shape[2])
        NIter, M, D = qs.shape[0], Cs.shape[1], Cs.shape[2]
        losses, taus = [], []
        for i in range(NIter):
            q = qs[i].view(1, D)
            C = Cs[i, :, :].view(M, D)
            if metric == 'C':
                dist = 1 - th.mm(q, C.t())
            elif metric == 'E':
                dist = (C - q).norm(2, dim=1)
            else:
                raise ValueError(metric)
            # Rank correlation between the desired order (0..M-1) and the
            # actual distances.
            tau = stats.kendalltau(np.arange(M), dist.cpu().detach().numpy())[0]
            taus.append(tau)
            # Pairwise hinge on all (i < j) candidate pairs.
            dist = dist.expand(M, M)
            loss = (dist.t() - dist).triu(diagonal=1).clamp(min=0.).mean()
            losses.append(loss)
        loss = th.stack(losses).mean()
        tau = statistics.mean(x for x in taus if not math.isnan(x))
        return loss, tau
    def __init__(self, request: str):
        '''
        Select one of the loss functions by name.

        :raises KeyError: if `request` is not a known loss name.
        '''
        self.funcmap = {
            'QA': self.RankLossQueryAttack,
            'QA+': functools.partial(self.RankLossQueryAttack, pm='+'),
            'QA-': functools.partial(self.RankLossQueryAttack, pm='-'),
            'FOA2': self.RankLossFullOrderM2Attack,
            'FOAX': self.RankLossFullOrderMXAttack,
        }
        if request not in self.funcmap.keys():
            raise KeyError(f'Requested loss function "{request}" not found!')
        self.request = request
    def __call__(self, *args, **kwargs):
        # Delegate to the selected loss function.
        return self.funcmap[self.request](*args, **kwargs)
## MARK: STAGE0
def RankPGD(model, images, labels, candi, *,
eps=0.3, alpha=1./255., atype=None, M=None, W=None, pm=None,
verbose=False, device='cpu', loader=None, metric=None,
normimg=False, transfer=None):
'''
Perform FGSM/PGD Query/Candidate attack on the given batch of images, L_infty constraint
https://github.com/tensorflow/cleverhans/blob/master/cleverhans/attacks/fast_gradient_method.py
This is the core of the adversarial ranking implementation,
but we don't have enough energy to tidy it up before ICCV submission.
'''
# >> prepare the current batch of images
assert(type(images) == th.Tensor)
images = images.clone().detach().to(device)
images_orig = images.clone().detach()
images.requires_grad = True
labels = labels.to(device).view(-1)
# >> sanity check for normalized images, if any
if normimg:
# normed_img = (image - mean)/std
IMmean = th.tensor([0.485, 0.456, 0.406], device=device)
IMstd = th.tensor([0.229, 0.224, 0.225], device=device)
renorm = lambda im: im.sub(IMmean[:,None,None]).div(IMstd[:,None,None])
denorm = lambda im: im.mul(IMstd[:,None,None]).add(IMmean[:,None,None])
if (not normimg) and ((images > 1.0).sum() + (images < 0.0).sum() > 0):
raise Exception("please toggle 'normimg' as True for sanity")
def tensorStat(t):
return f'Min {t.min().item()} Max {t.max().item()} Mean {t.mean().item()}'
#<<<<<< STAGE1: ORIG SAMPLE EVALUATION <<<<<<
model.eval()
with th.no_grad():
# -- [orig] -- forward the original samples with the original loss
# >> Result[output]: embedding vectors
# >> Result[dist]: distance matrix (current batch x database)
if metric == 'C':
output = model.forward(images, l2norm=True)
dist = 1 - output @ candi[0].t() # [num_output_num, num_candidate]
elif metric == 'E':
output = model.forward(images, l2norm=False)
dist = []
# the memory requirement is insane if we want to do the pairwise distance
# matrix in a single step like faC_c2f2_siamese.py's loss function.
for i in range(output.shape[0]):
xq = output[i].view(1, -1)
xqd = (candi[0] - xq).norm(2, dim=1).squeeze()
dist.append(xqd)
dist = th.stack(dist) # [num_output_num, num_candidate]
else:
raise ValueError(metric)
output_orig = output.clone().detach()
dist_orig = dist.clone().detach()
loss = th.tensor(-1) # we don't track this value anymore
loss_orig = th.tensor(-1) # we don't track this value anymore
#== <transfer> forward the samples with the transfer model
if transfer is not None:
if 'C' in transfer['transfer']:
output_trans = transfer['model'].forward(images, l2norm=True)
dist_trans = 1 - output_trans @ transfer['candidates'][0].t()
elif 'E' in transfer['transfer']:
output_trans = transfer['model'].forward(images, l2norm=False)
dist_trans = []
for i in range(output_trans.shape[0]):
xtrans = output_trans[i].view(1, -1)
xdtrans = (transfer['candidates'][0] - xtrans).norm(2, dim=1).squeeze()
dist_trans.append(xdtrans)
dist_trans = th.stack(dist_trans)
# -- [orig] -- select attack targets and | |
# This is the image processing module, which I used to preprocess my images, augment my dataset, and
# organize them into a structure suitable for input to a machine learning model
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from PIL import Image
from six.moves import range
# Folder names for the training and testing image sets.
train_folder = "Train"
test_folder = "Test"
# Standard (width, height) to which all images will be rescaled.
dimensions = (50, 50)
# Maximum absolute angle (degrees) by which an image may be rotated during
# data augmentation.
max_angle = 15
# function to rotate an image by a given angle and fill in the black corners created
# with a specified color
def rotate_img(image, angle, color, filter = Image.NEAREST):
if image.mode == "P" or filter == Image.NEAREST:
matte = Image.new("1", image.size, 1) # mask
else:
matte = Image.new("L", image.size, 255) # true matte
bg = Image.new(image.mode, image.size, color)
bg.paste(
image.rotate(angle, filter),
matte.rotate(angle, filter)
)
return bg
# function to turn grey-colored backgrounds to white. r, b and g specify the
# exact shade of grey color to eliminate. Source: stackoverflow.
def make_greyscale_white_bg(im, r, b, g):
im = im.convert('RGBA') # Convert to RGBA
data = np.array(im) # "data" is a height x width x 4 numpy array
red, green, blue, alpha = data.T # Temporarily unpack the bands for readability
# Replace grey with white... (leaves alpha values alone...)
grey_areas = (red == r) & (blue == b) & (green == g)
data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed
im2 = Image.fromarray(data)
im2 = im2.convert('L') # convert to greyscale image
return im2
# Make a specified number of copies if the given image by rotating the original image by
# some random angle, and save the images according to the naming scheme followed by the original images
def random_rotate(img, copies, curr_filename, path):
c_color = img.getpixel((0,0)) # get the pixel values of top-left corner of image
for i in range(copies):
# rotate image by a random angle from [-max_angle, max_angle], using the c_color to fill in the corners
new_im = rotate_img(img, np.random.randint((0 - max_angle), max_angle), c_color)
# save new image to file
new_im.save(os.path.join(path, "bcc" + str(curr_filename).zfill(6) + ".bmp"))
curr_filename = curr_filename + 1
# augment the dataset by adding random rotations. The count of the original images is needed
# for naming the new images in a sequential order
def augment_by_rotations(folder, prev_cnt):
classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder
for path_to_folder in classes:
if os.path.isdir(path_to_folder):
images = [os.path.join(path_to_folder, i) for i in sorted(os.listdir(path_to_folder)) if i != '.DS_Store']
filename = prev_cnt
for image in images:
im = Image.open(image)
# make 4 copies of each image, with random rotations added in
random_rotate(im, 4, filename, path_to_folder)
filename = filename + 4
print("Finished augmenting " + path_to_folder)
# function to invert colors (black -> white and white-> black). Since most of the image consists
# of white areas, specified by (255, 255, 255) in RGB, inverting the colors means more zeros, making
# future operations less computationally expensive
def invert_colors(im):
im = im.convert('RGBA') # Convert to RGBA
data = np.array(im) # "data" is a height x width x 4 numpy array
red, green, blue, alpha = data.T # Temporarily unpack the bands for readability
# Replace black with red temporarily... (leaves alpha values alone...)
black_areas = (red == 0) & (blue == 0) & (green == 0)
data[..., :-1][black_areas.T] = (255, 0, 0) # Transpose back needed
# Replace white areas with black
white_areas = (red == 255) & (blue == 255) & (green == 255)
data[..., :-1][white_areas.T] = (0, 0, 0) # Transpose back needed
# Replace red areas (originally white) with black
red_areas = (red == 255) & (blue == 0) & (green == 0)
data[..., :-1][red_areas.T] = (255, 255, 255) # Transpose back needed
im2 = Image.fromarray(data)
im2 = im2.convert('L') # convert to greyscale image
return im2
# function to test the other functions on a specified image
# this is not needed once the other functins are confirmed to be working
def test_rotations():
img = Image.open("Train/172/bcc000002.bmp")
#img = img.rotate(30)
img = img.resize(dimensions)
rot = make_greyscale_white_bg(img, 127, 127, 127)
rot = invert_colors(rot)
c_color = rot.getpixel((0, 0))
rot = rotate_img(rot, 10, c_color)
w, h = rot.size
rot.show()
# function to process images (resizing, removal of grey backgrounds if any, color inversion, greyscale conversion)
def process_images(folder):
classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder
img_cnt = 0
for class_x in classes:
if os.path.isdir(class_x):
# get paths to all the images in this folder
images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']
for image in images:
img_cnt = img_cnt + 1
if(img_cnt % 1000 == 0): # show progress
print("Processed %s images" % str(img_cnt))
im = Image.open(image)
im = im.resize(dimensions) # resize image according to dimensions set
im = make_greyscale_white_bg(im, 127, 127, 127) # turn grey background (if any) to white, and
# convert into greyscale image with 1 channel
im = invert_colors(im)
im.save(image) # overwrite previous image file with new image
print("Finished processing images, images found = ")
print(img_cnt)
# Preprocess both datasets, then augment the training set (which already
# contains 240 original images) with rotated copies.
process_images(test_folder)
process_images(train_folder)
augment_by_rotations(train_folder, 240)
# The code below organizes the processed images into structures suitable for use with ML models
# A lot of the code is obtained from the assignments in the Google Deep Learning course in Udacity
image_size = 50  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.
# function to load all images from given folder, then convert the dataset into a 3D array (image index, x, y)
# of floating point values, normalized to have approximately zero mean and
# standard deviation ~0.5 to make training easier.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image_index, image in enumerate(image_files):
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) - # normalize data
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') # skip unreadable files
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images: # check if a given min. no. of images
raise Exception('Many fewer images than expected: %d < %d' % # has been loaded
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
# function to store the normalized tensors obtained from the load_letter function in
# .pickle files for later use
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
folders_list = os.listdir(data_folders)
for folder in folders_list:
#print(os.path.join(data_folders, folder))
curr_folder_path = os.path.join(data_folders, folder)
if os.path.isdir(curr_folder_path):
set_filename = curr_folder_path + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(curr_folder_path, min_num_images_per_class) # load and normalize the data
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
# Load, normalize and pickle the train and test datasets (force=True rebuilds
# any pickle files that already exist).
train_datasets = maybe_pickle(train_folder, 1050, True)
test_datasets = maybe_pickle(test_folder, 58, True)
# function to make two empty arrays, one for the input data and one for the labels
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
# function to merge all the images in the given pickle file. Part of the training dataset is used to
# create a validation dataset for hyperparameter tuning.
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t | |
import unittest

import IPython
import pandas
import saspy
from saspy.tests.util import Utilities
class TestSASstat(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sas = saspy.SASsession()
util = Utilities(cls.sas)
procNeeded=['reg', 'mixed', 'hpsplit', 'hplogistic', 'hpreg', 'glm', 'logistic', 'tpspline',
'hplogistic', 'hpreg', 'phreg', 'ttest', 'factor']
if not util.procFound(procNeeded):
cls.skipTest("Not all of these procedures were found: %s" % str(procNeeded))
@classmethod
def tearDownClass(cls):
if cls.sas:
cls.sas._endsas()
    def defineData(self):
        """Create the SAS work datasets used by these tests (Myeloma,
        SocioEconomics, time, pressure) from inline datalines."""
        self.sas.submit("""
        data Myeloma;
           input Time VStatus LogBUN HGB Platelet Age LogWBC Frac
                 LogPBM Protein SCalc;
           label Time='Survival Time'
                 VStatus='0=Alive 1=Dead';
           datalines;
        1.25 1 2.2175 9.4 1 67 3.6628 1 1.9542 12 10
        1.25 1 1.9395 12.0 1 38 3.9868 1 1.9542 20 18
        2.00 1 1.5185 9.8 1 81 3.8751 1 2.0000 2 15
        2.00 1 1.7482 11.3 0 75 3.8062 1 1.2553 0 12
        2.00 1 1.3010 5.1 0 57 3.7243 1 2.0000 3 9
        3.00 1 1.5441 6.7 1 46 4.4757 0 1.9345 12 10
        5.00 1 2.2355 10.1 1 50 4.9542 1 1.6628 4 9
        5.00 1 1.6812 6.5 1 74 3.7324 0 1.7324 5 9
        6.00 1 1.3617 9.0 1 77 3.5441 0 1.4624 1 8
        6.00 1 2.1139 10.2 0 70 3.5441 1 1.3617 1 8
        6.00 1 1.1139 9.7 1 60 3.5185 1 1.3979 0 10
        6.00 1 1.4150 10.4 1 67 3.9294 1 1.6902 0 8
        7.00 1 1.9777 9.5 1 48 3.3617 1 1.5682 5 10
        7.00 1 1.0414 5.1 0 61 3.7324 1 2.0000 1 10
        7.00 1 1.1761 11.4 1 53 3.7243 1 1.5185 1 13
        9.00 1 1.7243 8.2 1 55 3.7993 1 1.7404 0 12
        11.00 1 1.1139 14.0 1 61 3.8808 1 1.2788 0 10
        11.00 1 1.2304 12.0 1 43 3.7709 1 1.1761 1 9
        11.00 1 1.3010 13.2 1 65 3.7993 1 1.8195 1 10
        11.00 1 1.5682 7.5 1 70 3.8865 0 1.6721 0 12
        11.00 1 1.0792 9.6 1 51 3.5051 1 1.9031 0 9
        13.00 1 0.7782 5.5 0 60 3.5798 1 1.3979 2 10
        14.00 1 1.3979 14.6 1 66 3.7243 1 1.2553 2 10
        15.00 1 1.6021 10.6 1 70 3.6902 1 1.4314 0 11
        16.00 1 1.3424 9.0 1 48 3.9345 1 2.0000 0 10
        16.00 1 1.3222 8.8 1 62 3.6990 1 0.6990 17 10
        17.00 1 1.2304 10.0 1 53 3.8808 1 1.4472 4 9
        17.00 1 1.5911 11.2 1 68 3.4314 0 1.6128 1 10
        18.00 1 1.4472 7.5 1 65 3.5682 0 0.9031 7 8
        19.00 1 1.0792 14.4 1 51 3.9191 1 2.0000 6 15
        19.00 1 1.2553 7.5 0 60 3.7924 1 1.9294 5 9
        24.00 1 1.3010 14.6 1 56 4.0899 1 0.4771 0 9
        25.00 1 1.0000 12.4 1 67 3.8195 1 1.6435 0 10
        26.00 1 1.2304 11.2 1 49 3.6021 1 2.0000 27 11
        32.00 1 1.3222 10.6 1 46 3.6990 1 1.6335 1 9
        35.00 1 1.1139 7.0 0 48 3.6532 1 1.1761 4 10
        37.00 1 1.6021 11.0 1 63 3.9542 0 1.2041 7 9
        41.00 1 1.0000 10.2 1 69 3.4771 1 1.4771 6 10
        41.00 1 1.1461 5.0 1 70 3.5185 1 1.3424 0 9
        51.00 1 1.5682 7.7 0 74 3.4150 1 1.0414 4 13
        52.00 1 1.0000 10.1 1 60 3.8573 1 1.6532 4 10
        54.00 1 1.2553 9.0 1 49 3.7243 1 1.6990 2 10
        58.00 1 1.2041 12.1 1 42 3.6990 1 1.5798 22 10
        66.00 1 1.4472 6.6 1 59 3.7853 1 1.8195 0 9
        67.00 1 1.3222 12.8 1 52 3.6435 1 1.0414 1 10
        88.00 1 1.1761 10.6 1 47 3.5563 0 1.7559 21 9
        89.00 1 1.3222 14.0 1 63 3.6532 1 1.6232 1 9
        92.00 1 1.4314 11.0 1 58 4.0755 1 1.4150 4 11
        4.00 0 1.9542 10.2 1 59 4.0453 0 0.7782 12 10
        4.00 0 1.9243 10.0 1 49 3.9590 0 1.6232 0 13
        7.00 0 1.1139 12.4 1 48 3.7993 1 1.8573 0 10
        7.00 0 1.5315 10.2 1 81 3.5911 0 1.8808 0 11
        8.00 0 1.0792 9.9 1 57 3.8325 1 1.6532 0 8
        12.00 0 1.1461 11.6 1 46 3.6435 0 1.1461 0 7
        11.00 0 1.6128 14.0 1 60 3.7324 1 1.8451 3 9
        12.00 0 1.3979 8.8 1 66 3.8388 1 1.3617 0 9
        13.00 0 1.6628 4.9 0 71 3.6435 0 1.7924 0 9
        16.00 0 1.1461 13.0 1 55 3.8573 0 0.9031 0 9
        19.00 0 1.3222 13.0 1 59 3.7709 1 2.0000 1 10
        19.00 0 1.3222 10.8 1 69 3.8808 1 1.5185 0 10
        28.00 0 1.2304 7.3 1 82 3.7482 1 1.6721 0 9
        41.00 0 1.7559 12.8 1 72 3.7243 1 1.4472 1 9
        53.00 0 1.1139 12.0 1 66 3.6128 1 2.0000 1 11
        57.00 0 1.2553 12.5 1 66 3.9685 0 1.9542 0 11
        77.00 0 1.0792 14.0 1 60 3.6812 0 0.9542 0 12
        ;;
        run;
        data SocioEconomics;
           input Population School Employment Services HouseValue;
           datalines;
        5700 12.8 2500 270 25000
        1000 10.9 600 10 10000
        3400 8.8 1000 10 9000
        3800 13.6 1700 140 25000
        4000 12.8 1600 140 25000
        8200 8.3 2600 60 12000
        1200 11.4 400 10 16000
        9100 11.5 3300 60 14000
        9900 12.5 3400 180 18000
        9600 13.7 3600 390 25000
        9600 9.6 3300 80 12000
        9400 11.4 4000 100 13000
        ;;
        run;
        data time;
           input time @@;
           datalines;
        43 90 84 87 116 95 86 99 93 92
        121 71 66 98 79 102 60 112 105 98
        ;;
        run;
        data pressure;
           input SBPbefore SBPafter @@;
           datalines;
        120 128 124 131 130 131 118 127
        140 132 128 125 140 141 135 137
        126 118 130 132 126 129 127 135
        ;;
        run;
        """)
def test_smokeReg(self):
    """Smoke test: a basic REG model returns the expected result objects.

    Fits weight=height on sashelp.class and compares the members exposed
    by the returned SASresults object against the known PROC REG output set.
    """
    stat = self.sas.sasstat()
    tr = self.sas.sasdata("class", "sashelp")
    # REG
    b = stat.reg(data=tr, model='weight=height')
    expected = ['ANOVA', 'COOKSDPLOT', 'DFBETASPANEL', 'DFFITSPLOT', 'DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS',
                'LOG', 'NOBS', 'OBSERVEDBYPREDICTED', 'PARAMETERESTIMATES', 'QQPLOT', 'RESIDUALBOXPLOT',
                'RESIDUALBYPREDICTED',
                'RESIDUALHISTOGRAM', 'RESIDUALPLOT', 'RFPLOT', 'RSTUDENTBYLEVERAGE', 'RSTUDENTBYPREDICTED']
    # Fixes: "Regession" typo, and show the actual returned member list
    # (str(b) only printed the SASresults repr, which made failures undiagnosable).
    self.assertEqual(sorted(expected), sorted(b.__dir__()),
                     msg=u"Simple Regression (reg) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                         str(sorted(expected)), str(sorted(b.__dir__()))))
def regResult1(self):
    """reg() should hand back a SASresults instance.

    NOTE(review): the name lacks a 'test_' prefix, so unittest will not
    collect it — presumably disabled deliberately; confirm.
    """
    stat_module = self.sas.sasstat()
    class_data = self.sas.sasdata("class", "sashelp")
    fit = stat_module.reg(data=class_data, model='weight=height')
    self.assertIsInstance(fit, saspy.sasresults.SASresults, msg="correct return type")
def regResult2(self):
    """With PANDAS results, the ANOVA member should be a DataFrame.

    NOTE(review): the name lacks a 'test_' prefix, so unittest will not
    collect it — presumably disabled deliberately; confirm.
    """
    stat_module = self.sas.sasstat()
    class_data = self.sas.sasdata("class", "sashelp")
    class_data.set_results('PANDAS')
    fit = stat_module.reg(data=class_data, model='weight=height')
    self.assertIsInstance(fit.ANOVA, pandas.core.frame.DataFrame, msg="correct return type")
def regResult3(self):
    """Even with PANDAS results, the LOG member stays HTML.

    NOTE(review): the name lacks a 'test_' prefix, so unittest will not
    collect it — presumably disabled deliberately; confirm.
    """
    stat_module = self.sas.sasstat()
    class_data = self.sas.sasdata("class", "sashelp")
    class_data.set_results('PANDAS')
    fit = stat_module.reg(data=class_data, model='weight=height')
    self.assertIsInstance(fit.LOG, IPython.core.display.HTML, msg="correct return type")
def regResult4(self):
    """Graphics members (RESIDUALHISTOGRAM) stay HTML under PANDAS results.

    NOTE(review): the name lacks a 'test_' prefix, so unittest will not
    collect it — presumably disabled deliberately; confirm.
    """
    stat_module = self.sas.sasstat()
    class_data = self.sas.sasdata("class", "sashelp")
    class_data.set_results('PANDAS')
    fit = stat_module.reg(data=class_data, model='weight=height')
    self.assertIsInstance(fit.RESIDUALHISTOGRAM, IPython.core.display.HTML, msg="correct return type")
def test_smokeMixed(self):
    """Smoke test: a basic MIXED model returns the expected result objects."""
    stat = self.sas.sasstat()
    tr = self.sas.sasdata("class", "sashelp")
    b = stat.mixed(data=tr, model='weight=height')
    expected = ['COVPARMS', 'DIMENSIONS', 'FITSTATISTICS', 'LOG', 'MODELINFO', 'NOBS', 'PEARSONPANEL',
                'RESIDUALPANEL', 'STUDENTPANEL', 'TESTS3']
    # Show the actual returned member list on failure; str(b) only printed
    # the SASresults repr, which made mismatches undiagnosable.
    self.assertEqual(sorted(expected), sorted(b.__dir__()),
                     msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                         str(sorted(expected)), str(sorted(b.__dir__()))))
def test_smokeGLM(self):
    """Smoke test: a basic GLM model returns the expected result objects."""
    stat = self.sas.sasstat()
    tr = self.sas.sasdata("class", "sashelp")
    b = stat.glm(data=tr, model='weight=height')
    expected = ['DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS', 'LOG', 'MODELANOVA', 'NOBS', 'OVERALLANOVA',
                'PARAMETERESTIMATES', 'RESIDUALPLOTS']
    # Show the actual returned member list on failure; str(b) only printed
    # the SASresults repr, which made mismatches undiagnosable.
    self.assertEqual(sorted(expected), sorted(b.__dir__()),
                     msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                         str(sorted(expected)), str(sorted(b.__dir__()))))
def test_smokeLogistic(self):
    """Smoke test: a basic LOGISTIC model runs without SAS log errors."""
    stat_module = self.sas.sasstat()
    class_data = self.sas.sasdata("class", "sashelp")
    fit = stat_module.logistic(data=class_data, model='sex=height weight')
    # An ERROR_LOG member is only attached when the SAS log contained errors.
    self.assertFalse('ERROR_LOG' in fit.__dir__(), msg=u"logistic had errors in the log")
def test_smokeTpspline(self):
# Basic model returns objects
stat = self.sas.sasstat()
self.sas.submit("""
data work.melanoma;
input year incidences @@;
datalines;
1936 0.9 1937 0.8 1938 0.8 1939 1.3
1940 1.4 1941 1.2 1942 1.7 1943 1.8
1944 1.6 1945 1.5 1946 1.5 1947 2.0
1948 2.5 1949 2.7 1950 2.9 1951 2.5
1952 3.1 1953 2.4 1954 2.2 1955 2.9
1956 2.5 1957 2.6 1958 3.2 1959 3.8
1960 4.2 1961 3.9 1962 3.7 1963 3.3
1964 3.7 1965 3.9 1966 4.1 1967 3.8
1968 4.7 1969 4.4 1970 4.8 1971 4.8
1972 4.8
;;
run;
""")
tr = self.sas.sasdata("melanoma", "work")
b = stat.tpspline(data=tr, model='incidences = (year) /alpha = 0.1', output='out = result | |
# Copyright The IETF Trust 2013-2022, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
from django.urls import reverse
import debug # pyflakes:ignore
from ietf.utils.test_utils import TestCase
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.meeting.models import Session, ResourceAssociation, SchedulingEvent, Constraint
from ietf.meeting.factories import MeetingFactory, SessionFactory
from ietf.name.models import ConstraintName, TimerangeName
from ietf.person.models import Person
from ietf.secr.sreq.forms import SessionForm
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from pyquery import PyQuery
SECR_USER='secretary'
class SreqUrlTests(TestCase):
    """Smoke-test the secretariat session-request URLs."""

    def test_urls(self):
        """The secr landing, sreq main, and new-request pages all render."""
        MeetingFactory(type_id='ietf', date=datetime.date.today())
        self.client.login(username="secretary", password="<PASSWORD>")
        for path in ("/secr/", "/secr/sreq/"):
            self.assertEqual(self.client.get(path).status_code, 200)
        testgroup = GroupFactory()
        new_request_path = "/secr/sreq/%s/new/" % testgroup.acronym
        self.assertEqual(self.client.get(new_request_path).status_code, 200)
class SessionRequestTestCase(TestCase):
def test_main(self):
    """The main sreq page splits groups into scheduled and unscheduled."""
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    SessionFactory.create_batch(2, meeting=meeting, status_id='sched')
    SessionFactory.create_batch(2, meeting=meeting, status_id='disappr')
    # An additional unscheduled group comes from make_immutable_base_data
    self.client.login(username="secretary", password="<PASSWORD>")
    response = self.client.get(reverse('ietf.secr.sreq.views.main'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.context['scheduled_groups']), 2)
    self.assertEqual(len(response.context['unscheduled_groups']), 8)
def test_approve(self):
    """Approving a waiting request records an 'appr' scheduling event."""
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    ad = Person.objects.get(user__username='ad')
    area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
    mars = GroupFactory(parent=area, acronym='mars')
    # create session waiting for approval
    session = SessionFactory(meeting=meeting, group=mars, status_id='apprw')
    self.client.login(username="ad", password="<PASSWORD>")
    response = self.client.get(reverse('ietf.secr.sreq.views.approve', kwargs={'acronym':'mars'}))
    self.assertRedirects(response, reverse('ietf.secr.sreq.views.view', kwargs={'acronym':'mars'}))
    latest_event = SchedulingEvent.objects.filter(session=session).order_by('-id')[0]
    self.assertEqual(latest_event.status_id, 'appr')
def test_cancel(self):
    """Cancelling a scheduled request records a 'deleted' scheduling event."""
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    ad = Person.objects.get(user__username='ad')
    area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
    session = SessionFactory(meeting=meeting, group__parent=area, group__acronym='mars', status_id='sched')
    self.client.login(username="ad", password="<PASSWORD>")
    response = self.client.get(reverse('ietf.secr.sreq.views.cancel', kwargs={'acronym':'mars'}))
    self.assertRedirects(response, reverse('ietf.secr.sreq.views.main'))
    latest_event = SchedulingEvent.objects.filter(session=session).order_by('-id')[0]
    self.assertEqual(latest_event.status_id, 'deleted')
def test_edit(self):
    """Edit an existing session request through the secr sreq edit view.

    Posts the full session formset twice: first adding a second session
    plus several constraints, then clearing most constraints again.
    After each post it checks the database, the rendered view page, and
    the notification email.
    """
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
    group2 = GroupFactory()
    group3 = GroupFactory()
    group4 = GroupFactory()
    iabprog = GroupFactory(type_id='program')
    SessionFactory(meeting=meeting,group=mars,status_id='sched')
    url = reverse('ietf.secr.sreq.views.edit', kwargs={'acronym':'mars'})
    self.client.login(username="marschairman", password="<PASSWORD>")
    r = self.client.get(url)
    self.assertEqual(r.status_code, 200)
    attendees = 10
    comments = 'need lights'
    mars_sessions = meeting.session_set.filter(group__acronym='mars')
    empty_outbox()
    # First edit: request a second session and attach chair-conflict,
    # time-relation, adjacency, joint-group and timerange constraints.
    # Form 2 is the always-present blank extra form, marked for deletion.
    post_data = {'num_session':'2',
                 'attendees': attendees,
                 'constraint_chair_conflict':iabprog.acronym,
                 'session_time_relation': 'subsequent-days',
                 'adjacent_with_wg': group2.acronym,
                 'joint_with_groups': group3.acronym + ' ' + group4.acronym,
                 'joint_for_session': '2',
                 'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
                 'session_set-TOTAL_FORMS': '3',  # matches what view actually sends, even with only 2 filled in
                 'session_set-INITIAL_FORMS': '1',
                 'session_set-MIN_NUM_FORMS': '1',
                 'session_set-MAX_NUM_FORMS': '3',
                 'session_set-0-id':mars_sessions[0].pk,
                 'session_set-0-name': mars_sessions[0].name,
                 'session_set-0-short': mars_sessions[0].short,
                 'session_set-0-purpose': mars_sessions[0].purpose_id,
                 'session_set-0-type': mars_sessions[0].type_id,
                 'session_set-0-requested_duration': '3600',
                 'session_set-0-on_agenda': mars_sessions[0].on_agenda,
                 'session_set-0-remote_instructions': mars_sessions[0].remote_instructions,
                 'session_set-0-attendees': attendees,
                 'session_set-0-comments': comments,
                 'session_set-0-DELETE': '',
                 # no session_set-1-id because it's a new request
                 'session_set-1-name': '',
                 'session_set-1-short': '',
                 'session_set-1-purpose': 'regular',
                 'session_set-1-type': 'regular',
                 'session_set-1-requested_duration': '3600',
                 'session_set-1-on_agenda': True,
                 'session_set-1-remote_instructions': mars_sessions[0].remote_instructions,
                 'session_set-1-attendees': attendees,
                 'session_set-1-comments': comments,
                 'session_set-1-DELETE': '',
                 'session_set-2-id': '',
                 'session_set-2-name': '',
                 'session_set-2-short': '',
                 'session_set-2-purpose': 'regular',
                 'session_set-2-type': 'regular',
                 'session_set-2-requested_duration': '',
                 'session_set-2-on_agenda': 'True',
                 'session_set-2-attendees': attendees,
                 'session_set-2-comments': '',
                 'session_set-2-DELETE': 'on',
                 'submit': 'Continue'}
    r = self.client.post(url, post_data, HTTP_HOST='example.com')
    redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
    self.assertRedirects(r, redirect_url)
    # Check whether updates were stored in the database
    sessions = Session.objects.filter(meeting=meeting, group=mars)
    self.assertEqual(len(sessions), 2)
    session = sessions[0]
    self.assertEqual(session.constraints().get(name='chair_conflict').target.acronym, iabprog.acronym)
    self.assertEqual(session.constraints().get(name='time_relation').time_relation, 'subsequent-days')
    self.assertEqual(session.constraints().get(name='wg_adjacent').target.acronym, group2.acronym)
    self.assertEqual(
        list(session.constraints().get(name='timerange').timeranges.all().values('name')),
        list(TimerangeName.objects.filter(name__in=['thursday-afternoon-early', 'thursday-afternoon-late']).values('name'))
    )
    # joint_for_session was '2', so only the second session gets the joint groups.
    self.assertFalse(sessions[0].joint_with_groups.count())
    self.assertEqual(list(sessions[1].joint_with_groups.all()), [group3, group4])
    # Check whether the updated data is visible on the view page
    r = self.client.get(redirect_url)
    self.assertContains(r, 'Schedule the sessions on subsequent days')
    self.assertContains(r, 'Thursday early afternoon, Thursday late afternoon')
    self.assertContains(r, group2.acronym)
    self.assertContains(r, 'Second session with: {} {}'.format(group3.acronym, group4.acronym))
    # check that a notification was sent
    self.assertEqual(len(outbox), 1)
    notification_payload = get_payload_text(outbox[0])
    self.assertIn('1 Hour, 1 Hour', notification_payload)
    self.assertNotIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
    # Edit again, changing the joint sessions and clearing some fields. The behaviour of
    # edit is different depending on whether previous joint sessions were recorded.
    empty_outbox()
    post_data = {'num_session':'2',
                 'attendees':attendees,
                 'constraint_chair_conflict':'',
                 'comments':'need lights',
                 'joint_with_groups': group2.acronym,
                 'joint_for_session': '1',
                 'session_set-TOTAL_FORMS': '3',  # matches what view actually sends, even with only 2 filled in
                 'session_set-INITIAL_FORMS': '2',
                 'session_set-MIN_NUM_FORMS': '1',
                 'session_set-MAX_NUM_FORMS': '3',
                 'session_set-0-id':sessions[0].pk,
                 'session_set-0-name': sessions[0].name,
                 'session_set-0-short': sessions[0].short,
                 'session_set-0-purpose': sessions[0].purpose_id,
                 'session_set-0-type': sessions[0].type_id,
                 'session_set-0-requested_duration': '3600',
                 'session_set-0-on_agenda': sessions[0].on_agenda,
                 'session_set-0-remote_instructions': sessions[0].remote_instructions,
                 'session_set-0-attendees': sessions[0].attendees,
                 # NOTE(review): form 0 uses sessions[1].comments while every other
                 # 0-field uses sessions[0] — looks like a copy/paste slip; confirm.
                 'session_set-0-comments': sessions[1].comments,
                 'session_set-0-DELETE': '',
                 'session_set-1-id': sessions[1].pk,
                 'session_set-1-name': sessions[1].name,
                 'session_set-1-short': sessions[1].short,
                 'session_set-1-purpose': sessions[1].purpose_id,
                 'session_set-1-type': sessions[1].type_id,
                 'session_set-1-requested_duration': '3600',
                 'session_set-1-on_agenda': sessions[1].on_agenda,
                 'session_set-1-remote_instructions': sessions[1].remote_instructions,
                 'session_set-1-attendees': sessions[1].attendees,
                 'session_set-1-comments': sessions[1].comments,
                 'session_set-1-DELETE': '',
                 'session_set-2-id': '',
                 'session_set-2-name': '',
                 'session_set-2-short': '',
                 'session_set-2-purpose': 'regular',
                 'session_set-2-type': 'regular',
                 'session_set-2-requested_duration': '',
                 'session_set-2-on_agenda': 'True',
                 'session_set-2-attendees': attendees,
                 'session_set-2-comments': '',
                 'session_set-2-DELETE': 'on',
                 'submit': 'Continue'}
    r = self.client.post(url, post_data, HTTP_HOST='example.com')
    self.assertRedirects(r, redirect_url)
    # Check whether updates were stored in the database
    sessions = Session.objects.filter(meeting=meeting, group=mars)
    self.assertEqual(len(sessions), 2)
    session = sessions[0]
    # The cleared constraints must be gone again.
    self.assertFalse(session.constraints().filter(name='time_relation'))
    self.assertFalse(session.constraints().filter(name='wg_adjacent'))
    self.assertFalse(session.constraints().filter(name='timerange'))
    # joint_for_session is now '1', so the joint groups moved to session 0.
    self.assertEqual(list(sessions[0].joint_with_groups.all()), [group2])
    self.assertFalse(sessions[1].joint_with_groups.count())
    # check that a notification was sent
    self.assertEqual(len(outbox), 1)
    notification_payload = get_payload_text(outbox[0])
    self.assertIn('1 Hour, 1 Hour', notification_payload)
    self.assertNotIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
    # Check whether the updated data is visible on the view page
    r = self.client.get(redirect_url)
    self.assertContains(r, 'First session with: {}'.format(group2.acronym))
def test_edit_constraint_bethere(self):
    """Editing a request replaces its 'bethere' person constraints.

    Starts with the chair as the only bethere constraint, posts an edit
    naming the AD instead, and verifies the constraint set was replaced.
    """
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
    session = SessionFactory(meeting=meeting, group=mars, status_id='sched')
    Constraint.objects.create(
        meeting=meeting,
        source=mars,
        person=Person.objects.get(user__username='marschairman'),
        name_id='bethere',
    )
    self.assertEqual(session.people_constraints.count(), 1)
    url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
    self.client.login(username='marschairman', password='<PASSWORD>')
    attendees = '10'
    ad = Person.objects.get(user__username='ad')
    # Full formset post; forms 1 and 2 are blank extras marked for deletion.
    post_data = {
        'num_session': '1',
        'attendees': attendees,
        'bethere': str(ad.pk),
        'constraint_chair_conflict':'',
        'comments':'',
        'joint_with_groups': '',
        'joint_for_session': '',
        'delete_conflict': 'on',
        'session_set-TOTAL_FORMS': '3',  # matches what view actually sends, even with only 2 filled in
        'session_set-INITIAL_FORMS': '1',
        'session_set-MIN_NUM_FORMS': '1',
        'session_set-MAX_NUM_FORMS': '3',
        'session_set-0-id':session.pk,
        'session_set-0-name': session.name,
        'session_set-0-short': session.short,
        'session_set-0-purpose': session.purpose_id,
        'session_set-0-type': session.type_id,
        'session_set-0-requested_duration': '3600',
        'session_set-0-on_agenda': session.on_agenda,
        'session_set-0-remote_instructions': session.remote_instructions,
        'session_set-0-attendees': attendees,
        'session_set-0-comments': '',
        'session_set-0-DELETE': '',
        'session_set-1-id': '',
        'session_set-1-name': '',
        'session_set-1-short': '',
        'session_set-1-purpose':'regular',
        'session_set-1-type':'regular',
        'session_set-1-requested_duration': '',
        'session_set-1-on_agenda': 'True',
        'session_set-1-attendees': attendees,
        'session_set-1-comments': '',
        'session_set-1-DELETE': 'on',
        'session_set-2-id': '',
        'session_set-2-name': '',
        'session_set-2-short': '',
        'session_set-2-purpose': 'regular',
        'session_set-2-type': 'regular',
        'session_set-2-requested_duration': '',
        'session_set-2-on_agenda': 'True',
        'session_set-2-attendees': attendees,
        'session_set-2-comments': '',
        'session_set-2-DELETE': 'on',
        'submit': 'Save',
    }
    r = self.client.post(url, post_data, HTTP_HOST='example.com')
    redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
    self.assertRedirects(r, redirect_url)
    # The chair's constraint was replaced by the AD's.
    self.assertEqual([pc.person for pc in session.people_constraints.all()], [ad])
def test_edit_inactive_conflicts(self):
    """Inactive conflicts should be displayed and removable"""
    # 'conflict' is deliberately NOT in the meeting's enabled conflict
    # types, so the existing constraint is "inactive" for this meeting.
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today(), group_conflicts=['chair_conflict'])
    mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
    session = SessionFactory(meeting=meeting, group=mars, status_id='sched')
    other_group = GroupFactory()
    Constraint.objects.create(
        meeting=meeting,
        name_id='conflict',  # not in group_conflicts for the meeting
        source=mars,
        target=other_group,
    )
    url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
    self.client.login(username='marschairman', password='<PASSWORD>')
    r = self.client.get(url)
    self.assertEqual(r.status_code, 200)
    q = PyQuery(r.content)
    # check that the inactive session is displayed
    found = q('input#id_delete_conflict[type="checkbox"]')
    self.assertEqual(len(found), 1)
    delete_checkbox = found[0]
    # check that the label on the checkbox is correct
    self.assertIn('Delete this conflict', delete_checkbox.tail)
    # check that the target is displayed correctly in the UI
    self.assertIn(other_group.acronym, delete_checkbox.find('../input[@type="text"]').value)
    attendees = '10'
    # Re-post the form with delete_conflict checked to remove the
    # inactive constraint.
    post_data = {
        'num_session': '1',
        'attendees': attendees,
        'constraint_chair_conflict':'',
        'comments':'',
        'joint_with_groups': '',
        'joint_for_session': '',
        'delete_conflict': 'on',
        'session_set-TOTAL_FORMS': '1',
        'session_set-INITIAL_FORMS': '1',
        'session_set-MIN_NUM_FORMS': '1',
        'session_set-MAX_NUM_FORMS': '3',
        'session_set-0-id':session.pk,
        'session_set-0-name': session.name,
        'session_set-0-short': session.short,
        'session_set-0-purpose': session.purpose_id,
        'session_set-0-type': session.type_id,
        'session_set-0-requested_duration': '3600',
        'session_set-0-on_agenda': session.on_agenda,
        'session_set-0-remote_instructions': session.remote_instructions,
        'session_set-0-attendees': attendees,
        'session_set-0-comments': '',
        'session_set-0-DELETE': '',
        'submit': 'Save',
    }
    r = self.client.post(url, post_data, HTTP_HOST='example.com')
    redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
    self.assertRedirects(r, redirect_url)
    # The inactive constraint must be gone.
    self.assertEqual(len(mars.constraint_source_set.filter(name_id='conflict')), 0)
def test_tool_status(self):
    """The tool-status page renders and accepts a lock request."""
    MeetingFactory(type_id='ietf', date=datetime.date.today())
    status_url = reverse('ietf.secr.sreq.views.tool_status')
    self.client.login(username="secretary", password="<PASSWORD>")
    self.assertEqual(self.client.get(status_url).status_code, 200)
    response = self.client.post(status_url, {'message':'locked', 'submit':'Lock'})
    self.assertRedirects(response, reverse('ietf.secr.sreq.views.main'))
def test_new_req_constraint_types(self):
    """The new-request form shows exactly the meeting's enabled conflict types.

    Relies on SessionForm representing constraint values with element IDs
    like id_constraint_<ConstraintName slug>.
    """
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars')
    url = reverse('ietf.secr.sreq.views.new', kwargs=dict(acronym='mars'))
    self.client.login(username="marschairman", password="<PASSWORD>")
    slug_sets = [
        ['conflict', 'conflic2', 'conflic3'],
        ['chair_conflict', 'tech_overlap', 'key_participant'],
    ]
    for enabled_slugs in slug_sets:
        # Reconfigure the meeting's conflict types, then re-render the form.
        meeting.group_conflict_types.clear()
        for slug in enabled_slugs:
            meeting.group_conflict_types.add(ConstraintName.objects.get(slug=slug))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        doc = PyQuery(response.content)
        self.assertCountEqual(
            [elt.attr('id') for elt in doc.items('*[id^=id_constraint_]')],
            ['id_constraint_{}'.format(slug) for slug in enabled_slugs],
        )
def test_edit_req_constraint_types(self):
    """The edit form shows exactly the meeting's enabled conflict types."""
    meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
    SessionFactory(group__acronym='mars',
                   status_id='schedw',
                   meeting=meeting,
                   add_to_schedule=False)
    RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars')
    url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
    self.client.login(username='marschairman', password='<PASSWORD>')
    slug_sets = [
        ['conflict', 'conflic2', 'conflic3'],
        ['chair_conflict', 'tech_overlap', 'key_participant'],
    ]
    for enabled_slugs in slug_sets:
        # Reconfigure the meeting's conflict types, then re-render the form.
        meeting.group_conflict_types.clear()
        for slug in enabled_slugs:
            meeting.group_conflict_types.add(ConstraintName.objects.get(slug=slug))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        doc = PyQuery(response.content)
        self.assertCountEqual(
            [elt.attr('id') for elt in doc.items('*[id^=id_constraint_]')],
            ['id_constraint_{}'.format(slug) for slug in enabled_slugs],
        )
class SubmitRequestCase(TestCase):
def setUp(self):
    """Make meeting numbers predictable for constraint-type selection."""
    super(SubmitRequestCase, self).setUp()
    # Temporarily needed while basing constraint types on meeting number;
    # expected to go away when #2770 is resolved.
    MeetingFactory.reset_sequence(0)
def test_submit_request(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
group = GroupFactory(parent=area)
group2 = GroupFactory(parent=area)
group3 = GroupFactory(parent=area)
group4 = GroupFactory(parent=area)
session_count_before = Session.objects.filter(meeting=meeting, group=group).count()
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
confirm_url = reverse('ietf.secr.sreq.views.confirm',kwargs={'acronym':group.acronym})
main_url = reverse('ietf.secr.sreq.views.main')
attendees = '10'
comments = 'need projector'
post_data = {'num_session':'1',
'attendees':attendees,
'constraint_chair_conflict':'',
'comments':comments,
'adjacent_with_wg': group2.acronym,
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'joint_with_groups': group3.acronym + ' ' + group4.acronym,
'joint_for_session': '1',
'session_set-TOTAL_FORMS': '1',
'session_set-INITIAL_FORMS': '0',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' to create a new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': comments,
'session_set-0-DELETE': '',
'submit': 'Continue'}
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
# Verify the contents of | |
transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
# Space group 83, 'P 4/m': the eight 4/m point operations, all with
# zero translation.  Same (rot, trans_num, trans_den) tuples as before,
# built from a data table instead of eight copy/paste stanzas.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
# Space group 84, 'P 42/m': like P 4/m but the fourfold operations carry
# a c/2 screw translation.  Identical tuples, built from a data table.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,0,1],  [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,0,-1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],  [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
# Space group 85, 'P 4/n :2' (origin choice 2).  Identical tuples,
# built from a data table.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [1,0,0],    [2,1,1]),
    ([0,1,0, -1,0,0, 0,0,1],   [0,1,0],    [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [-1,0,0],   [2,1,1]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [0,-1,0],   [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
# Space group 86, 'P 42/n :2' (origin choice 2).  Identical tuples,
# built from a data table.
transformations = []
for rot_elements, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0, 1,0,0, 0,0,1],   [0,1,1],    [1,2,2]),
    ([0,1,0, -1,0,0, 0,0,1],   [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0, -1,0,0, 0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([0,-1,0, 1,0,0, 0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
# Space group 87, 'I 4/m': the eight 4/m point operations at the origin,
# then the same eight shifted by the I-centering vector (1/2, 1/2, 1/2).
# Identical tuples, in the same order as the original stanzas.
transformations = []
point_ops = [
    [1,0,0, 0,1,0, 0,0,1],
    [0,-1,0, 1,0,0, 0,0,1],
    [0,1,0, -1,0,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [0,1,0, -1,0,0, 0,0,-1],
    [0,-1,0, 1,0,0, 0,0,-1],
    [1,0,0, 0,1,0, 0,0,-1],
]
for num, den in ([0,0,0], [1,1,1]), ([1,1,1], [2,2,2]):
    for rot_elements in point_ops:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
# Space groups 88-92, registered under both their International Tables
# number and their Hermann-Mauguin symbol. Each operation is stored as
# (flattened 3x3 rotation, translation numerators, translation denominators).
for sg_number, sg_symbol, sg_ops in (
    (88, 'I 41/a :2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,3,3], [4,4,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [4,4,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-3], [4,4,4]),
        ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [4,4,4]),
        ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [3,5,5], [4,4,4]),
        ([0,1,0,-1,0,0,0,0,1], [3,3,3], [4,4,4]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,-1], [1,-1,-1], [4,4,4]),
        ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [4,4,4]),
        ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ]),
    (89, 'P 4 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (90, 'P 4 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (91, 'P 41 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,3], [1,1,4]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,4]),
    ]),
    (92, 'P 41 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,3], [2,2,4]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,2,4]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ]),
):
    transformations = []
    for flat_rot, num, den in sg_ops:
        rot = N.array(flat_rot)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(sg_number, sg_symbol, transformations)
    space_groups[sg_number] = sg
    space_groups[sg_symbol] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# @package mdtools
# Markdown Tools develops for Guichet Entreprises
#
# -----------------------------------------------------------------------------
import logging
import sys
import os
import re
from . import common
# -----------------------------------------------------------------------------
# re expression used for instruction
# -----------------------------------------------------------------------------
# Any XML/HTML comment, non-greedy, may span several lines.
__comment_re__ = \
    r"<!--(?P<comment>[\s\S]*?)-->"
# Opening marker of a named reference block: <!-- begin-ref(name) -->
__begin_ref_re__ = \
    r"<!--\s+begin-ref\((?P<name>[a-zA-Z0-9_-]+)\)\s+-->"
# Closing marker of a reference block: <!-- end-ref -->
__end_ref_re__ = \
    r"<!--\s+end-ref\s+-->"
# Opening marker of an include block: <!-- begin-include(name) -->
__begin_include_re__ = \
    r"<!--\s+begin-include\((?P<name>[a-zA-Z0-9_-]+)\)\s+-->"
# Closing marker of an include block: <!-- end-include -->
__end_include_re__ = \
    r"<!--\s+end-include\s+-->"
# Inline variable assignment: <!-- var(name) = "value" -->
# (quote-aware: the value ends at the first unescaped matching quote;
# the name may additionally contain ':')
__var_re__ = \
    r"""<!--\s+var\((?P<name>[a-zA-Z0-9:_-]+)\)\s*""" \
    r"""=\s*(?P<quote>['\"])(?P<string>.*?)(?<!\\)(?P=quote)\s+-->"""
# Opening marker of a variable block: <!-- begin-var(name) -->
__begin_var_re__ = \
    r"<!--\s+begin-var\((?P<name>[a-zA-Z0-9_-]+)\)\s+-->"
# Closing marker of a variable block: <!-- end-var -->
__end_var_re__ = \
    r"<!--\s+end-var\s+-->"
# File inclusion directive carrying its own content:
# <!-- include-file(name) ...content... --> (name may contain dots)
__include_file_re__ = \
    r"<!--\s+include-file\((?P<name>[\.a-zA-Z0-9_-]+)\)" \
    r"(?P<content>[\s\S]*?)-->"
# -----------------------------------------------------------------------------
# strip XML comment.
# Remove all xml comment from a text
#
# @param text the markdown text
# @return the text without xml comment
# -----------------------------------------------------------------------------
def strip_xml_comment(text):
    """Return *text* with every XML comment (``<!-- ... -->``) removed."""
    return re.sub(__comment_re__, "", text)
# -----------------------------------------------------------------------------
# Find refs in a markdown text
#
# @param text the markdown text
# @param previous_refs the previous refs dict for a recursive call
# @return the dict with the refs found key-> value
# -----------------------------------------------------------------------------
def get_refs_from_md_text(text, previous_refs=None):
    """
    Collect every ``begin-ref(name)`` / ``end-ref`` block found in *text*.

    @param text the markdown text
    @param previous_refs optional dict to accumulate into (mutated in place)
    @return dict mapping reference name -> raw content between the markers
    @raise Exception on a duplicated reference name or a missing end marker
    """
    result = {} if previous_refs is None else previous_refs
    remaining = text
    while True:
        match_begin = re.search(__begin_ref_re__, remaining)
        if match_begin is None:
            # no further begin-ref marker: finished
            return result
        key = match_begin.group('name')
        logging.debug('Find the key reference %s', key)
        if key in result:
            logging.error(
                'Find a new begin-ref(%s), there is a double reference', key)
            raise Exception(
                'Find a new begin-ref(%s), there is a double reference' % (key))
        tail = remaining[match_begin.end(0):]
        match_end = re.search(__end_ref_re__, tail)
        if match_end is None:
            logging.error(
                'Find a begin-ref(%s) and not finding the end-ref', key)
            raise Exception(
                'Find a begin-ref(%s) and not finding the end-ref' % (key))
        # keep the raw content: XML comments inside the block are preserved
        result[key] = tail[0:match_end.start(0)]
        remaining = tail[match_end.end(0):]
# -----------------------------------------------------------------------------
# Find refs in a markdown file
#
# @param filename the markdown file
# @param filename_ext the extension of the markdown file
# @param previous_refs the previous refs dict for a recursive call
# @return the dict with the refs found key-> value
# -----------------------------------------------------------------------------
def get_refs_from_md_file(filename, filename_ext=".md", previous_refs=None):
    """
    Collect the begin-ref/end-ref blocks of a single markdown file.

    @param filename the markdown file
    @param filename_ext the expected extension of the markdown file
    @param previous_refs optional dict to accumulate into
    @return dict mapping reference name -> content
    """
    logging.debug('Find refs in the MD the file %s', filename)
    filename = common.check_is_file_and_correct_path(filename, filename_ext)
    content = common.get_file_content(filename, encoding="UNKNOWN")
    return get_refs_from_md_text(content, previous_refs=previous_refs)
# -----------------------------------------------------------------------------
# Find refs in markdown file in a folder and subfolder.
# Depth parameter :
# - -1-> every subfolder.
# - 0-> the current level
# - n-> (with n>0) n subfolder level of the folder
#
# @param folder the folder pathname
# @param filename_ext the extension of the markdown file
# @param previous_refs the previous refs dict for a recursive call
# @param depth the depth to search for.
# @return the dict with the refs found key-> value
# -----------------------------------------------------------------------------
def get_refs_from_md_directory(folder, filename_ext=".md",
                               previous_refs=None, depth=-1):
    """
    Collect refs from the markdown files of *folder* and its subfolders.

    Depth parameter:
      - -1 -> every subfolder
      -  0 -> the current level only
      -  n -> (with n>0) n subfolder levels below *folder*

    @param folder the folder pathname
    @param filename_ext the extension of the markdown files
    @param previous_refs optional dict to accumulate into
    @param depth the depth to search for
    @return dict mapping reference name -> content
    """
    logging.debug('Find refs in the MD in the folder "%s"', folder)
    folder = common.check_folder(folder)
    result = {} if previous_refs is None else previous_refs
    md_files = [os.path.join(folder, f) for f in os.listdir(folder)
                if (os.path.isfile(os.path.join(folder, f)) and
                    (os.path.splitext(f)[1] == filename_ext))]
    for filename in md_files:
        result = get_refs_from_md_file(filename, filename_ext, result)
    logging.debug('End refs in the MD the folder "%s"', folder)
    if depth == 0:
        return result
    folders = [os.path.join(folder, f) for f in os.listdir(folder)
               if os.path.isdir(os.path.join(folder, f))]
    for dirname in folders:
        # BUG FIX: propagate the caller's extension; the recursion previously
        # hard-coded filename_ext=".md", silently resetting a custom extension
        # for every subfolder.
        result = get_refs_from_md_directory(
            dirname, filename_ext=filename_ext,
            previous_refs=result, depth=depth - 1)
    return result
# -----------------------------------------------------------------------------
# Find refs around a markdown file.
# -----------------------------------------------------------------------------
def get_refs_other(refs=None, filename_ext=".md", **kwargs):
    """
    Collect refs from every folder listed in ``kwargs['search_folders']``.

    @param refs optional dict to accumulate into
    @param filename_ext the extension of the markdown files
    @return the dict with the refs found (unchanged when no search folders)
    """
    collected = {} if refs is None else refs
    for search_folder in kwargs.get('search_folders', ()):
        collected = get_refs_from_md_directory(search_folder,
                                               filename_ext,
                                               previous_refs=collected,
                                               depth=-1)
    return collected
# -----------------------------------------------------------------------------
# Find refs around a markdown file.
# Depth down parameter :
# - -1-> every subfolder.
# - 0-> the current level
# - n-> (with n>0) n subfolder level of the folder
#
# @param filename the name of the markdown file
# @param filename_ext the extension of the markdown file
# @param previous_refs the previous refs dict for a recursive call
# @param depth_up the number of upper folder to search for.
# @param depth_down the depth to search for.
# @return the dict with the refs found key-> value
# -----------------------------------------------------------------------------
def get_refs_around_md_file(filename, filename_ext=".md",
                            previous_refs=None,
                            depth_up=1, depth_down=-1):
    """
    Discover refs in the folders around *filename*: climb *depth_up* parent
    folders, then scan downward from there.

    @param filename the name of the markdown file
    @param filename_ext the extension of the markdown files
    @param previous_refs optional dict to accumulate into
    @param depth_up number of parent folders to climb before scanning
    @param depth_down downward depth (-1 -> unlimited, 0 -> current level)
    @return the dict with the refs found
    """
    logging.debug('Discover refs around the file "%s"', filename)
    filename = common.set_correct_path(filename)
    start_dir = os.path.abspath(os.path.dirname(filename))
    while depth_up > 0:
        parent_dir = os.path.abspath(os.path.join(start_dir, os.pardir))
        if parent_dir == start_dir:
            # reached the filesystem root: stop climbing
            depth_up = 0
        else:
            depth_up -= 1
            # each level climbed widens the downward scan by one level
            if depth_down > 0:
                depth_down += 1
            start_dir = parent_dir
    return get_refs_from_md_directory(start_dir,
                                      filename_ext,
                                      previous_refs=previous_refs,
                                      depth=depth_down)
# -----------------------------------------------------------------------------
# Include reference to the markdown text
# \warning All the reference must be defined
#
# @param text the markdown text
# @param begin_include_re the regex to match the begin
# @return all references
# -----------------------------------------------------------------------------
def refs_in_md_text(text, begin_include_re=__begin_include_re__):
    """Return the names of all begin-include markers found in *text*."""
    return [match.group('name')
            for match in re.finditer(begin_include_re, text)]
# -----------------------------------------------------------------------------
# Include reference to the markdown text
# \warning All the reference must be defined
#
# @param text the markdown text
# @param refs_include the dict with all the references
# @param begin_include_re the regex to match the begin
# @param end_include_re the regex to match the end
# @param error_if_no_key boolean :
# throw Exception if the key is not found
# @return the markdown text with the include
# -----------------------------------------------------------------------------
def include_refs_to_md_text(text, refs_include,
                            begin_include_re=__begin_include_re__,
                            end_include_re=__end_include_re__,
                            error_if_no_key=True):
    """
    Replace the content of every begin-include/end-include span of *text*
    with the referenced value, keeping the markers themselves in place.

    @param text the markdown text
    @param refs_include the dict with all the references
    @param begin_include_re the regex matching the begin marker
    @param end_include_re the regex matching the end marker
    @param error_if_no_key raise when a referenced key is unknown
    @return the markdown text with the includes resolved
    @raise Exception unknown key (if error_if_no_key) or missing end marker
    """
    match_begin = re.search(begin_include_re, text)
    if match_begin is None:
        return text
    key = match_begin.group('name')
    logging.debug('Find the key reference %s', key)
    head = text[0:match_begin.end(0)]
    tail = text[match_begin.end(0):]
    if key not in refs_include:
        if error_if_no_key:
            logging.error(
                'Find a begin-include(%s) and '
                'not finding the reference', key)
            raise Exception(
                'Find a begin-include(%s) and '
                'not finding the reference' % (key))
        # unknown key tolerated: keep the marker and resolve the rest
        return head + include_refs_to_md_text(tail,
                                              refs_include,
                                              begin_include_re=begin_include_re,
                                              end_include_re=end_include_re,
                                              error_if_no_key=error_if_no_key)
    match_end = re.search(end_include_re, tail)
    if match_end is None:
        msg = 'Find a begin-include(%s) and not finding the end-include' % key
        logging.error(msg)
        raise Exception(msg)
    # the old content between the markers is discarded on purpose
    end_marker = tail[match_end.start(0):match_end.end(0)]
    resolved_rest = include_refs_to_md_text(tail[match_end.end(0):],
                                            refs_include,
                                            begin_include_re=begin_include_re,
                                            end_include_re=end_include_re,
                                            error_if_no_key=error_if_no_key)
    return head + refs_include[key] + end_marker + resolved_rest
# -----------------------------------------------------------------------------
# Include reference to the markdown text
# \warning All the reference must be defined
#
# @param filename The name and path of the file to work with. This file is
# supposed to be a markdown file.
# @param refs the dict with all the references
# @param backup_option This parameter is set to true by default.
# If the backup option is set,
# then a file named filename.bak will be created.
# @param filename_ext This parameter the markdown extension for the filename.
# @param begin_include_re the regex to match the begin
# @param end_include_re the regex to match the end
# @param error_if_no_key boolean : throw Exception
# if the key is not found
# @return the filename normalized
# -----------------------------------------------------------------------------
def include_refs_to_md_file(filename,
                            refs,
                            backup_option=True,
                            filename_ext=".md",
                            begin_include_re=__begin_include_re__,
                            end_include_re=__end_include_re__,
                            error_if_no_key=True):
    """
    Resolve the include markers of a markdown file on disk.

    @param filename the markdown file to rewrite
    @param refs the dict with all the references
    @param backup_option when True a filename.bak copy is created first
    @param filename_ext the markdown extension expected for *filename*
    @param begin_include_re the regex matching the begin marker
    @param end_include_re the regex matching the end marker
    @param error_if_no_key raise when a referenced key is unknown
    @return the filename normalized
    """
    logging.debug('Include refs to the file %s', filename)
    filename = common.check_is_file_and_correct_path(filename, filename_ext)
    content = common.get_file_content(filename)
    if backup_option:
        common.create_backup(filename)
    content = include_refs_to_md_text(content, refs,
                                      begin_include_re=begin_include_re,
                                      end_include_re=end_include_re,
                                      error_if_no_key=error_if_no_key)
    # remove then rewrite so the file is recreated with UTF-8 encoding
    os.remove(filename)
    common.set_file_content(filename, content, encoding="utf-8")
    return filename
# -----------------------------------------------------------------------------
# Search and include reference to the markdown text
# \warning All the reference must be defined
#
# Depth down parameter :
# - -1-> every subfolder.
# - 0-> the current level
# - n-> (with n>0) n subfolder level of the folder
#
# @param filename The name and path of the file to work with.
# This file is supposed to be a markdown file.
# @param | |
help="Level of logging. (default error)", default="error",
choices=["error", "warn", "debug", "info"])
parser.add_argument("--class_thresholds", "-ct",
help="Class specific thresholds", default=None, nargs='+')
parser.add_argument("--output_vectors_dir", "-vd", help="Output vector path", default=OUTPUT_VECTORS_DIR_PATH)
parser.add_argument("--output_vectors_zip_path","-vzp",help="Output vector zip file path", default=OUTPUT_VECTORS_ZIP_PATH)
parser.add_argument("--zip_vectors", "-zv", help="Flag to zip vector json or not", default= "True")
args = parser.parse_args(argv)
args = validate_args(args)
return args
def validate_args(args):
    """
    Validate and normalize the parsed command-line arguments.

    Mutates *args* in place (integer casts, list parsing, threshold dict),
    configures the root logger level, and returns the same namespace.

    :param args: argparse.Namespace produced by parse_args
    :return: the validated and normalized namespace
    :raises ValueError: for invalid paths, thresholds or window sizes
    :raises RuntimeError: for malformed lists or threshold specifications
    """
    validate_file("tif", args.tif, False)
    validate_file("imd", args.imd, False, ".imd")
    validate_files("model_paths", args.model_paths, True)
    if args.threshold < 0.0:
        error_msg = "The provided threshold of %s is below 0" % args.threshold
        log.error(error_msg)
        raise ValueError(error_msg)
    validate_num_above_0("win_size", args.win_size)
    validate_num_above_0("step_size", args.step_size)
    validate_num_above_0("pyramid_min_size", args.pyramid_min_size)
    validate_num_above_value("pyramid_scale_factor", args.pyramid_scale_factor, 1)
    # ensure the min pyramid size < win_size
    if args.win_size < args.pyramid_min_size:
        error_msg = "The provided min pyramid size (%s) is greater than the window size (%s)" % (args.pyramid_min_size, args.win_size)
        log.error(error_msg)
        raise ValueError(error_msg)
    if args.bounding_box:
        validate_bbox(args.bounding_box)
    # Validate and modify to create a list of integers [150, 100, etc.]
    args.pyramid_window_sizes = validate_create_int_list("pyramid_window_sizes", args.pyramid_window_sizes)
    args.pyramid_step_sizes = validate_create_int_list("pyramid_step_sizes", args.pyramid_step_sizes)
    # Check pyramid_window_sizes and pyramid_step_sizes has the same length
    if args.pyramid_window_sizes and args.pyramid_step_sizes and len(args.pyramid_step_sizes) != len(args.pyramid_window_sizes):
        raise RuntimeError("Pyramid window sizes length {} != Pyramid step sizes length {} ".format(len(args.pyramid_window_sizes), len(args.pyramid_step_sizes)))
    args.bands = validate_create_int_list("bands", args.bands)
    # Cast as integer (argparse delivers it as a string by default)
    args.num_processes = int(args.num_processes)
    validate_num_above_0("num_processes", args.num_processes)
    # GPU inference is single-process only: silently downgrade the pool size
    if args.gpu_flag.lower() == 'true' and args.num_processes > 1:
        log.info('WARNING: Setting gpu_flag to True requires the use of only one process. Setting num_processes = 1.')
        args.num_processes = 1
    # Now set the log level (already guaranteed to be one of the following)
    # NOTE(review): "error"/"warn" are compared case-insensitively but
    # "debug"/"info" are not; argparse choices are all lowercase, so the
    # four branches behave consistently in practice.
    if args.log_level.lower() == "error":
        log_level_val = log.ERROR
    elif args.log_level.lower() == "warn":
        log_level_val = log.WARN
    elif args.log_level == "debug":
        log_level_val = log.DEBUG
    elif args.log_level == 'info':
        log_level_val = log.INFO
    log.getLogger().setLevel(log_level_val)
    # Now set the thresholds (tokens like ['Fishing', 'Boat:0.6'] are folded
    # into a {class name: threshold} dict)
    if args.class_thresholds is None:
        args.class_thresholds = {}
    else:
        try:
            args.class_thresholds = class_thresholds_dict(args.class_thresholds)
        except Exception, e:
            log.info('ERROR: Error setting class thresholds. Format should be Class Name:Value .')
            raise RuntimeError(str(e))
    return args
def class_thresholds_dict(thresholds):
    """
    Parse CLI tokens like ['Fishing', 'Boat:0.6', 'car:0.5'] into a dict
    {'fishing boat': 0.6, 'car': 0.5}.

    Tokens are accumulated until one contains ':'; that token closes the
    (possibly multi-word) class name and carries the float threshold.
    Class names are lower-cased.
    """
    parsed = {}
    name_tokens = []
    for token in thresholds:
        if ':' not in token:
            name_tokens.append(token)
            continue
        pieces = token.split(':')
        name_tokens.append(pieces[0])
        parsed[' '.join(name_tokens).lower()] = float(pieces[1])
        name_tokens = []
    return parsed
def validate_files(arg_name, file_paths, is_dir, extension=None):
    """
    Validate a list of paths with validate_file().

    :param arg_name: argument name used in error messages
    :param file_paths: list of paths to validate
    :param is_dir: whether each path must be a directory
    :param extension: optional required filename extension
    :raises ValueError: if the list is None or any entry fails validation
    """
    if file_paths is None:
        error_msg = "The path provided for %s is None" % arg_name
        log.error(error_msg)
        # BUG FIX: raise the exception with its message instead of the bare
        # ValueError class (the prepared error_msg was previously discarded)
        raise ValueError(error_msg)
    for i, file_path in enumerate(file_paths):
        validate_file("%s[%s]" % (arg_name, i), file_path, is_dir, extension)
def validate_file(arg_name, file_path, is_dir, extension=None):
    """
    Validate one path: existence, optional extension, file/directory kind.

    :param arg_name: argument name used in error messages
    :param file_path: the path to validate
    :param is_dir: True when the path must be a directory, False for a file
    :param extension: optional required filename extension (case-insensitive)
    :raises ValueError: when any check fails
    """
    if file_path is None:
        error_msg = "The path provided for %s is None" % arg_name
        log.error(error_msg)
        # BUG FIX: raise the exception with its message instead of the bare
        # ValueError class (the prepared error_msg was previously discarded)
        raise ValueError(error_msg)
    if not os.path.exists(file_path):
        error_msg = "The path provided for %s (%s) does not exist" % (arg_name, file_path)
        log.error(error_msg)
        raise ValueError(error_msg)
    if extension and not file_path.lower().endswith(extension):
        error_msg = "The path provided for %s (%s) does not end with the extension of %s" % (arg_name, file_path, extension)
        log.error(error_msg)
        raise ValueError(error_msg)
    if not is_dir and not os.path.isfile(file_path):
        error_msg = "The path provided for %s (%s) is not a file but is expected to be one" % (arg_name, file_path)
        log.error(error_msg)
        raise ValueError(error_msg)
    if is_dir and not os.path.isdir(file_path):
        error_msg = "The provided for %s (%s) is not a directory but is expected to be one" % (arg_name, file_path)
        log.error(error_msg)
        raise ValueError(error_msg)
def validate_num_above_value(arg_name, actual, expected):
    """
    Ensure *actual* >= *expected*.

    :param arg_name: argument name used in the error message
    :param actual: the value to check
    :param expected: the minimum allowed value
    :raises RuntimeError: when actual < expected
    """
    if actual < expected:
        error_msg = "The provided value (%s) for %s is below %s" % (actual, arg_name, expected)
        log.error(error_msg)
        # reuse the prepared message instead of rebuilding the identical string
        raise RuntimeError(error_msg)
def validate_create_int_list(arg_name, value):
    """
    Parse a literal such as "[150, 100]" into a list of ints.

    :param arg_name: argument name used in the error message
    :param value: string literal to parse, or None
    :return: list of ints, or None when *value* is None
    :raises RuntimeError: when an element cannot be cast to int
    """
    if value is None:
        return None
    error_msg = "The provided value %s for %s should be a list of integers" % (value, arg_name)
    val_list = list(ast.literal_eval(value))
    for index, val in enumerate(val_list):
        try:
            val_list[index] = int(val)
        except (TypeError, ValueError):
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; int() only raises these two
            log.error(error_msg)
            raise RuntimeError(error_msg)
    return val_list
def validate_num_above_0(arg_name, value):
    # Convenience wrapper: raises RuntimeError when *value* is below 0.
    validate_num_above_value(arg_name, value, 0)
def validate_bbox(bounding_box):
    """
    Validate a "minX minY maxX maxY" geographic bounding box string.

    :param bounding_box: space-separated string of four lon/lat floats
    :raises RuntimeError: wrong value count or coordinates out of range
    """
    values = [float(part.strip()) for part in bounding_box.split(" ")]
    if len(values) != 4:
        raise RuntimeError("The provided value (%s) for %s must have only 4 values" % (bounding_box, "bounding_box"))
    min_x, min_y, max_x, max_y = values
    if min_x < -180:
        raise RuntimeError("The provided value (%s) for minX must be more than -180" % min_x)
    if min_y < -90:
        raise RuntimeError("The provided value (%s) for minY must be more than -90" % min_y)
    if max_x > 180:
        raise RuntimeError("The provided value (%s) for maxX must be less than 180" % max_x)
    if max_y > 90:
        raise RuntimeError("The provided value (%s) for maxY must be less than 90" % max_y)
def parse_imd_file(imd_file_path):
    # Stub: real .IMD metadata parsing is not implemented; fixed placeholder
    # values are returned regardless of the input path.
    # NOTE(review): process_args() calls `parse_imd`, not this function —
    # confirm which parser is actually intended.
    return "sat_id", "cat_id", "date"
def get_classifier_paths(model_dir):
    """
    Walk *model_dir* and pick out the four Caffe model artifacts by their
    filename suffix.

    When several files share a suffix, the last one visited wins.

    :param model_dir: directory containing the model files
    :return: (caffemodel, deploy_file, mean_file, labels_file) — any entry
             is None when no matching file was found
    """
    caffemodel = deploy_file = mean_file = labels_file = None
    for root, _dirs, names in os.walk(model_dir):
        for name in names:
            candidate = os.path.join(root, name)
            if name.endswith(CAFFEMODEL_SUFFIX):
                caffemodel = candidate
            elif name.endswith(DEPLOY_FILE_SUFFIX):
                deploy_file = candidate
            elif name.endswith(MEAN_FILE_SUFFIX):
                mean_file = candidate
            elif name.endswith(LABELS_FILE_SUFFIX):
                labels_file = candidate
    return caffemodel, deploy_file, mean_file, labels_file
def calc_tile_ranges(num_tiles, num_procs):
    """
    Split *num_tiles* into *num_procs* contiguous, near-equal ranges.

    :param num_tiles: total number of tiles
    :param num_procs: number of worker processes
    :return: integer array of num_procs+1 boundaries; process i handles
             tiles [tile_ranges[i], tile_ranges[i+1])
    """
    # BUG FIX: floor division — plain `/` yields a float under Python 3
    # (identical result under Python 2 for ints).
    tiles_per_proc = num_tiles // num_procs
    # BUG FIX: `int` instead of the alias `np.int`, removed in NumPy >= 1.24.
    tile_ranges = np.zeros(num_procs + 1, dtype=int)
    for i in range(num_procs):
        # the first (num_tiles % num_procs) processes take one extra tile
        extra = 1 if i < num_tiles % num_procs else 0
        tile_ranges[i + 1] = tile_ranges[i] + tiles_per_proc + extra
    return tile_ranges
def stretchData(data, pct=2):
    """
    Linear percentile stretch: map the [pct, 100-pct] percentile range of
    *data* onto roughly 0..255.
    """
    low, high = np.percentile(data, [pct, 100 - pct])
    # the +0.001 guards against division by zero for constant input
    return 255 * (data - low) / (high - low + 0.001)
def tile_list_classifier(tile_list, args, image_name, item_date, sat_id, cat_id, mean_files, caffemodels, deploy_files, labels_files, bands, threshold_dict):
    """
    Classify every tile in *tile_list* with an ensemble of Caffe models.

    Builds one CaffeBatchClassifier shared by all tiles, re-opens the source
    raster, then runs classify_broad_area_multi_process() tile by tile.

    :param tile_list: tile descriptors; each tile is indexed tile[0]..tile[3]
        and passed to GDALImage.readTile — presumably (x, y, width, height),
        TODO confirm against GDALImage.nextTile
    :param args: validated CLI namespace (tif path, gpu_flag, ...)
    :param threshold_dict: {class name: detection threshold}
    :raises RuntimeError: when a tile has fewer bands than the model expects
    """
    # Create classifier object (one ensemble for the whole tile list)
    classifier = CaffeBatchClassifier(
        caffe_models=caffemodels,
        deploy_files=deploy_files,
        label_files=labels_files,
        mean_files=mean_files,
        gpu_flag=args.gpu_flag)
    # open image
    gdal_image = GDALImage(args.tif, RASTER_DIM, RASTER_DIM, strideX=RASTER_STRIDE, strideY=RASTER_STRIDE, bands=bands, padWithZeros=True)
    for tile in tile_list:
        timerStart = time.time()
        image = gdal_image.readTile( tile[0], tile[1], tile[2], tile[3] )
        # fail fast: the model cannot run on imagery with fewer bands
        if image.shape[0] < classifier.get_caffe_num_channels():
            log.info("Exception: Cannot run imagery with fewer bands than Caffe model.")
            raise RuntimeError
        classify_broad_area_multi_process(gdal_image, image, tile[0], tile[1], args,
                                          image_name, item_date, sat_id, cat_id,
                                          mean_files, caffemodels, deploy_files, labels_files,
                                          classifier, args.gpu_flag, threshold_dict)
        timerEnd = time.time()
        log.info("Time for tile: "+str(timerEnd-timerStart))
def set_class_thresholds(labels_files, global_threshold, class_threshold_dict):
    """
    Build {class name (lowercase): threshold} from a model labels file.

    Every label starts at *global_threshold*; entries of
    *class_threshold_dict* (already lowercase keys) override it.

    :param labels_files: list of labels-file paths; all models are assumed
        to share the same labels, so only the first file is read
    :param global_threshold: default threshold for every class
    :param class_threshold_dict: per-class overrides
    :return: dict mapping lowercase class name -> threshold
    :raises RuntimeError: when an override names an unknown class
    """
    # Assume all models have the same labels file
    label_file_name = labels_files[0]
    thresh_dict = {}
    with open(label_file_name, 'r') as label_file:
        for line in label_file:
            # BUG FIX: rstrip('\n') instead of line[:-1], which chopped the
            # last character of the final label when the file had no
            # trailing newline
            thresh_dict[line.rstrip('\n').lower()] = global_threshold
    # .items() works on both Python 2 and 3 (iteritems() is Python-2-only)
    for class_name, threshold in class_threshold_dict.items():
        if class_name in thresh_dict:
            thresh_dict[class_name.lower()] = threshold
        else:
            log.info("ERROR: Class name {} not found in model label file {}.".format(class_name, label_file_name))
            raise RuntimeError("Class name {} not found in model label file {}.".format(class_name, label_file_name))
    return thresh_dict
#@profile(filename="ens_gpu.prof", profiler="cProfile")
def process_args(args):
time0 = time.time()
image_name = args.image_name or args.tif
caffemodels = []
deploy_files = []
mean_files = []
labels_files = []
for i in range(len(args.model_paths)):
caffemodel, deploy_file, mean_file, labels_file = get_classifier_paths(args.model_paths[i])
caffemodels.append(caffemodel)
deploy_files.append(deploy_file)
mean_files.append(mean_file)
labels_files.append(labels_file)
if args.class_thresholds == None:
args.class_thresholds = {}
threshold_dict = set_class_thresholds(labels_files, args.threshold, args.class_thresholds)
log.info("Thresholds: {}".format(threshold_dict))
init_vector_dir(args.output_vectors_dir)
# Open this once here to generate the list of tiles to distribute
gdal_image = GDALImage(args.tif, RASTER_DIM, RASTER_DIM, strideX=RASTER_STRIDE, strideY=RASTER_STRIDE, padWithZeros=True)
# Parse metadata if available
item_date, cat_id, sat_id = ( None, None, None)
if args.imd:
try:
item_date, cat_id, sat_id = parse_imd(args.imd)
except Exception, e:
log.info("Could not parse imd file "+str(e))
# Set bounding box before tile generation
if args.bounding_box:
bb = [float(val.strip()) for val in args.bounding_box.split(" ")]
gdal_image.setGeoBoundingBox(bb[0],bb[1],bb[2],bb[3])
# Return the list of tiles
all_tiles = list( gdal_image.nextTile() )
# Calculate the number of tiles
num_tiles = len(all_tiles)
num_procs = args.num_processes
proc_tile_ranges = calc_tile_ranges(num_tiles, num_procs)
log.info("Time to generate {} tiles {} seconds".format(num_tiles, time.time() - time0))
time0 = time.time()
if ( args.num_processes < 2 ):
tile_list_classifier(all_tiles, args, image_name, item_date, sat_id, cat_id, mean_files,
caffemodels, deploy_files, labels_files, args.bands, threshold_dict)
else:
manager = mp.Manager()
pool = mp.Pool(processes=args.num_processes)
log.warn("pool size = %s" % str(args.num_processes))
# For each process launch one async job
for i in range(num_procs):
tile_list = all_tiles[proc_tile_ranges[i]:proc_tile_ranges[i+1]]
# Each process needs to instantiate its own gdal image due to native code dependencies, file handles etc.,
# it cannot be passed in to apply_async
pool.apply_async(tile_list_classifier,
(tile_list, args, image_name, item_date, sat_id, cat_id, mean_files,
caffemodels, deploy_files, labels_files, args.bands, threshold_dict))
pool.close()
pool.join()
log.info("Total detection time {} seconds".format(time.time() - time0))
# close geojson results
with open( os.path.join( args.output_vectors_dir, OUTPUT_VECTORS_FILE ), "a") as fout:
fout.write("] }")
time0 = time.time()
if args.zip_vectors != None and args.zip_vectors.lower() == "true":
zip_file_cnt = len(glob.glob(os.path.join(args.output_vectors_dir,"*.json")))
log.info("Start to zip {0} json files".format(zip_file_cnt))
zip_vectors_file(args.output_vectors_dir, args.output_vectors_zip_path)
log.info("Time to zip | |
trajectory:
b_2=np.concatenate((b_2,bCrrnt_2),axis=0) # cm
# Total log10 of two important ratios; dimensionless :
larmR_b_2=np.concatenate((larmR_b_2,larmR_bCrrnt_2),axis=0)
uPot_enrgKin_2=np.concatenate((uPot_enrgKin_2,uPot_enrgKinCrrnt_2),axis=0)
# Total values deltaPapprch_2 (g*cm/sec):
dpxApprch_2=np.concatenate((dpxApprch_2,dpApprch_2Crrnt[0,:]),axis=0)
dpyApprch_2=np.concatenate((dpyApprch_2,dpApprch_2Crrnt[1,:]),axis=0)
dpzApprch_2=np.concatenate((dpzApprch_2,dpApprch_2Crrnt[2,:]),axis=0)
# print 'trackNumb_2:%d: shapes: b=%d, larmR_b_2=%d, uPot=%d, dpx=%d, dpy=%d, dpz=%d' % \
# (trackNumb_2,b.shape[0],larmR_b_2.shape[0],uPot_enrgKin_2.shape[0], \
# dpxApprch_2.shape[0],dpyApprch_2.shape[0],dpzApprch_2.shape[0])
#
# To draw TMTdpx trajectory (for checking only):
#
if maxAbsDpxApprch_2 < totAbsDpxApprch_2:
maxAbsDpxApprch_2=totAbsDpxApprch_2
indxAmaxAbsDpxApprch_2=iA
indxBmaxAbsDpxApprch_2=iB
trackNumbMaxAbsDpxApprch_2=trackNumb
prtclCoorMaxAbsDpx_2=prtclCoorCrrnt_2.copy()
rhoMaxAbsDpxTurn_2=rhoCrrnt[iA,iB]
rhoLarmorMaxAbsDpxTurn_2=rho_larm[iA,iB]
print 'iA=%d, iB=%d: track %d, points %d' % (indxAmaxAbsDpxApprch_2,\
indxBmaxAbsDpxApprch_2,trackNumbMaxAbsDpxApprch_2, \
pointTrack[trackNumbMaxAbsDpxApprch_2])
print 'timePoints.shape: ', (prtclCoorMaxAbsDpx_2.shape[0], \
. prtclCoorMaxAbsDpx_2.shape[1])
# End of all calculations for approach_2
lastTrackNumber_2=trackNumb_2+1 # quantity of tracks = trackNumb_2 + 1!
sumPoints_2 += pointTrack_2[trackNumb_2]
timeEnd=os.times()
cpuTime_2[trackNumb_2]=1.e+6*(float(timeEnd[0])-float(timeStart[0])) # CPU time, mks
cpuTimeTotal += cpuTime_2[trackNumb_2]
#
#------- End of approach_2 --------------
#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Approach_3: dragging with averaging over nLarmorAvrgng larmor rotation +
# "Magnus expansion" method to calculate the transferred momenta
#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
#------- Start of calculations for approach_3 --------------
#
if runFlagApproach_3 == 1:
timeStart=os.times()
if trackNumb_3 == 0:
rhoFirstTurn=rhoCrrnt[iA,iB]
rhoLarmorFirstTurn=rho_larm[iA,iB]
# 6-vectors for ion and electron and distance 'b' between them for the first trajectories
# (for checking only; indices 0-5 for electron, indices 6-11 for ion, index=12 for 'b',
# index=13 for action and index=14 for dy_gc):
prtclCoor_3=np.zeros((15,timePoints_3[trackNumb_3]))
prtclCoorCrrnt_3=np.zeros((15,timePoints_3[trackNumb_3])) # current trajectory
prtclCoorMaxAbsDp_3=np.zeros((15,timePoints_3[trackNumb_3])) # trajectory TMTdpx
# Current distance from origin of the coordinate system to electron along the trajectory; cm
bCrrnt_3=np.zeros(timePoints_3[trackNumb_3]) # cm
# Current log10 of two important ratios; dimensionless:
larmR_bCrrnt_3=np.zeros(timePoints_3[trackNumb_3]) # ratio R_larmor/b; dimensionless
uPot_enrgKinCrrnt_3=np.zeros(timePoints_3[trackNumb_3]) # ratio potential_energy/kinetic_energy; dimensionless
dpApprch_3Crrnt=np.zeros((3,timePoints_3[trackNumb_3])) # g*cm/sec
for m in range(6):
z_ionCrrnt_3[m]=0. # Current initial zero-vector for ion
z_elecCrrnt_3[m]=0. # Zeroing out of vector for electron
# Current initial vector for electron:
z_elecCrrnt_3[Ix]=rhoCrrnt[iA,iB]+larmorRadius[trackNumb_3] # x, cm
z_elecCrrnt_3[Iz]=-halfLintr[iA,iB] # z, cm
z_elecCrrnt_3[Ipy]=m_elec*evTran[iA,iB] # py, g*cm/sec
z_elecCrrnt_3[Ipz]=m_elec*eVrmsLong # pz, g*cm/sec
z_elecCrrnt_gc=toGuidingCenter(z_elecCrrnt_3) # transfer to system of guiding center
# if iA == 0 and iB == 0:
# print 'z_elecCrrnt_3: ', z_elecCrrnt_3
# print 'z_elecCrrnt_gc: ', z_elecCrrnt_gc
#-----------------------------------------------
# Main action - dragging of the current trajectories (for given i and j)
#
for k in range(int(timePoints_3[trackNumb_3])):
# if k < 100:
# print 'k=%d: x=%e, y=%e, z=%e' % (k,1.e+4*z_elecCrrnt_gc[0],1.e+4*z_elecCrrnt_gc[2],1.e+4*z_elecCrrnt_gc[4])
# if k > timePoints_2[trackNumb_3]-100:
# print 'k=%d: x=%e, y=%e, z=%e' % (k,1.e+4*z_elecCrrnt_gc[0],1.e+4*z_elecCrrnt_gc[2],1.e+4*z_elecCrrnt_gc[4])
# z_elecCrrnt_gc=matr_elec_3.dot(z_elecCrrnt_gc) # electron's dragging for first half timeStep
# z_ionCrrnt_3=matr_ion_3.dot(z_ionCrrnt_3) # ion's dragging for first half timeStep
z_elecCrrnt_gc=np.dot(matr_elec_3,z_elecCrrnt_gc) # electron's dragging for first half timeStep
z_ionCrrnt_3=np.dot(matr_ion_3,z_ionCrrnt_3) # ion's dragging for first half timeStep
# dragging both paticles through interaction point:
dpIon,dpElec,action,dy_gc=MagnusExpansionCollision(z_elecCrrnt_gc,z_ionCrrnt_3,timeStep_3)
# if trackNumb_3 == 0:
# print 'point %d: dpxIon=%e, dpyIon=%e, dpzIon=%e, dy_gc=%e' % \
# (pointTrack_3[0],dpIon[0],dpIon[1],dpElec[2],dy_gc)
for ic in range(3):
z_ionCrrnt_3[2*ic+1] += dpIon[ic]
z_elecCrrnt_gc[2*ic+1] += dpElec[ic]
# Current values to calculate deltaPapprch_3:
dpApprch_3Crrnt[ic,k]=dpIon[ic] # g*cm/sec
z_elecCrrnt_gc[2] += dy_gc # cm
# z_elecCrrnt_gc=matr_elec_2.dot(z_elecCrrnt_gc) # electron's dragging for second half timeStep
# z_ionCrrnt_2=matr_ion_2.dot(z_ionCrrnt_2) # ion's dragging for second half timeStep
z_elecCrrnt_gc=np.dot(matr_elec_3,z_elecCrrnt_gc) # electron's dragging for second half timeStep
z_ionCrrnt_2=np.dot(matr_ion_3,z_ionCrrnt_3) # ion's dragging for second half timeStep
# crrntPoint=pointTrack_3[trackNumb_3]
# if iA == 0 and iB == 0 and crrntPoint < 10:
# print 'k, z_ionCrrnt_3', (k,z_ionCrrnt_3)
z_elecCrrnt_3=fromGuidingCenter(z_elecCrrnt_gc) # transfer from system of guiding center
# Current distance between ion and electron; cm:
bCrrnt_3[k]=np.sqrt((z_ionCrrnt_3[0]-z_elecCrrnt_3[0])**2+ \
(z_ionCrrnt_3[2]-z_elecCrrnt_3[2])**2+ \
(z_ionCrrnt_3[4]-z_elecCrrnt_3[4])**2)
# Current log10 of two important ratios:
larmR_bCrrnt_3[k]=math.log10(rho_larm[iA,iB]/bCrrnt_3[k]) # dimensionless
if maxLarmR_b_3 < larmR_bCrrnt_3[k]:
maxLarmR_b_3=larmR_bCrrnt_3[k]
if minLarmR_b_3 > larmR_bCrrnt_3[k]:
minLarmR_b_3=larmR_bCrrnt_3[k]
uPot_enrgKinCrrnt_3[k]=math.log10((q_elec**2/bCrrnt_3[k])/kinEnergy[iA,iB]) # dimensionless
if maxUpot_enrgKin_3 < uPot_enrgKinCrrnt_3[k]:
maxUpot_enrgKin_3=uPot_enrgKinCrrnt_3[k]
if minUpot_enrgKin_3 > uPot_enrgKinCrrnt_3[k]:
minUpot_enrgKin_3=uPot_enrgKinCrrnt_3[k]
# To draw future TMTdpx trajectory (for checking only):
for ic in range(6):
prtclCoorCrrnt_3[ic,pointTrack_3[trackNumb_3]]=z_elecCrrnt_3[ic] # 6-vector for electron
prtclCoorCrrnt_3[6+ic,pointTrack_3[trackNumb_3]]=z_ionCrrnt_3[ic] # 6-vector for ion
prtclCoorCrrnt_3[12,pointTrack_3[trackNumb_3]]=bCrrnt_3[k] # cm
prtclCoorCrrnt_3[13,pointTrack_3[trackNumb_3]]=action # g*cm^2/sec
prtclCoorCrrnt_3[14,pointTrack_3[trackNumb_3]]=dy_gc # cm
# To draw only first trajectory (for checking only):
if trackNumb_3 == 0:
for ic in range(6):
prtclCoor_3[ic,pointTrack_3[trackNumb_3]]=z_elecCrrnt_3[ic] # 6-vector for electron
prtclCoor_3[6+ic,pointTrack_3[trackNumb_3]]=z_ionCrrnt_3[ic] # 6-vector for ion
prtclCoor_3[12,pointTrack_3[trackNumb_3]]=bCrrnt_3[k] # cm
prtclCoor_3[13,pointTrack_3[trackNumb_3]]=action # g*cm^2/sec
prtclCoor_3[14,pointTrack_3[trackNumb_3]]=dy_gc # cm
# crrntPoint=pointTrack_3[trackNumb_3]
# if crrntPoint < 100 or crrntPoint > 1700:
# print 'Point %d: x=%e, y=%e, z=%e' % \
# (crrntPoint,1.e+7*prtclCoor_3[6,crrntPoint],1.e+7*prtclCoor_3[8,crrntPoint], \
# 1.e+7*prtclCoor_3[10,crrntPoint])
pointTrack_3[trackNumb_3] += 1
#
# Total transferred dpx for current track:
#
totAbsDpxApprch_3 += abs(dpFactor*dpApprch_3Crrnt[0,k])
# End of dragging of the current trajectory
#-----------------------------------------------
#
# Accumulate transferred momenta and other data:
#
if trackNumb_3 == 0:
# First definition of the total distance from origin of the coordinate system to electron along the trajectory:
b_3=bCrrnt_3 # cm
# First definition of the log10 of two important ratios; dimensionless:
larmR_b_3=larmR_bCrrnt_3
uPot_enrgKin_3=uPot_enrgKinCrrnt_3
# First definition of the values deltaPapprch_3:
dpxApprch_3=dpApprch_3Crrnt[0,:] # g*cm/sec
dpyApprch_3=dpApprch_3Crrnt[1,:] # g*cm/sec
dpzApprch_3=dpApprch_3Crrnt[2,:] # g*cm/sec
else:
# Total distance from origin of the coordinate system to electron along the trajectory:
b_3=np.concatenate((b_3,bCrrnt_3),axis=0) # cm
# Total log10 of two important ratios; dimensionless :
larmR_b_3=np.concatenate((larmR_b_3,larmR_bCrrnt_3),axis=0)
uPot_enrgKin_3=np.concatenate((uPot_enrgKin_3,uPot_enrgKinCrrnt_3),axis=0)
# Total values deltaPapprch_3:
dpxApprch_3=np.concatenate((dpxApprch_3,dpApprch_3Crrnt[0,:]),axis=0) # g*cm/sec
dpyApprch_3=np.concatenate((dpyApprch_3,dpApprch_3Crrnt[1,:]),axis=0) # g*cm/sec
dpzApprch_3=np.concatenate((dpzApprch_3,dpApprch_3Crrnt[2,:]),axis=0) # g*cm/sec
# print 'trackNumb_3:%d: shapes: b=%d, larmR_b_3=%d, uPot=%d, dpx=%d, dpy=%d, dpz=%d' % \
# (trackNumb_3,b.shape[0],larmR_b_3.shape[0],uPot_enrgKin_3.shape[0], \
# dpxApprch_3.shape[0],dpyApprch_3.shape[0],dpzApprch_3.shape[0])
#
# To draw TMTdpx trajectory (for checking only):
#
if maxAbsDpxApprch_3 < totAbsDpxApprch_3:
maxAbsDpxApprch_3=totAbsDpxApprch_3
indxAmaxAbsDpxApprch_3=iA
indxBmaxAbsDpxApprch_3=iB
trackNumbMaxAbsDpxApprch_3=trackNumb
prtclCoorMaxAbsDpx_3=prtclCoorCrrnt_3.copy()
rhoMaxAbsDpxTurn_3=rhoCrrnt[iA,iB]
rhoLarmorMaxAbsDpxTurn_3=rho_larm[iA,iB]
print 'iA=%d, iB=%d: track %d, points %d' % (indxAmaxAbsDpxApprch_3,\
indxBmaxAbsDpxApprch_3,trackNumbMaxAbsDpxApprch_3,pointTrack[trackNumbMaxAbsDpxApprch_3])
print 'timePoints.shape: ', (prtclCoorMaxAbsDpx_3.shape[0],prtclCoorMaxAbsDpx_3.shape[1])
# End of all calculations for approach_3
lastTrackNumber_3=trackNumb_3+1 # quantity of tracks = trackNumb_2 + 1!
sumPoints_3 += pointTrack_3[trackNumb_3]
timeEnd=os.times()
cpuTime_3[trackNumb_3]=1.e+6*(float(timeEnd[0])-float(timeStart[0])) # CPU time , mks
cpuTimeTotal += cpuTime_3[trackNumb_3]
# for k in range (int(pointTrack_3[0])):
# print 'Point %d: dpxIon=%e, dpyIon=%e, dpzIon=%e' % \
# (k,dpxApprch_3[k],dpyApprch_3[k],dpzApprch_3[k])
#
#------- End of approach_3 --------------
#
# Final summaries: per-approach track/point counts and total CPU time.
if runFlagApproach_1 == 1:
   print 'Approach_1: maxYcoorElec=%e mkm, maxYcoorIon=%e nm' % (1.e+4*maxYcoorElec,1.e+7*maxYcoorIon)
   print 'Approach_1: for %d tracks number of points is %d' % (lastTrackNumber,sumPoints)
if runFlagApproach_2 == 1:
   print 'Approach_2: for %d tracks number of points is %d' % (lastTrackNumber_2,sumPoints_2)
# for i in range(b_2LenFirstTrack):
#    print 'action(%d)=%e' % (i,prtclCoor_2[13,i])
if runFlagApproach_3 == 1:
   print 'Approach_3: for %d tracks number of points is %d' % (lastTrackNumber_3,sumPoints_3)
# for i in range(lastTrackNumber):
#    print 'Track %d: larmor turns=%d, cpuTime(mks)=%e, time per turn(mks)=%6.1f' % \
#          (i,larmorNumber[i],cpuTime[i],cpuTime[i]/larmorNumber[i])
print 'cpuTimeTotal(mksec) = %e' % cpuTimeTotal
#
# First track: to compare distances between particles for approach_1 and approach_2,3 (figure 325):
#
# Convert the accumulated per-point distances to arrays and record how many
# points the first track contributed, for each approach that was run.
if runFlagApproach_1 == 1:
   bArray=np.asarray(b)
   bLenFirstTrack=int(timePoints[0])
   print 'First track length for b: %d' % bLenFirstTrack
if runFlagApproach_2 == 1:
   b_2Array=np.asarray(b_2)
   b_2LenFirstTrack=int(timePoints_2[0])
   print 'First track length for b_2: %d' % b_2LenFirstTrack
if runFlagApproach_3 == 1:
   b_3Array=np.asarray(b_3)
   b_3LenFirstTrack=int(timePoints_3[0])
   print 'First track length for b_3: %d' % b_3LenFirstTrack
#
# Calculation of the difference for distance beteween electron and ion for approach_1 and approach_2:
#
# For each point m of the approach_2 track, locate the approach_1 points that
# bracket the same z-coordinate (prtclCoor[4,*]) and linearly interpolate the
# approach_1 distance there; diff_b2[m] is interpolated value minus the
# approach_2 value.  nStart lets the inner scan resume near the previous match
# (assumes z increases along both tracks -- TODO confirm).
if runFlagApproach_1 == 1 and runFlagApproach_2 == 1:
   diff_b2=np.zeros(b_2LenFirstTrack)
   print 'b_2LenFirstTrack=%d' % b_2LenFirstTrack
   k=0
   nStart=0
   for m in range(b_2LenFirstTrack):
      for n in range(nStart,bLenFirstTrack):
         # Exact z match: take the direct difference.
         if prtclCoor[4,n] == prtclCoor_2[4,m]:
            diff_b2[m]=bArray[n]-b_2Array[m]
            nStart=n-1
            break
         # First approach_1 point past the target z: interpolate between n-1 and n.
         if prtclCoor[4,n] > prtclCoor_2[4,m]:
            if n == 0:
               diff_b2[m]=bArray[n]-b_2Array[m]
            else:
               bCurrent=bArray[n-1]+(bArray[n]-bArray[n-1])*(prtclCoor_2[4,m]-prtclCoor[4,n-1])/(prtclCoor[4,n]-prtclCoor[4,n-1])
               diff_b2[m]=bCurrent-b_2Array[m]
            nStart=n-1
            break
#
# First track: to compare distances between particles for approach_1 and approach_3 (figure 335):
#
if runFlagApproach_1 == 1 and runFlagApproach_3 == 1:
   diff_b3=np.zeros(b_3LenFirstTrack)
   print 'b_3LenFirstTrack=%d' % b_3LenFirstTrack
#
# Calculation of the difference for distance beteween electron and ion for approach_1 and approach_3:
#
# Same z-matching/interpolation scheme as the approach_2 comparison above,
# applied to the approach_3 first track.
   k=0
   nStart=0
   for m in range(b_3LenFirstTrack):
      for n in range(nStart,bLenFirstTrack):
         # Exact z match: take the direct difference.
         if prtclCoor[4,n] == prtclCoor_3[4,m]:
            diff_b3[m]=bArray[n]-b_3Array[m]
            nStart=n-1
            break
         # First approach_1 point past the target z: interpolate between n-1 and n.
         if prtclCoor[4,n] > prtclCoor_3[4,m]:
            if n == 0:
               diff_b3[m]=bArray[n]-b_3Array[m]
            else:
               bCurrent=bArray[n-1]+(bArray[n]-bArray[n-1])*(prtclCoor_3[4,m]-prtclCoor[4,n-1])/(prtclCoor[4,n]-prtclCoor[4,n-1])
               diff_b3[m]=bCurrent-b_3Array[m]
            nStart=n-1
            break
# Binning setup for the data-processing sections below.
nBins=80
timeStart=os.times()
###################################################
#
# Data processing of the first approach:
#: layout of arrays xA=uPot_enrgKin, yB=larmR_b to nBins channels
# and arrays zApprch1dpx, zApprch1dpy, zApprch1dpz to (nBins x nBins) channels
#
###################################################
if runFlagApproach_1 == 1:
xA=np.zeros(nBins)
xAedges=np.zeros(nBins+1)
xAnumb=np.zeros(nBins)
xAstep=(maxUpot_enrgKin_1-minUpot_enrgKin_1)/nBins
for i in range(nBins+1):
xAedges[i]=minUpot_enrgKin_1+xAstep*i
yB=np.zeros(nBins)
yBedges=np.zeros(nBins+1)
yBnumb=np.zeros(nBins)
yBstep=(maxLarmR_b_1-minLarmR_b_1)/nBins
for i in range(nBins+1):
yBedges[i]=minLarmR_b_1+yBstep*i
zApprch1dpNumb=np.zeros((nBins,nBins))
zApprch1dpx=np.zeros((nBins,nBins))
zApprch1dpy=np.zeros((nBins,nBins))
zApprch1dpz=np.zeros((nBins,nBins))
for nPoint in range(int(sumPoints)):
for iA in range(nBins):
searchAflag=0
if (xAedges[iA] <= uPot_enrgKin[nPoint] < xAedges[iA+1]):
if xAnumb[iA] == 0:
xA[iA]=uPot_enrgKin[nPoint] # log10(Upot/Ekin)
else:
xA[iA]=(xA[iA]*xAnumb[iA]+uPot_enrgKin[nPoint])/(xAnumb[iA]+1) # averaging inside bin iA
xAnumb[iA] += 1
searchAflag=1
break
if searchAflag == 0:
xA[nBins-1]=(xA[nBins-1]*xAnumb[nBins-1]+uPot_enrgKin[nPoint])/(xAnumb[nBins-1]+1) # averaging inside bin iA
xAnumb[nBins-1] += 1
for iB in range(nBins):
searchBflag=0
if (yBedges[iB] <= larmR_b[nPoint] < yBedges[iB+1]):
if yBnumb[iB] == 0:
yB[iB]=larmR_b[nPoint] # log10(Rlarm/b)
else:
yB[iB]=(yB[iB]*yBnumb[iB]+larmR_b[nPoint])/(yBnumb[iB]+1) # averaging inside bin iB
yBnumb[iB] += 1
searchBflag=1
break
if searchBflag == 0:
yB[nBins-1]=(yB[nBins-1]*yBnumb[nBins-1]+larmR_b[nPoint])/(yBnumb[nBins-1]+1) # averaging inside bin iB
yBnumb[nBins-1] += 1
| |
import sys
import os
import time
from json_tricks.np import dump, load
from functools import reduce
import numpy as np
import tensorflow as tf
from sklearn.linear_model import LinearRegression
# from scipy.sparse import hstack, csr_matrix, csr
import pandas as pd
import edward as ed
from edward.models import Normal
if "../modules" not in sys.path:
sys.path.append("../modules")
from preprocess import *
from ScipyOptimizerInterface import ScipyOptimizerInterface
def run_sklearn_optim(optimizer, feed_dict, sess, loss, print_freq = 10):
    '''Drive a scipy-style optimizer while periodically echoing the loss.
    INPUTS:
        optimizer: optimizer op
        feed_dict: values fed to the graph on each evaluation
        sess: tf session
        loss: loss op
        print_freq: print loss once per n iterations
    OUTPUTS:
        dict of info on optimization results'''
    iter_count = [0]  # mutable cell so the callback can update the counter

    def _report(cur_loss):
        if iter_count[0] % print_freq == 0:
            print(cur_loss)
            sys.stdout.flush()  # keep long-running logs visible immediately
        iter_count[0] += 1

    return optimizer.minimize(sess, feed_dict=feed_dict, fetches=[loss],
                              loss_callback=_report)
def make_sparse_tensor(csr_mat):
    '''Convert a scipy CSR sparse matrix into an equivalent tf.SparseTensor.'''
    coo = csr_mat.tocoo()
    # (nnz, 2) array of [row, col] coordinate pairs.
    indices = np.stack([coo.row, coo.col], axis=1)
    values = tf.to_float(coo.data)
    return tf.SparseTensor(indices=indices, values=values, dense_shape=coo.shape)
def update_param_dict(defaults, new_vals):
    '''Return ``defaults`` overridden by ``new_vals`` without mutating the input.

    INPUTS:
        defaults: dict of default parameter values
        new_vals: dict of overrides (keys must be a subset of defaults'), or None
    OUTPUTS:
        dict with defaults overridden by new_vals
    RAISES:
        AssertionError if new_vals contains a key absent from defaults
    '''
    merged = dict(defaults)  # copy so the caller's dict is left untouched
    if new_vals is not None:
        unknown = set(new_vals) - set(merged)
        assert not unknown, 'unknown parameter(s): {}'.format(sorted(unknown))
        merged.update(new_vals)
    return merged
def merge_dicts(orig_dict, add_dict):
    '''Return a copy of orig_dict with add_dict's key-value pairs folded in.'''
    merged = orig_dict.copy()
    merged.update(add_dict)
    return merged
def SSMD(pop1, pop2):
    '''Strictly standardized mean difference between two samples (NaN-tolerant).'''
    mean_diff = np.nanmean(pop1) - np.nanmean(pop2)
    pooled_sd = np.sqrt(np.nanstd(pop1) ** 2 + np.nanstd(pop2) ** 2)
    return mean_diff / pooled_sd
def make_eval_masks(LFC_mats, test_ind_sets, inverse = False):
    '''Build boolean masks selecting the LFC entries used for evaluation.

    Held-out entries (test_ind_sets) are excluded -- or exclusively selected
    when inverse=True -- and NaN observations are always excluded.'''
    if test_ind_sets is None:
        # nothing held out: use an empty index set for every dataset
        test_ind_sets = [np.array([], dtype = int) for _ in LFC_mats]
    eval_masks = []
    for LFC_mat, test_inds in zip(LFC_mats, test_ind_sets):
        mask = np.ones_like(LFC_mat, dtype=bool)
        mask[test_inds] = False
        if inverse: #if you want to evaluate on the test set
            mask = ~mask
        mask[np.isnan(LFC_mat)] = False
        eval_masks.append(mask)
    return eval_masks
def compute_hairpins_per_gene_CL(LFC_mats, sparse_mat, unique_hp_seqs, unique_CLs, unique_genes):
    '''Estimate number of measured hairpin LFCs per gene/CL or seed/CL
    INPUTS:
        LFC_mats: list of hairpin LFC mats
        sparse_mat: sparse matrix mapping hairpins to genes/seeds
        unique_hp_seqs: ordered list of unique hairpins
        unique_CLs: ordered list of unique CLs
        unique_genes: ordered list of unique genes
    OUTPUTS:
        matrix with number used hairpins per gene/CL
    '''
    # Lookup tables: hairpin sequence / CL name -> global row / column index.
    hp_ind_map = {name: ind for ind, name in enumerate(unique_hp_seqs)}
    CL_ind_map = {name: ind for ind, name in enumerate(unique_CLs)}
    # Accumulator [n_genes x n_CLs], summed over all datasets.
    n_hps_per_gene = np.zeros((len(unique_genes), len(unique_CLs)))
    for LFC_mat in LFC_mats:
        # Global indices of the hairpins and CLs present in this dataset.
        cur_hp_set = [hp_ind_map[x] for x in LFC_mat.index.values]
        cur_CL_set = [CL_ind_map[x] for x in LFC_mat.columns.values]
        # (genes x hairpins) . (hairpins x CLs non-NaN indicator): per CL, how many
        # measured hairpins map to each gene (weighted by the mapping matrix).
        cur_hps_per_gene = sparse_mat[cur_hp_set,:].transpose().dot(~np.isnan(LFC_mat))
        # Add this dataset's counts into the columns of its CLs.
        n_hps_per_gene[:, cur_CL_set] = n_hps_per_gene[:, cur_CL_set] + cur_hps_per_gene
    return(n_hps_per_gene)
def map_effects(scores, CL_inds, sparse_mat):
    '''Project per-CL scores through the sparse hairpin mapping.'''
    selected = tf.gather(scores, CL_inds)
    # (hairpins x targets) . (targets x CLs): transpose the gathered scores via adjoint_b.
    mapped = tf.sparse_tensor_dense_matmul(sparse_mat, selected, adjoint_a = False, adjoint_b = True)
    return mapped
#******************* DEFINE DEMETER2 MODEl CLASS *************************#
class demeter:
'''Class implementing a DEMETER2 model'''
def default_reg_params(self):
params = {
'hairpin_l2_lambda': 0,
'hp_unpred_l2_lambda': 0,
'CL_l2_lambda': 0,
'gene_l2_lambda': 0, #L2 penalty on across-CL avg
'rel_gene_l2_lambda': 0, #L2 penalty on deviation from mean
'seed_l2_lambda': 0,
'rel_seed_l2_lambda': 0 #L2 penalty on deviation from mean
}
return(params)
def default_optim_params(self):
params = {'precision': 'double',
'maxiter': 2000,
'print_freq': 50,
'ftol': 1e-7}
return(params)
def __init__(self, LFC_mats, gene_matrix, seed_matrix, gene_sets, data_names = None,
reg_params = None, optim_params = None, test_inds = None, log_file = None):
'''
Create a demeter model instance
INPUTS:
LFC_mats: List of matrices [hairpins x CLs] of observed LFC values, one per batch/dataset
gene_matrix: [n_hairpins, n_genes] gene-target mapping, as a scipy csr sparse matrix.
seed_matrix: [n_hairpins, n_seeds] seed-target mapping, as a scipy csr sparse matrix.
gene_sets: dict with two entries 'pos' and 'neg'. Each are arrays of Gene names specifying positive and negative control gene sets respectively
data_names: dict of names for different entities (genes, CLs, hps)
reg_params: dict of regularization parameters.
Specify optional lambdas [hairpin_l2_lambda, CL_l2_lambda, gene_l2_lambda, seed_l2_lambda, rel_gene_l2_lambda, rel_seed_l2_lambda]
optim_params: dict of optimization parameters
test_inds: list of tuples specifying indices in the LFC data matrices to set aside for testing (set to None if not using xval)
log_file: path of log file
'''
self.min_hairpins_per = 2 #minimum number of hairpins per gene/seed to use for estimation of gene/seed effects
self.min_slope = 0.01 #minimum slope term (prevents them from getting set to 0 during optimization)
reg_params = update_param_dict(self.default_reg_params(), reg_params)
self.reg_params = reg_params
optim_params = update_param_dict(self.default_optim_params(), optim_params)
self.optim_params = optim_params
if data_names is not None:
self.data_names = data_names
self.log_file = log_file
if self.log_file is not None:
self._log_file = open(log_file, 'w')
else:
self._log_file = None
#init containers for storing stats across training iters
self.R2_vals = {'train': [], 'test': [], 'train_ms': [], 'test_ms': []} #store R2 evals in a dict
self.loss_evals = []
self.SSMD = {'train': [], 'test': []}
self.gene_sets = gene_sets
if self.optim_params['precision'] == 'double':
self.float = tf.float64
elif self.optim_params['precision'] == 'single':
self.float = tf.float32
else:
raise('invalid float type')
self.test_inds = test_inds
self.all_CL_names = get_CL_names(LFC_mats)
self.all_CL_batches = get_CL_batches(LFC_mats)
self.all_hp_seqs = get_hp_names(LFC_mats)
self.all_hp_batches = get_hp_batches(LFC_mats)
self.n_CLs = len(data_names['CLs'])
self.n_CL_batches = len(self.all_CL_names)
self.n_hp_batches = len(self.all_hp_seqs)
#BUILD GRAPH
self.g = tf.Graph()
self.sess = tf.Session(graph = self.g)
with self.g.as_default():
self.n_hairpins, self.n_genes = gene_matrix.shape
_, self.n_seeds = seed_matrix.shape
#calculate number of genes and seeds with data for each CL
self.n_used_hairpins_per_gene = compute_hairpins_per_gene_CL(
LFC_mats, gene_matrix, data_names['hps'], data_names['CLs'], data_names['genes'])
self.n_targeted_genes = np.sum(self.n_used_hairpins_per_gene >= self.min_hairpins_per, axis = 0)
self.n_used_hairpins_per_seed = compute_hairpins_per_gene_CL(
LFC_mats, seed_matrix, data_names['hps'], data_names['CLs'], data_names['seeds'])
self.n_targeted_seeds = np.sum(self.n_used_hairpins_per_seed >= self.min_hairpins_per, axis = 0)
#define parameter inits
init_params = {
'gene_score': tf.zeros([self.n_CLs, self.n_genes], self.float),
'seed_score': tf.zeros([self.n_CLs, self.n_seeds], self.float),
'gene_score_avgs': tf.zeros([1, self.n_genes], self.float),
'seed_score_avgs': tf.zeros([1, self.n_seeds], self.float),
'CL_offset': tf.zeros([self.n_CL_batches, 1], self.float),
'CL_slope': tf.ones([self.n_CL_batches, 1], self.float),
'gene_slope': tf.ones([self.n_CLs, 1], self.float),
'CL_noise_vars': tf.ones([self.n_CL_batches, 1], self.float),
'hairpin_offset': tf.zeros([self.n_hp_batches, 1], self.float),
'hairpin_unpred': tf.zeros([self.n_hairpins, 1], self.float),
'guide_Geff': tf.ones([self.n_hairpins, 1], self.float),
'guide_Seff': tf.ones([self.n_hairpins, 1], self.float)
}
self.obs = [tf.placeholder(self.float, dset.shape, name = "obs_" + str(ii)) \
for ii, dset in enumerate(LFC_mats)]
self.eval_mask = [tf.placeholder('bool', dset.shape, name = "eval_mask_" + str(ii)) \
for ii, dset in enumerate(LFC_mats)]
#Define variables
self.gene_score = tf.Variable(init_params['gene_score'], dtype = self.float, name = 'gene_score')
self.seed_score = tf.Variable(init_params['seed_score'], dtype = self.float, name = 'seed_score')
self.gene_score_avgs = tf.Variable(init_params['gene_score_avgs'], dtype = self.float, name = 'gene_score_avgs')
self.seed_score_avgs = tf.Variable(init_params['seed_score_avgs'], dtype = self.float, name = 'seed_score_avgs')
self.CL_offset = tf.Variable(init_params['CL_offset'], dtype = self.float, name = 'CL_offset')
self.CL_slope = tf.Variable(init_params['CL_slope'], dtype = self.float, name = 'CL_slope')
self.gene_slope = tf.Variable(init_params['gene_slope'], dtype = self.float, name = 'gene_slope')
self.hairpin_offset = tf.Variable(init_params['hairpin_offset'], dtype = self.float, name = 'hairpin_offset')
self.hairpin_unpred = tf.Variable(init_params['hairpin_unpred'], dtype = self.float, name = 'hairpin_offset')
self.guide_Geff = tf.Variable(init_params['guide_Geff'], dtype = self.float, name = 'guide_Geff')
self.guide_Seff = tf.Variable(init_params['guide_Seff'], dtype = self.float, name = 'guide_Seff')
self.CL_noise_vars = tf.Variable(init_params['CL_noise_vars'], dtype = self.float, name = 'noise_vars')
self.n_Geffs = self.n_hairpins
self.n_Seffs = self.n_hairpins
#maps from name to index value
self.hp_ind_map = {name: ind for ind, name in enumerate(data_names['hps'])}
self.CL_ind_map = {name: ind for ind, name in enumerate(data_names['CLs'])}
#make list of sparse gene and seed maps for each LFC dataset
gene_maps = [self.make_sparse_submap(gene_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
seed_maps = [self.make_sparse_submap(seed_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
#op that is the per-CL gene effect scaled by gene-KD slope (used for re-estimating gene slope)
self.ind_gene_effects = tf.multiply(self.gene_score_avgs + self.gene_score, self.gene_slope)
#package a dict of the model params
mod_params = {
'CL_noise_vars': self.CL_noise_vars,
'CL_offset': self.CL_offset,
'CL_slope': self.CL_slope,
'guide_Seff': self.guide_Seff,
'guide_Geff': self.guide_Geff,
'hairpin_offset': self.hairpin_offset,
'hairpin_unpred': self.hairpin_unpred,
'gene_slope': self.gene_slope,
'seed_score_avgs': self.seed_score_avgs,
'seed_score': self.seed_score,
'gene_score_avgs': self.gene_score_avgs,
'gene_score': self.gene_score}
#LOOP OVER DATASETS AND BUILD SUBGRAPH FOR EACH
dataset_nLLs = []
dataset_SS = []
self.shRNA_R2 = []
self.shRNA_nLL = []
self.shRNA_oSS = []
self.pred = []
hp_offset = 0
CL_offset = 0
for ii in range(len(self.obs)):
cur_pred = self.get_dataset_pred(
mod_params,
gene_maps[ii],
seed_maps[ii],
LFC_mats[ii].index.values,
LFC_mats[ii].columns.values,
hp_offset,
CL_offset)
cur_nLL, cur_SS = self.get_dataset_LL(
mod_params,
self.obs[ii],
cur_pred,
self.eval_mask[ii],
CL_offset)
cur_shRNA_R2, cur_shRNA_nLL, cur_shRNA_SS = self.get_shRNA_R2(
mod_params,
self.obs[ii],
cur_pred,
CL_offset)
self.shRNA_R2.append(cur_shRNA_R2)
self.shRNA_nLL.append(cur_shRNA_nLL)
self.shRNA_oSS.append(cur_shRNA_SS)
dataset_nLLs.append(cur_nLL)
dataset_SS.append(cur_SS)
self.pred.append(cur_pred)
hp_offset += LFC_mats[ii].shape[0]
CL_offset += LFC_mats[ii].shape[1]
self.nLL = tf.add_n(dataset_nLLs) #sum negative log-like across datasets
tot_SS = tf.add_n(dataset_SS) #sum squared error
#LOOP OVER DATASETS AND BUILD GENE-AVG SUBGRAPHS
dataset_avg_nLLs = []
hp_offset = 0
CL_offset = 0
for ii in range(len(self.obs)):
cur_pred = self.get_dataset_pred(
mod_params,
gene_maps[ii],
seed_maps[ii],
LFC_mats[ii].index.values,
LFC_mats[ii].columns.values,
hp_offset,
CL_offset,
just_avg_scores = True)
cur_nLL, cur_SS = | |
except KeyError:
if base_mod is not None and code not in _numeric_corrected:
return base_mod._numeric[code]
else:
raise
_toupper = {
604: 42923,
609: 42924,
613: 42893,
614: 42922,
618: 42926,
620: 42925,
647: 42929,
669: 42930,
670: 42928,
1011: 895,
1319: 1318,
1321: 1320,
1323: 1322,
1325: 1324,
1327: 1326,
4304: 7312,
4305: 7313,
4306: 7314,
4307: 7315,
4308: 7316,
4309: 7317,
4310: 7318,
4311: 7319,
4312: 7320,
4313: 7321,
4314: 7322,
4315: 7323,
4316: 7324,
4317: 7325,
4318: 7326,
4319: 7327,
4320: 7328,
4321: 7329,
4322: 7330,
4323: 7331,
4324: 7332,
4325: 7333,
4326: 7334,
4327: 7335,
4328: 7336,
4329: 7337,
4330: 7338,
4331: 7339,
4332: 7340,
4333: 7341,
4334: 7342,
4335: 7343,
4336: 7344,
4337: 7345,
4338: 7346,
4339: 7347,
4340: 7348,
4341: 7349,
4342: 7350,
4343: 7351,
4344: 7352,
4345: 7353,
4346: 7354,
4349: 7357,
4350: 7358,
4351: 7359,
5112: 5104,
5113: 5105,
5114: 5106,
5115: 5107,
5116: 5108,
5117: 5109,
7296: 1042,
7297: 1044,
7298: 1054,
7299: 1057,
7300: 1058,
7301: 1058,
7302: 1066,
7303: 1122,
7304: 42570,
11507: 11506,
11559: 4295,
11565: 4301,
42593: 42592,
42649: 42648,
42651: 42650,
42897: 42896,
42899: 42898,
42903: 42902,
42905: 42904,
42907: 42906,
42909: 42908,
42911: 42910,
42913: 42912,
42915: 42914,
42917: 42916,
42919: 42918,
42921: 42920,
42933: 42932,
42935: 42934,
42937: 42936,
43859: 42931,
43888: 5024,
43889: 5025,
43890: 5026,
43891: 5027,
43892: 5028,
43893: 5029,
43894: 5030,
43895: 5031,
43896: 5032,
43897: 5033,
43898: 5034,
43899: 5035,
43900: 5036,
43901: 5037,
43902: 5038,
43903: 5039,
43904: 5040,
43905: 5041,
43906: 5042,
43907: 5043,
43908: 5044,
43909: 5045,
43910: 5046,
43911: 5047,
43912: 5048,
43913: 5049,
43914: 5050,
43915: 5051,
43916: 5052,
43917: 5053,
43918: 5054,
43919: 5055,
43920: 5056,
43921: 5057,
43922: 5058,
43923: 5059,
43924: 5060,
43925: 5061,
43926: 5062,
43927: 5063,
43928: 5064,
43929: 5065,
43930: 5066,
43931: 5067,
43932: 5068,
43933: 5069,
43934: 5070,
43935: 5071,
43936: 5072,
43937: 5073,
43938: 5074,
43939: 5075,
43940: 5076,
43941: 5077,
43942: 5078,
43943: 5079,
43944: 5080,
43945: 5081,
43946: 5082,
43947: 5083,
43948: 5084,
43949: 5085,
43950: 5086,
43951: 5087,
43952: 5088,
43953: 5089,
43954: 5090,
43955: 5091,
43956: 5092,
43957: 5093,
43958: 5094,
43959: 5095,
43960: 5096,
43961: 5097,
43962: 5098,
43963: 5099,
43964: 5100,
43965: 5101,
43966: 5102,
43967: 5103,
66776: 66736,
66777: 66737,
66778: 66738,
66779: 66739,
66780: 66740,
66781: 66741,
66782: 66742,
66783: 66743,
66784: 66744,
66785: 66745,
66786: 66746,
66787: 66747,
66788: 66748,
66789: 66749,
66790: 66750,
66791: 66751,
66792: 66752,
66793: 66753,
66794: 66754,
66795: 66755,
66796: 66756,
66797: 66757,
66798: 66758,
66799: 66759,
66800: 66760,
66801: 66761,
66802: 66762,
66803: 66763,
66804: 66764,
66805: 66765,
66806: 66766,
66807: 66767,
66808: 66768,
66809: 66769,
66810: 66770,
66811: 66771,
68800: 68736,
68801: 68737,
68802: 68738,
68803: 68739,
68804: 68740,
68805: 68741,
68806: 68742,
68807: 68743,
68808: 68744,
68809: 68745,
68810: 68746,
68811: 68747,
68812: 68748,
68813: 68749,
68814: 68750,
68815: 68751,
68816: 68752,
68817: 68753,
68818: 68754,
68819: 68755,
68820: 68756,
68821: 68757,
68822: 68758,
68823: 68759,
68824: 68760,
68825: 68761,
68826: 68762,
68827: 68763,
68828: 68764,
68829: 68765,
68830: 68766,
68831: 68767,
68832: 68768,
68833: 68769,
68834: 68770,
68835: 68771,
68836: 68772,
68837: 68773,
68838: 68774,
68839: 68775,
68840: 68776,
68841: 68777,
68842: 68778,
68843: 68779,
68844: 68780,
68845: 68781,
68846: 68782,
68847: 68783,
68848: 68784,
68849: 68785,
68850: 68786,
71872: 71840,
71873: 71841,
71874: 71842,
71875: 71843,
71876: 71844,
71877: 71845,
71878: 71846,
71879: 71847,
71880: 71848,
71881: 71849,
71882: 71850,
71883: 71851,
71884: 71852,
71885: 71853,
71886: 71854,
71887: 71855,
71888: 71856,
71889: 71857,
71890: 71858,
71891: 71859,
71892: 71860,
71893: 71861,
71894: 71862,
71895: 71863,
71896: 71864,
71897: 71865,
71898: 71866,
71899: 71867,
71900: 71868,
71901: 71869,
71902: 71870,
71903: 71871,
93792: 93760,
93793: 93761,
93794: 93762,
93795: 93763,
93796: 93764,
93797: 93765,
93798: 93766,
93799: 93767,
93800: 93768,
93801: 93769,
93802: 93770,
93803: 93771,
93804: 93772,
93805: 93773,
93806: 93774,
93807: 93775,
93808: 93776,
93809: 93777,
93810: 93778,
93811: 93779,
93812: 93780,
93813: 93781,
93814: 93782,
93815: 93783,
93816: 93784,
93817: 93785,
93818: 93786,
93819: 93787,
93820: 93788,
93821: 93789,
93822: 93790,
93823: 93791,
125218: 125184,
125219: 125185,
125220: 125186,
125221: 125187,
125222: 125188,
125223: 125189,
125224: 125190,
125225: 125191,
125226: 125192,
125227: 125193,
125228: 125194,
125229: 125195,
125230: 125196,
125231: 125197,
125232: 125198,
125233: 125199,
125234: 125200,
125235: 125201,
125236: 125202,
125237: 125203,
125238: 125204,
125239: 125205,
125240: 125206,
125241: 125207,
125242: 125208,
125243: 125209,
125244: 125210,
125245: 125211,
125246: 125212,
125247: 125213,
125248: 125214,
125249: 125215,
125250: 125216,
125251: 125217,
}
# Codepoints whose uppercase mapping was corrected in this Unicode version:
# membership here suppresses the fallback to ``base_mod._toupper`` in
# ``toupper()``.  Empty for this version.
_toupper_corrected = {
}
_tolower = {
895: 1011,
1318: 1319,
1320: 1321,
1322: 1323,
1324: 1325,
1326: 1327,
4295: 11559,
4301: 11565,
5024: 43888,
5025: 43889,
5026: 43890,
5027: 43891,
5028: 43892,
5029: 43893,
5030: 43894,
5031: 43895,
5032: 43896,
5033: 43897,
5034: 43898,
5035: 43899,
5036: 43900,
5037: 43901,
5038: 43902,
5039: 43903,
5040: 43904,
5041: 43905,
5042: 43906,
5043: 43907,
5044: 43908,
5045: 43909,
5046: 43910,
5047: 43911,
5048: 43912,
5049: 43913,
5050: 43914,
5051: 43915,
5052: 43916,
5053: 43917,
5054: 43918,
5055: 43919,
5056: 43920,
5057: 43921,
5058: 43922,
5059: 43923,
5060: 43924,
5061: 43925,
5062: 43926,
5063: 43927,
5064: 43928,
5065: 43929,
5066: 43930,
5067: 43931,
5068: 43932,
5069: 43933,
5070: 43934,
5071: 43935,
5072: 43936,
5073: 43937,
5074: 43938,
5075: 43939,
5076: 43940,
5077: 43941,
5078: 43942,
5079: 43943,
5080: 43944,
5081: 43945,
5082: 43946,
5083: 43947,
5084: 43948,
5085: 43949,
5086: 43950,
5087: 43951,
5088: 43952,
5089: 43953,
5090: 43954,
5091: 43955,
5092: 43956,
5093: 43957,
5094: 43958,
5095: 43959,
5096: 43960,
5097: 43961,
5098: 43962,
5099: 43963,
5100: 43964,
5101: 43965,
5102: 43966,
5103: 43967,
5104: 5112,
5105: 5113,
5106: 5114,
5107: 5115,
5108: 5116,
5109: 5117,
7312: 4304,
7313: 4305,
7314: 4306,
7315: 4307,
7316: 4308,
7317: 4309,
7318: 4310,
7319: 4311,
7320: 4312,
7321: 4313,
7322: 4314,
7323: 4315,
7324: 4316,
7325: 4317,
7326: 4318,
7327: 4319,
7328: 4320,
7329: 4321,
7330: 4322,
7331: 4323,
7332: 4324,
7333: 4325,
7334: 4326,
7335: 4327,
7336: 4328,
7337: 4329,
7338: 4330,
7339: 4331,
7340: 4332,
7341: 4333,
7342: 4334,
7343: 4335,
7344: 4336,
7345: 4337,
7346: 4338,
7347: 4339,
7348: 4340,
7349: 4341,
7350: 4342,
7351: 4343,
7352: 4344,
7353: 4345,
7354: 4346,
7357: 4349,
7358: 4350,
7359: 4351,
11506: 11507,
42592: 42593,
42648: 42649,
42650: 42651,
42893: 613,
42896: 42897,
42898: 42899,
42902: 42903,
42904: 42905,
42906: 42907,
42908: 42909,
42910: 42911,
42912: 42913,
42914: 42915,
42916: 42917,
42918: 42919,
42920: 42921,
42922: 614,
42923: 604,
42924: 609,
42925: 620,
42926: 618,
42928: 670,
42929: 647,
42930: 669,
42931: 43859,
42932: 42933,
42934: 42935,
42936: 42937,
66736: 66776,
66737: 66777,
66738: 66778,
66739: 66779,
66740: 66780,
66741: 66781,
66742: 66782,
66743: 66783,
66744: 66784,
66745: 66785,
66746: 66786,
66747: 66787,
66748: 66788,
66749: 66789,
66750: 66790,
66751: 66791,
66752: 66792,
66753: 66793,
66754: 66794,
66755: 66795,
66756: 66796,
66757: 66797,
66758: 66798,
66759: 66799,
66760: 66800,
66761: 66801,
66762: 66802,
66763: 66803,
66764: 66804,
66765: 66805,
66766: 66806,
66767: 66807,
66768: 66808,
66769: 66809,
66770: 66810,
66771: 66811,
68736: 68800,
68737: 68801,
68738: 68802,
68739: 68803,
68740: 68804,
68741: 68805,
68742: 68806,
68743: 68807,
68744: 68808,
68745: 68809,
68746: 68810,
68747: 68811,
68748: 68812,
68749: 68813,
68750: 68814,
68751: 68815,
68752: 68816,
68753: 68817,
68754: 68818,
68755: 68819,
68756: 68820,
68757: 68821,
68758: 68822,
68759: 68823,
68760: 68824,
68761: 68825,
68762: 68826,
68763: 68827,
68764: 68828,
68765: 68829,
68766: 68830,
68767: 68831,
68768: 68832,
68769: 68833,
68770: 68834,
68771: 68835,
68772: 68836,
68773: 68837,
68774: 68838,
68775: 68839,
68776: 68840,
68777: 68841,
68778: 68842,
68779: 68843,
68780: 68844,
68781: 68845,
68782: 68846,
68783: 68847,
68784: 68848,
68785: 68849,
68786: 68850,
71840: 71872,
71841: 71873,
71842: 71874,
71843: 71875,
71844: 71876,
71845: 71877,
71846: 71878,
71847: 71879,
71848: 71880,
71849: 71881,
71850: 71882,
71851: 71883,
71852: 71884,
71853: 71885,
71854: 71886,
71855: 71887,
71856: 71888,
71857: 71889,
71858: 71890,
71859: 71891,
71860: 71892,
71861: 71893,
71862: 71894,
71863: 71895,
71864: 71896,
71865: 71897,
71866: 71898,
71867: 71899,
71868: 71900,
71869: 71901,
71870: 71902,
71871: 71903,
93760: 93792,
93761: 93793,
93762: 93794,
93763: 93795,
93764: 93796,
93765: 93797,
93766: 93798,
93767: 93799,
93768: 93800,
93769: 93801,
93770: 93802,
93771: 93803,
93772: 93804,
93773: 93805,
93774: 93806,
93775: 93807,
93776: 93808,
93777: 93809,
93778: 93810,
93779: 93811,
93780: 93812,
93781: 93813,
93782: 93814,
93783: 93815,
93784: 93816,
93785: 93817,
93786: 93818,
93787: 93819,
93788: 93820,
93789: 93821,
93790: 93822,
93791: 93823,
125184: 125218,
125185: 125219,
125186: 125220,
125187: 125221,
125188: 125222,
125189: 125223,
125190: 125224,
125191: 125225,
125192: 125226,
125193: 125227,
125194: 125228,
125195: 125229,
125196: 125230,
125197: 125231,
125198: 125232,
125199: 125233,
125200: 125234,
125201: 125235,
125202: 125236,
125203: 125237,
125204: 125238,
125205: 125239,
125206: 125240,
125207: 125241,
125208: 125242,
125209: 125243,
125210: 125244,
125211: 125245,
125212: 125246,
125213: 125247,
125214: 125248,
125215: 125249,
125216: 125250,
125217: 125251,
}
# Codepoints whose lowercase mapping was corrected in this Unicode version:
# membership here suppresses the fallback to ``base_mod._tolower`` in
# ``tolower()``.  Empty for this version.
_tolower_corrected = {
}
_totitle = {
604: 42923,
609: 42924,
613: 42893,
614: 42922,
618: 42926,
620: 42925,
647: 42929,
669: 42930,
670: 42928,
1011: 895,
1319: 1318,
1321: 1320,
1323: 1322,
1325: 1324,
1327: 1326,
4304: 4304,
4305: 4305,
4306: 4306,
4307: 4307,
4308: 4308,
4309: 4309,
4310: 4310,
4311: 4311,
4312: 4312,
4313: 4313,
4314: 4314,
4315: 4315,
4316: 4316,
4317: 4317,
4318: 4318,
4319: 4319,
4320: 4320,
4321: 4321,
4322: 4322,
4323: 4323,
4324: 4324,
4325: 4325,
4326: 4326,
4327: 4327,
4328: 4328,
4329: 4329,
4330: 4330,
4331: 4331,
4332: 4332,
4333: 4333,
4334: 4334,
4335: 4335,
4336: 4336,
4337: 4337,
4338: 4338,
4339: 4339,
4340: 4340,
4341: 4341,
4342: 4342,
4343: 4343,
4344: 4344,
4345: 4345,
4346: 4346,
4349: 4349,
4350: 4350,
4351: 4351,
5112: 5104,
5113: 5105,
5114: 5106,
5115: 5107,
5116: 5108,
5117: 5109,
7296: 1042,
7297: 1044,
7298: 1054,
7299: 1057,
7300: 1058,
7301: 1058,
7302: 1066,
7303: 1122,
7304: 42570,
11507: 11506,
11559: 4295,
11565: 4301,
42593: 42592,
42649: 42648,
42651: 42650,
42897: 42896,
42899: 42898,
42903: 42902,
42905: 42904,
42907: 42906,
42909: 42908,
42911: 42910,
42913: 42912,
42915: 42914,
42917: 42916,
42919: 42918,
42921: 42920,
42933: 42932,
42935: 42934,
42937: 42936,
43859: 42931,
43888: 5024,
43889: 5025,
43890: 5026,
43891: 5027,
43892: 5028,
43893: 5029,
43894: 5030,
43895: 5031,
43896: 5032,
43897: 5033,
43898: 5034,
43899: 5035,
43900: 5036,
43901: 5037,
43902: 5038,
43903: 5039,
43904: 5040,
43905: 5041,
43906: 5042,
43907: 5043,
43908: 5044,
43909: 5045,
43910: 5046,
43911: 5047,
43912: 5048,
43913: 5049,
43914: 5050,
43915: 5051,
43916: 5052,
43917: 5053,
43918: 5054,
43919: 5055,
43920: 5056,
43921: 5057,
43922: 5058,
43923: 5059,
43924: 5060,
43925: 5061,
43926: 5062,
43927: 5063,
43928: 5064,
43929: 5065,
43930: 5066,
43931: 5067,
43932: 5068,
43933: 5069,
43934: 5070,
43935: 5071,
43936: 5072,
43937: 5073,
43938: 5074,
43939: 5075,
43940: 5076,
43941: 5077,
43942: 5078,
43943: 5079,
43944: 5080,
43945: 5081,
43946: 5082,
43947: 5083,
43948: 5084,
43949: 5085,
43950: 5086,
43951: 5087,
43952: 5088,
43953: 5089,
43954: 5090,
43955: 5091,
43956: 5092,
43957: 5093,
43958: 5094,
43959: 5095,
43960: 5096,
43961: 5097,
43962: 5098,
43963: 5099,
43964: 5100,
43965: 5101,
43966: 5102,
43967: 5103,
66776: 66736,
66777: 66737,
66778: 66738,
66779: 66739,
66780: 66740,
66781: 66741,
66782: 66742,
66783: 66743,
66784: 66744,
66785: 66745,
66786: 66746,
66787: 66747,
66788: 66748,
66789: 66749,
66790: 66750,
66791: 66751,
66792: 66752,
66793: 66753,
66794: 66754,
66795: 66755,
66796: 66756,
66797: 66757,
66798: 66758,
66799: 66759,
66800: 66760,
66801: 66761,
66802: 66762,
66803: 66763,
66804: 66764,
66805: 66765,
66806: 66766,
66807: 66767,
66808: 66768,
66809: 66769,
66810: 66770,
66811: 66771,
68800: 68736,
68801: 68737,
68802: 68738,
68803: 68739,
68804: 68740,
68805: 68741,
68806: 68742,
68807: 68743,
68808: 68744,
68809: 68745,
68810: 68746,
68811: 68747,
68812: 68748,
68813: 68749,
68814: 68750,
68815: 68751,
68816: 68752,
68817: 68753,
68818: 68754,
68819: 68755,
68820: 68756,
68821: 68757,
68822: 68758,
68823: 68759,
68824: 68760,
68825: 68761,
68826: 68762,
68827: 68763,
68828: 68764,
68829: 68765,
68830: 68766,
68831: 68767,
68832: 68768,
68833: 68769,
68834: 68770,
68835: 68771,
68836: 68772,
68837: 68773,
68838: 68774,
68839: 68775,
68840: 68776,
68841: 68777,
68842: 68778,
68843: 68779,
68844: 68780,
68845: 68781,
68846: 68782,
68847: 68783,
68848: 68784,
68849: 68785,
68850: 68786,
71872: 71840,
71873: 71841,
71874: 71842,
71875: 71843,
71876: 71844,
71877: 71845,
71878: 71846,
71879: 71847,
71880: 71848,
71881: 71849,
71882: 71850,
71883: 71851,
71884: 71852,
71885: 71853,
71886: 71854,
71887: 71855,
71888: 71856,
71889: 71857,
71890: 71858,
71891: 71859,
71892: 71860,
71893: 71861,
71894: 71862,
71895: 71863,
71896: 71864,
71897: 71865,
71898: 71866,
71899: 71867,
71900: 71868,
71901: 71869,
71902: 71870,
71903: 71871,
93792: 93760,
93793: 93761,
93794: 93762,
93795: 93763,
93796: 93764,
93797: 93765,
93798: 93766,
93799: 93767,
93800: 93768,
93801: 93769,
93802: 93770,
93803: 93771,
93804: 93772,
93805: 93773,
93806: 93774,
93807: 93775,
93808: 93776,
93809: 93777,
93810: 93778,
93811: 93779,
93812: 93780,
93813: 93781,
93814: 93782,
93815: 93783,
93816: 93784,
93817: 93785,
93818: 93786,
93819: 93787,
93820: 93788,
93821: 93789,
93822: 93790,
93823: 93791,
125218: 125184,
125219: 125185,
125220: 125186,
125221: 125187,
125222: 125188,
125223: 125189,
125224: 125190,
125225: 125191,
125226: 125192,
125227: 125193,
125228: 125194,
125229: 125195,
125230: 125196,
125231: 125197,
125232: 125198,
125233: 125199,
125234: 125200,
125235: 125201,
125236: 125202,
125237: 125203,
125238: 125204,
125239: 125205,
125240: 125206,
125241: 125207,
125242: 125208,
125243: 125209,
125244: 125210,
125245: 125211,
125246: 125212,
125247: 125213,
125248: 125214,
125249: 125215,
125250: 125216,
125251: 125217,
}
# Codepoints whose titlecase mapping was corrected in this Unicode version:
# membership here suppresses the fallback to ``base_mod._totitle`` in
# ``totitle()``.  Empty for this version.
_totitle_corrected = {
}
# Special (one-to-many) case mappings: codepoint -> sequences indexed as
# [0] = lower, [1] = title, [2] = upper by the ``*_full`` helpers below.
# Empty delta for this version.
_special_casing = {
}
# Codepoints whose special casing was corrected in this Unicode version:
# membership here suppresses the fallback to ``base_mod._special_casing``
# in the ``*_full`` helpers.  Empty for this version.
_special_casing_corrected = {
}
def toupper(code):
    """Return the simple uppercase mapping of codepoint *code*.

    Looks in this version's delta table first; otherwise falls back to the
    base Unicode database (unless the mapping was explicitly corrected in
    this version).  Codepoints with no mapping case to themselves.
    """
    if code in _toupper:
        return _toupper[code]
    if base_mod is None or code in _toupper_corrected:
        return code
    return base_mod._toupper.get(code, code)
def tolower(code):
    """Return the simple lowercase mapping of codepoint *code*.

    Looks in this version's delta table first; otherwise falls back to the
    base Unicode database (unless the mapping was explicitly corrected in
    this version).  Codepoints with no mapping case to themselves.
    """
    if code in _tolower:
        return _tolower[code]
    if base_mod is None or code in _tolower_corrected:
        return code
    return base_mod._tolower.get(code, code)
def totitle(code):
    """Return the simple titlecase mapping of codepoint *code*.

    Looks in this version's delta table first; otherwise falls back to the
    base Unicode database (unless the mapping was explicitly corrected in
    this version).  Codepoints with no mapping case to themselves.
    """
    if code in _totitle:
        return _totitle[code]
    if base_mod is None or code in _totitle_corrected:
        return code
    return base_mod._totitle.get(code, code)
def toupper_full(code):
    """Return the full (possibly multi-codepoint) uppercase mapping of *code*.

    Special-casing entries are sequences indexed ``[2]`` for upper.  When no
    special casing applies, fall back to the simple one-to-one mapping,
    wrapped in a list for a uniform return type.
    """
    if code in _special_casing:
        return _special_casing[code][2]
    if base_mod is not None and code not in _special_casing_corrected:
        base_casing = base_mod._special_casing
        if code in base_casing:
            return base_casing[code][2]
    return [toupper(code)]
def tolower_full(code):
    """Return the full (possibly multi-codepoint) lowercase mapping of *code*.

    Special-casing entries are sequences indexed ``[0]`` for lower.  When no
    special casing applies, fall back to the simple one-to-one mapping,
    wrapped in a list for a uniform return type.
    """
    if code in _special_casing:
        return _special_casing[code][0]
    if base_mod is not None and code not in _special_casing_corrected:
        base_casing = base_mod._special_casing
        if code in base_casing:
            return base_casing[code][0]
    return [tolower(code)]
def totitle_full(code):
    """Return the full (possibly multi-codepoint) titlecase mapping of *code*.

    Special-casing entries are sequences indexed ``[1]`` for title.  When no
    special casing applies, fall back to the simple one-to-one mapping,
    wrapped in a list for a uniform return type.
    """
    if code in _special_casing:
        return _special_casing[code][1]
    if base_mod is not None and code not in _special_casing_corrected:
        base_casing = base_mod._special_casing
        if code in base_casing:
            return base_casing[code][1]
    return [totitle(code)]
_raw_decomposition = {
8341: '<sub> 0068',
8342: '<sub> 006B',
8343: '<sub> 006C',
8344: '<sub> 006D',
8345: '<sub> 006E',
8346: '<sub> 0070',
8347: '<sub> 0073',
8348: '<sub> 0074',
42652: '<super> 044A',
42653: '<super> 044C',
43000: '<super> 0126',
43001: '<super> 0153',
43868: '<super> A727',
43869: '<super> AB37',
43870: | |
the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rotate_server_ca),
'__call__') as call:
client.rotate_server_ca()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_sql.SqlInstancesRotateServerCaRequest()
@pytest.mark.asyncio
async def test_rotate_server_ca_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesRotateServerCaRequest):
    """rotate_server_ca (async) must invoke the stub and surface the Operation."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back a fake awaitable unary call.
    with mock.patch.object(
            type(client.transport.rotate_server_ca), '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_sql_resources.Operation(**operation_kwargs))
        response = await client.rotate_server_ca(request)
        # The stub was invoked, and with the expected request type.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesRotateServerCaRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


@pytest.mark.asyncio
async def test_rotate_server_ca_async_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    await test_rotate_server_ca_async(request_type=dict)
def test_start_replica(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesStartReplicaRequest):
    """start_replica must invoke the gRPC stub once and surface the Operation."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back the canned Operation.
    with mock.patch.object(
            type(client.transport.start_replica), '__call__') as call:
        call.return_value = cloud_sql_resources.Operation(**operation_kwargs)
        response = client.start_replica(request)
        # Exactly one stub invocation, carrying the expected request type.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStartReplicaRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


def test_start_replica_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    test_start_replica(request_type=dict)
def test_start_replica_empty_call():
    """A completely argument-free start_replica() call must still hit the stub."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Patch the gRPC stub method and invoke the client without arguments.
    with mock.patch.object(
            type(client.transport.start_replica), '__call__') as call:
        client.start_replica()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStartReplicaRequest()
@pytest.mark.asyncio
async def test_start_replica_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesStartReplicaRequest):
    """start_replica (async) must invoke the stub and surface the Operation."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back a fake awaitable unary call.
    with mock.patch.object(
            type(client.transport.start_replica), '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_sql_resources.Operation(**operation_kwargs))
        response = await client.start_replica(request)
        # The stub was invoked, and with the expected request type.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStartReplicaRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


@pytest.mark.asyncio
async def test_start_replica_async_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    await test_start_replica_async(request_type=dict)
def test_stop_replica(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesStopReplicaRequest):
    """stop_replica must invoke the gRPC stub once and surface the Operation."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back the canned Operation.
    with mock.patch.object(
            type(client.transport.stop_replica), '__call__') as call:
        call.return_value = cloud_sql_resources.Operation(**operation_kwargs)
        response = client.stop_replica(request)
        # Exactly one stub invocation, carrying the expected request type.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStopReplicaRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


def test_stop_replica_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    test_stop_replica(request_type=dict)
def test_stop_replica_empty_call():
    """A completely argument-free stop_replica() call must still hit the stub."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Patch the gRPC stub method and invoke the client without arguments.
    with mock.patch.object(
            type(client.transport.stop_replica), '__call__') as call:
        client.stop_replica()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStopReplicaRequest()
@pytest.mark.asyncio
async def test_stop_replica_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesStopReplicaRequest):
    """stop_replica (async) must invoke the stub and surface the Operation."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back a fake awaitable unary call.
    with mock.patch.object(
            type(client.transport.stop_replica), '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_sql_resources.Operation(**operation_kwargs))
        response = await client.stop_replica(request)
        # The stub was invoked, and with the expected request type.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesStopReplicaRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


@pytest.mark.asyncio
async def test_stop_replica_async_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    await test_stop_replica_async(request_type=dict)
def test_truncate_log(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesTruncateLogRequest):
    """truncate_log must invoke the gRPC stub once and surface the Operation."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional and the API layer is mocked,
    # so an empty request is enough to drive the client-side plumbing.
    request = request_type()
    # The canned Operation fields we expect to see echoed on the response.
    operation_kwargs = dict(
        kind='kind_value',
        target_link='target_link_value',
        status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        user='user_value',
        operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        name='name_value',
        target_id='target_id_value',
        self_link='self_link_value',
        target_project='target_project_value',
    )
    # Patch the gRPC stub method and hand back the canned Operation.
    with mock.patch.object(
            type(client.transport.truncate_log), '__call__') as call:
        call.return_value = cloud_sql_resources.Operation(**operation_kwargs)
        response = client.truncate_log(request)
        # Exactly one stub invocation, carrying the expected request type.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesTruncateLogRequest()
    # Every canned field must round-trip onto the returned Operation.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, expected in operation_kwargs.items():
        assert getattr(response, field) == expected


def test_truncate_log_from_dict():
    # Same flow, but the request is supplied as a plain dict.
    test_truncate_log(request_type=dict)
def test_truncate_log_empty_call():
    """A completely argument-free truncate_log() call must still hit the stub."""
    # Coverage failsafe: request == None and no flattened fields passed.
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Patch the gRPC stub method and invoke the client without arguments.
    with mock.patch.object(
            type(client.transport.truncate_log), '__call__') as call:
        client.truncate_log()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesTruncateLogRequest()
@pytest.mark.asyncio
async def test_truncate_log_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesTruncateLogRequest):
client = SqlInstancesServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.truncate_log),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cloud_sql_resources.Operation(
kind='kind_value',
target_link='target_link_value',
status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
user='user_value',
operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
name='name_value',
target_id='target_id_value',
self_link='self_link_value',
target_project='target_project_value',
))
response = await client.truncate_log(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, | |
# <filename>rspub/core/transport.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
:samp:`Transport resources and sitemaps to the web server`
"""
import logging
import os
import shutil
import socket
import tempfile
import urllib.parse
from enum import Enum
from glob import glob
import paramiko
from resync import ChangeList
from resync import ResourceList
from resync.list_base_with_index import ListBaseWithIndex
from resync.sitemap import Sitemap
from scp import SCPClient, SCPException
from rspub.core.rs_paras import RsParameters
from rspub.util.observe import Observable, ObserverInterruptException
LOG = logging.getLogger(__name__)
class TransportEvent(Enum):
    """
    :samp:`Events fired by {Transport}`

    All events are broadcast in the format::

        [inform][confirm](source, event, **kwargs)

    where ``source`` is the calling instance, ``event`` is the relevant event and ``**kwargs`` hold relevant
    information about the event.
    """
    copy_resource = 1
    """
    ``1`` ``inform`` :samp:`A resource was copied to a temporary location`
    """
    copy_sitemap = 2
    """
    ``2`` ``inform`` :samp:`A sitemap was copied to a temporary location`
    """
    copy_file = 3
    """
    ``3`` ``confirm`` :samp:`Copy file confirm message with interrupt`
    """
    transfer_file = 4
    """
    ``4`` ``confirm`` :samp:`Transfer file confirm message with interrupt`
    """
    resource_not_found = 10
    """
    ``10`` ``inform`` :samp:`A resource was not found`
    """
    start_copy_to_temp = 15
    """
    ``15`` ``inform`` :samp:`Start copy resources and sitemaps to temporary directory`
    """
    zip_resources = 20
    """
    ``20`` ``inform`` :samp:`Start packaging resources and sitemaps`
    """
    scp_resources = 21
    """
    ``21`` ``inform`` :samp:`Start transfer of files with scp`
    """
    ssh_client_creation = 22
    """
    ``22`` ``inform`` :samp:`Trying to create ssh client`
    """
    scp_exception = 23
    """
    ``23`` ``inform`` :samp:`Encountered exception while transferring files with scp`
    """
    scp_progress = 24
    """
    ``24`` ``inform`` :samp:`Progress as defined by SCPClient`
    """
    scp_transfer_complete = 25
    """
    ``25`` ``inform`` :samp:`Transfer of one file complete`
    """
    transport_start = 30
    """
    ``30`` ``inform`` :samp:`Transport started`
    """
    transport_end = 31
    """
    ``31`` ``inform`` :samp:`Transport ended`
    """
class ResourceAuditorEvent(Enum):
    """
    :samp:`Events fired by {ResourceAuditor}`

    All events are broadcast in the format::

        [inform](source, event, **kwargs)

    where ``source`` is the calling instance, ``event`` is the relevant event and ``**kwargs`` hold relevant
    information about the event.
    """
    site_map_not_found = 11
    """
    ``11`` ``inform`` :samp:`A sitemap was not found`
    """
class ResourceAuditor(Observable):
    """
    :samp:`Reconstruct the published resource set from sitemaps on disk`

    Reads the resourcelist and changelist sitemaps previously written under
    the metadata directory and derives which resources are (still) published.
    """

    def __init__(self, paras):
        """
        :param paras: :class:`RsParameters` describing the publication layout.
        """
        Observable.__init__(self)
        assert isinstance(paras, RsParameters)
        self.paras = paras
        # Number of sitemaps that could not be read.
        self.count_errors = 0

    def all_resources(self):
        """Return a ``{uri: resource}`` map of the complete published state.

        Resourcelists establish the baseline; changelists are then replayed
        in sorted file-name order, applying created/updated entries and
        removing deleted ones.
        """
        all_resources = {}
        # search for resourcelists
        resourcelist_files = sorted(glob(self.paras.abs_metadata_path("resourcelist_*.xml")))
        for rl_file_name in resourcelist_files:
            resourcelist = ResourceList()
            with open(rl_file_name, "r", encoding="utf-8") as rl_file:
                sm = Sitemap()
                sm.parse_xml(rl_file, resources=resourcelist)
                all_resources.update({resource.uri: resource for resource in resourcelist.resources})
        # search for changelists
        changelist_files = sorted(glob(self.paras.abs_metadata_path("changelist_*.xml")))
        for cl_file_name in changelist_files:
            changelist = ChangeList()
            with open(cl_file_name, "r", encoding="utf-8") as cl_file:
                sm = Sitemap()
                sm.parse_xml(cl_file, resources=changelist)
                for resource in changelist.resources:
                    if resource.change == "created" or resource.change == "updated":
                        all_resources.update({resource.uri: resource})
                    elif resource.change == "deleted" and resource.uri in all_resources:
                        del all_resources[resource.uri]
        return all_resources

    def all_resources_generator(self):
        """Return a generator factory yielding ``(resource, path, relpath)``
        for every resource in the complete published state."""
        def generator():
            for resource in self.all_resources().values():
                path, relpath = self.extract_paths(resource.uri)
                yield resource, path, relpath
        return generator

    def last_resources_generator(self):
        """Return a generator factory yielding ``(resource, path, relpath)``
        for the resources of the most recently written sitemaps only.

        Unreadable/missing sitemaps are counted in ``count_errors`` and
        reported via ``ResourceAuditorEvent.site_map_not_found``.
        """
        def generator():
            for file_name in self.paras.last_sitemaps:
                listbase = ListBaseWithIndex()
                if os.path.exists(file_name):
                    with open(file_name, "r", encoding="utf-8") as lb_file:
                        sm = Sitemap()
                        sm.parse_xml(lb_file, resources=listbase)
                        for resource in listbase.resources:
                            # Deleted resources are skipped; everything else is yielded.
                            if resource.change is None or not resource.change == "deleted":
                                path, relpath = self.extract_paths(resource.uri)
                                yield resource, path, relpath
                else:
                    LOG.warning("Unable to read sitemap: %s" % file_name)
                    self.count_errors += 1
                    self.observers_inform(self, ResourceAuditorEvent.site_map_not_found, file=file_name)
        return generator

    def extract_paths(self, uri):
        """Map a resource *uri* back to its local file system location.

        :return: tuple ``(absolute path, path relative to the resource dir)``
        """
        relpath = os.path.relpath(uri, self.paras.url_prefix)
        # URIs are percent-encoded; decode before touching the file system.
        relpath = urllib.parse.unquote(relpath)
        path = os.path.join(self.paras.resource_dir, relpath)
        return path, relpath

    def get_generator(self, all_resources):
        """Choose a resource generator.

        :param all_resources: when True, iterate the complete published
            state; otherwise only the resources of the last sitemaps.
        """
        if all_resources:
            LOG.debug("Creating generator for all resources.")
            generator = self.all_resources_generator()
        else:
            LOG.debug("Creating generator for last resources.")
            generator = self.last_resources_generator()
        return generator
class Transport(ResourceAuditor):
    def __init__(self, paras):
        """
        Initialize a Transport.

        :param paras: RsParameters describing resource, metadata and export locations.
        """
        ResourceAuditor.__init__(self, paras)
        # Created lazily by create_ssh_client(); stays None until scp is prepared.
        self.sshClient = None
        # Running totals; reset by __reset_counts() at the start of each action.
        self.count_resources = 0
        self.count_sitemaps = 0
        self.count_transfers = 0
def handle_resources(self, function, all_resources=False, include_description=True):
self.observers_inform(self, TransportEvent.start_copy_to_temp)
with tempfile.TemporaryDirectory(prefix="rspub.core.transport_") as tmpdirname:
LOG.info("Created temporary directory: %s" % tmpdirname)
self.__copy_resources(tmpdirname, all_resources)
self.__copy_metadata(tmpdirname)
if include_description:
self.__copy_description(tmpdirname)
function(tmpdirname)
def __copy_file(self, relpath, src, tmpdirname):
# LOG.debug("Copy file. relpath=%s src=%s" % (relpath, src))
if not self.observers_confirm(self, TransportEvent.copy_file, filename=src):
raise ObserverInterruptException("Process interrupted on TransportEvent.copy_file")
dest = os.path.join(tmpdirname, relpath)
dirs = os.path.dirname(dest)
os.makedirs(dirs, exist_ok=True)
shutil.copy2(src, dest)
def __copy_resources(self, tmpdirname, all_resources=False):
generator = self.get_generator(all_resources)
for resource, src, relpath in generator():
try:
self.__copy_file(relpath, src, tmpdirname)
self.count_resources += 1
self.observers_inform(self, TransportEvent.copy_resource, file=src,
count_resources=self.count_resources)
except FileNotFoundError:
LOG.exception("Unable to copy file %s", src)
self.count_errors += 1
self.observers_inform(self, ResourceAuditorEvent.resource_not_found, file=src)
def __copy_metadata(self, tmpdirname):
xml_files = glob(self.paras.abs_metadata_path("*.xml"))
for xml_file in xml_files:
relpath = os.path.relpath(xml_file, self.paras.resource_dir)
try:
self.__copy_file(relpath, xml_file, tmpdirname)
self.count_sitemaps += 1
self.observers_inform(self, TransportEvent.copy_sitemap, file=xml_file,
count_sitemaps=self.count_sitemaps)
except FileNotFoundError:
LOG.exception("Unable to copy file %s", xml_file)
self.count_errors += 1
self.observers_inform(self, ResourceAuditorEvent.site_map_not_found, file=xml_file)
def __copy_description(self, tmpdirname):
desc_file = self.paras.abs_description_path()
self.count_sitemaps += 1
if not self.paras.has_wellknown_at_root:
# description goes in metadata_dir
dest = os.path.join(tmpdirname, self.paras.metadata_dir, ".well-known", "resourcesync")
else:
# description should go at server root. should be moved at server if not correct. keep 1 zip file.
dest = os.path.join(tmpdirname, ".well-known", "resourcesync")
dirs = os.path.dirname(dest)
os.makedirs(dirs, exist_ok=True)
try:
shutil.copy2(desc_file, dest)
self.observers_inform(self, TransportEvent.copy_sitemap, file=desc_file,
count_sitemaps=self.count_sitemaps)
except FileNotFoundError:
LOG.exception("Unable to copy file %s", desc_file)
self.count_errors += 1
self.observers_inform(self, ResourceAuditorEvent.site_map_not_found, file=desc_file)
def __reset_counts(self):
self.count_resources = 0
self.count_sitemaps = 0
self.count_transfers = 0
self.count_errors = 0
#############
def zip_resources(self, all_resources=False):
self.__reset_counts()
self.observers_inform(self, TransportEvent.transport_start, mode="zip sources", all_resources=all_resources)
#
self.handle_resources(self.__function_zip, all_resources, include_description=True)
#
self.observers_inform(self, TransportEvent.transport_end, mode="zip sources",
count_resources=self.count_resources, count_sitemaps=self.count_sitemaps,
count_transfers=self.count_transfers, count_errors=self.count_errors)
def __function_zip(self, tmpdirname):
if self.count_resources + self.count_sitemaps > 0:
zip_name = os.path.splitext(self.paras.zip_filename)[0]
zip_dir = os.path.dirname(self.paras.zip_filename)
os.makedirs(zip_dir, exist_ok=True)
self.observers_inform(self, TransportEvent.zip_resources, zip_file=self.paras.zip_filename)
shutil.make_archive(zip_name, 'zip', tmpdirname)
LOG.info("Created zip archive: %s" % os.path.abspath(zip_name + ".zip"))
else:
LOG.info("Nothing to zip, not creating archive")
#############
# Password may not be needed with key-based authentication. See fi:
# https://www.digitalocean.com/community/tutorials/how-to-configure-ssh-key-based-authentication-on-a-linux-server
def scp_resources(self, all_resources=False, password="<PASSWORD>"):
self.__reset_counts()
self.observers_inform(self, TransportEvent.transport_start, mode="scp sources", all_resources=all_resources)
self.create_ssh_client(password)
#
try:
if self.sshClient:
if self.paras.has_wellknown_at_root:
include_description = False
self.count_sitemaps += 1
else:
include_description = True
self.handle_resources(self.__function_scp, all_resources, include_description=include_description)
if self.paras.has_wellknown_at_root:
self.__send_wellknown()
except Exception as err:
LOG.exception("Error while transfering files with scp")
self.count_errors += 1
self.observers_inform(self, TransportEvent.scp_exception, exception=str(err))
finally:
self.observers_inform(self, TransportEvent.transport_end, mode="scp sources",
count_resources=self.count_resources, count_sitemaps=self.count_sitemaps,
count_transfers=self.count_transfers, count_errors=self.count_errors)
    def __send_wellknown(self):
        """
        Send the description document to {exp_scp_document_root}/.well-known on the remote.

        Counts an error and broadcasts site_map_not_found if the local description
        file does not exist.
        """
        files = self.paras.abs_description_path()
        # .well-known directory can only be made by root.
        # sudo mkdir .well-known
        # sudo chmod -R a=rwx .well-known
        # or if only one user copies to .well-known
        # sudo chown user:group .well-known/
        remote_path = self.paras.exp_scp_document_root + "/.well-known"
        try:
            self.scp_put(files, remote_path)
        except FileNotFoundError:
            LOG.exception("Unable to send file %s", files)
            self.count_errors += 1
            self.observers_inform(self, ResourceAuditorEvent.site_map_not_found, file=files)
def create_ssh_client(self, password):
if self.sshClient is None:
LOG.debug("Creating ssh client: server=%s, port=%d, user=%s" %
(self.paras.exp_scp_server, self.paras.exp_scp_port, self.paras.exp_scp_user))
self.observers_inform(self, TransportEvent.ssh_client_creation,
server=self.paras.exp_scp_server,
port=self.paras.exp_scp_port,
user=self.paras.exp_scp_user)
self.sshClient = paramiko.SSHClient()
self.sshClient.load_system_host_keys()
self.sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.sshClient.connect(self.paras.exp_scp_server, self.paras.exp_scp_port, self.paras.exp_scp_user, password)
except paramiko.ssh_exception.AuthenticationException as err:
LOG.exception("Not authorized")
self.count_errors += 1
self.observers_inform(self, TransportEvent.scp_exception, exception=str(err))
self.sshClient = None
except socket.gaierror as err:
LOG.exception("Socket error")
self.count_errors += 1
self.observers_inform(self, TransportEvent.scp_exception, exception=str(err))
self.sshClient = None
except TimeoutError as err:
LOG.exception("Timeout")
self.count_errors += 1
self.observers_inform(self, TransportEvent.scp_exception, exception=str(err))
self.sshClient = None
def __function_scp(self, tmpdirname):
if self.count_resources + self.count_sitemaps > 0:
files = tmpdirname + os.sep
remote_path = self.paras.exp_scp_document_root + self.paras.server_path()
self.scp_put(files, remote_path)
LOG.info("Secure copied resources and metadata")
else:
LOG.info("Nothing to send, not transferring with scp to remote")
# files can be a single file, a directory, a list of files and/or directories.
# mind that directories ending with a slash will transport the contents of the directory,
# whereas directories not ending with a slash will transport the directory itself.
def scp_put(self, files, remote_path):
LOG.info("%s >>>> %s" % (files, remote_path))
if self.sshClient is None:
raise RuntimeError("Missing ssh client: see Transport.create_ssh_client(password).")
scp = SCPClient(transport=self.sshClient.get_transport(), progress=self.progress)
preserve_times = True
recursive = True # Can be used both for sending a single file and a directory
msg = "scp -P %d -r [files] %s@%s:%s" % (self.paras.exp_scp_port, self.paras.exp_scp_user,
self.paras.exp_scp_server, remote_path)
LOG.debug("Sending files: " + msg)
self.observers_inform(self, TransportEvent.scp_resources, command=msg)
try:
scp.put(files=files, remote_path=remote_path, preserve_times=preserve_times, recursive=recursive)
except SCPException as err:
LOG.exception("Error while transferring files")
self.count_errors += 1
self.observers_inform(self, TransportEvent.scp_exception, exception=str(err))
def progress(self, filename, size, sent):
# @param progress: callback - called with (filename, size, sent) during transfers
# @type progress: function(string, int, int)
# b'Draaiboek Hilvarenbeek Gelderakkers.doc' 241664 0
# b'Draaiboek Hilvarenbeek Gelderakkers.doc' 241664 16384
# ...
# b'Draaiboek Hilvarenbeek Gelderakkers.doc' 241664 241664
filestr = filename.decode()
self.observers_inform(self, TransportEvent.scp_progress, filename=filestr, size=size, sent=sent)
if sent == 0:
if not self.observers_confirm(self, TransportEvent.transfer_file, filename=filename):
raise ObserverInterruptException("Process interrupted on TransportEvent.transfer_file")
if sent == size:
self.count_transfers += 1
self.observers_inform(self, TransportEvent.scp_transfer_complete,
filename=filestr,
| |
:param VideoSeek: 视频拖拽配置。
注意:此字段可能返回 null,表示取不到有效值。
:type VideoSeek: :class:`tencentcloud.cdn.v20180606.models.VideoSeek`
"""
self.Authentication = None
self.BandwidthAlert = None
self.Cache = None
self.CacheKey = None
self.Compression = None
self.DownstreamCapping = None
self.ErrorPage = None
self.FollowRedirect = None
self.ForceRedirect = None
self.Https = None
self.IpFilter = None
self.IpFreqLimit = None
self.MaxAge = None
self.Origin = None
self.OriginPullOptimization = None
self.RangeOriginPull = None
self.Referer = None
self.RequestHeader = None
self.ResponseHeader = None
self.ResponseHeaderCache = None
self.Seo = None
self.ServiceType = None
self.StatusCodeCache = None
self.VideoSeek = None
def _deserialize(self, params):
if params.get("Authentication") is not None:
self.Authentication = Authentication()
self.Authentication._deserialize(params.get("Authentication"))
if params.get("BandwidthAlert") is not None:
self.BandwidthAlert = BandwidthAlert()
self.BandwidthAlert._deserialize(params.get("BandwidthAlert"))
if params.get("Cache") is not None:
self.Cache = Cache()
self.Cache._deserialize(params.get("Cache"))
if params.get("CacheKey") is not None:
self.CacheKey = CacheKey()
self.CacheKey._deserialize(params.get("CacheKey"))
if params.get("Compression") is not None:
self.Compression = Compression()
self.Compression._deserialize(params.get("Compression"))
if params.get("DownstreamCapping") is not None:
self.DownstreamCapping = DownstreamCapping()
self.DownstreamCapping._deserialize(params.get("DownstreamCapping"))
if params.get("ErrorPage") is not None:
self.ErrorPage = ErrorPage()
self.ErrorPage._deserialize(params.get("ErrorPage"))
if params.get("FollowRedirect") is not None:
self.FollowRedirect = FollowRedirect()
self.FollowRedirect._deserialize(params.get("FollowRedirect"))
if params.get("ForceRedirect") is not None:
self.ForceRedirect = ForceRedirect()
self.ForceRedirect._deserialize(params.get("ForceRedirect"))
if params.get("Https") is not None:
self.Https = Https()
self.Https._deserialize(params.get("Https"))
if params.get("IpFilter") is not None:
self.IpFilter = IpFilter()
self.IpFilter._deserialize(params.get("IpFilter"))
if params.get("IpFreqLimit") is not None:
self.IpFreqLimit = IpFreqLimit()
self.IpFreqLimit._deserialize(params.get("IpFreqLimit"))
if params.get("MaxAge") is not None:
self.MaxAge = MaxAge()
self.MaxAge._deserialize(params.get("MaxAge"))
if params.get("Origin") is not None:
self.Origin = Origin()
self.Origin._deserialize(params.get("Origin"))
if params.get("OriginPullOptimization") is not None:
self.OriginPullOptimization = OriginPullOptimization()
self.OriginPullOptimization._deserialize(params.get("OriginPullOptimization"))
if params.get("RangeOriginPull") is not None:
self.RangeOriginPull = RangeOriginPull()
self.RangeOriginPull._deserialize(params.get("RangeOriginPull"))
if params.get("Referer") is not None:
self.Referer = Referer()
self.Referer._deserialize(params.get("Referer"))
if params.get("RequestHeader") is not None:
self.RequestHeader = RequestHeader()
self.RequestHeader._deserialize(params.get("RequestHeader"))
if params.get("ResponseHeader") is not None:
self.ResponseHeader = ResponseHeader()
self.ResponseHeader._deserialize(params.get("ResponseHeader"))
if params.get("ResponseHeaderCache") is not None:
self.ResponseHeaderCache = ResponseHeaderCache()
self.ResponseHeaderCache._deserialize(params.get("ResponseHeaderCache"))
if params.get("Seo") is not None:
self.Seo = Seo()
self.Seo._deserialize(params.get("Seo"))
self.ServiceType = params.get("ServiceType")
if params.get("StatusCodeCache") is not None:
self.StatusCodeCache = StatusCodeCache()
self.StatusCodeCache._deserialize(params.get("StatusCodeCache"))
if params.get("VideoSeek") is not None:
self.VideoSeek = VideoSeek()
self.VideoSeek._deserialize(params.get("VideoSeek"))
class PathRule(AbstractModel):
    """Path-based origin-pull configuration rule.

    Note: every field may be returned as null, meaning no valid value is available.
    """

    def __init__(self):
        """
        :param Regex: Whether wildcard "*" matching is enabled: false = off, true = on.
        :type Regex: bool
        :param Path: URL path to match (paths only, query parameters unsupported).
            Exact match by default; with wildcard matching enabled, up to 5 "*"
            wildcards and a maximum length of 1024 characters are supported.
        :type Path: str
        :param Origin: Origin server used when the path matches. COS origins with
            private read/write are not supported yet. Defaults to the main origin.
        :type Origin: str
        :param ServerName: Host header sent on origin-pull when the path matches.
            Defaults to the main ServerName.
        :type ServerName: str
        :param OriginArea: Region of the origin server, CN or OV:
            CN = inside mainland China, OV = outside. Defaults to CN.
        :type OriginArea: str
        :param ForwardUri: URI path used on origin-pull when the path matches; must
            start with "/", no query part, max length 1024. $1..$5 capture the "*"
            wildcards of the matched path; up to 10 captures are supported.
        :type ForwardUri: str
        :param RequestHeaders: Request headers sent to the origin when the path matches.
        :type RequestHeaders: list of HttpHeaderRule
        """
        for field in ("Regex", "Path", "Origin", "ServerName",
                      "OriginArea", "ForwardUri", "RequestHeaders"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill the rule's attributes from the *params* dict."""
        for field in ("Regex", "Path", "Origin", "ServerName", "OriginArea", "ForwardUri"):
            setattr(self, field, params.get(field))
        if params.get("RequestHeaders") is not None:
            headers = []
            for item in params.get("RequestHeaders"):
                header = HttpHeaderRule()
                header._deserialize(item)
                headers.append(header)
            self.RequestHeaders = headers
class PurgePathCacheRequest(AbstractModel):
    """PurgePathCache request structure."""

    def __init__(self):
        """
        :param Paths: List of directories to purge; each must include the protocol
            header http:// or https://.
        :type Paths: list of str
        :param FlushType: Purge type: "flush" purges updated resources only,
            "delete" purges all resources.
        :type FlushType: str
        :param UrlEncode: Whether to encode Chinese characters before purging.
        :type UrlEncode: bool
        """
        for field in ("Paths", "FlushType", "UrlEncode"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("Paths", "FlushType", "UrlEncode"):
            setattr(self, field, params.get(field))
class PurgePathCacheResponse(AbstractModel):
    """PurgePathCache response structure."""

    def __init__(self):
        """
        :param TaskId: Purge task ID; directories submitted in one batch share one task ID.
        :type TaskId: str
        :param RequestId: Unique request ID, returned with every request; supply it
            when reporting an issue.
        :type RequestId: str
        """
        for field in ("TaskId", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("TaskId", "RequestId"):
            setattr(self, field, params.get(field))
class PurgeTask(AbstractModel):
    """Details of a purge task."""

    def __init__(self):
        """
        :param TaskId: Purge task ID.
        :type TaskId: str
        :param Url: Purged URL.
        :type Url: str
        :param Status: Task status: fail (purge failed), done (purge succeeded),
            process (purge in progress).
        :type Status: str
        :param PurgeType: Purge type: url (URL purge) or path (directory purge).
        :type PurgeType: str
        :param FlushType: Purge method: flush (purge updated resources; directory
            purge only) or delete (purge all resources).
        :type FlushType: str
        :param CreateTime: Time the purge task was submitted.
        :type CreateTime: str
        """
        for field in ("TaskId", "Url", "Status", "PurgeType", "FlushType", "CreateTime"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("TaskId", "Url", "Status", "PurgeType", "FlushType", "CreateTime"):
            setattr(self, field, params.get(field))
class PurgeUrlsCacheRequest(AbstractModel):
    """PurgeUrlsCache request structure."""

    def __init__(self):
        """
        :param Urls: URL list; each must include the protocol header http:// or https://.
        :type Urls: list of str
        :param Area: Purge region. When omitted, defaults to the acceleration region
            of the domain. "mainland" purges only caches on nodes inside mainland
            China; "overseas" only outside. A specified region must match the
            domain's acceleration region.
        :type Area: str
        :param UrlEncode: Whether to encode Chinese characters before purging.
        :type UrlEncode: bool
        """
        for field in ("Urls", "Area", "UrlEncode"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("Urls", "Area", "UrlEncode"):
            setattr(self, field, params.get(field))
class PurgeUrlsCacheResponse(AbstractModel):
    """PurgeUrlsCache response structure."""

    def __init__(self):
        """
        :param TaskId: Purge task ID; URLs submitted in one batch share one task ID.
        :type TaskId: str
        :param RequestId: Unique request ID, returned with every request; supply it
            when reporting an issue.
        :type RequestId: str
        """
        for field in ("TaskId", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("TaskId", "RequestId"):
            setattr(self, field, params.get(field))
class PushTask(AbstractModel):
    """Details of a prefetch (push) task."""

    def __init__(self):
        """
        :param TaskId: Prefetch task ID.
        :type TaskId: str
        :param Url: Prefetched URL.
        :type Url: str
        :param Status: Task status: fail (prefetch failed), done (prefetch
            succeeded), process (prefetch in progress).
        :type Status: str
        :param Percent: Prefetch progress percentage.
        :type Percent: int
        :param CreateTime: Time the prefetch task was submitted.
        :type CreateTime: str
        :param Area: Prefetch region: mainland (inside mainland China), overseas
            (outside), or global.
        :type Area: str
        :param UpdateTime: Last update time of the task.
            Note: this field may be returned as null.
        :type UpdateTime: str
        """
        for field in ("TaskId", "Url", "Status", "Percent", "CreateTime", "Area", "UpdateTime"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("TaskId", "Url", "Status", "Percent", "CreateTime", "Area", "UpdateTime"):
            setattr(self, field, params.get(field))
class PushUrlsCacheRequest(AbstractModel):
    """PushUrlsCache request structure."""

    def __init__(self):
        """
        :param Urls: URL list; each must include the protocol header http:// or https://.
        :type Urls: list of str
        :param UserAgent: User-Agent header for the HTTP origin-pull request made
            while prefetching; defaults to TencentCdn.
        :type UserAgent: str
        :param Area: Prefetch region: mainland (domestic nodes), overseas, or
            global. Defaults to mainland; the URLs' domains must have acceleration
            enabled in the chosen region to submit a prefetch task there.
        :type Area: str
        :param Layer: "middle" (or empty) prefetches to mid-layer nodes.
        :type Layer: str
        :param ParseM3U8: Whether to recursively parse and prefetch the ts segments
            of m3u8 files. Notes: 1. the m3u8 index must be directly fetchable;
            2. only first-level indexes and their sub-indexes are parsed, with a
            recursion depth of at most 3; 3. parsed segments count toward the daily
            prefetch quota and are silently skipped once the quota is exceeded.
        :type ParseM3U8: bool
        """
        for field in ("Urls", "UserAgent", "Area", "Layer", "ParseM3U8"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("Urls", "UserAgent", "Area", "Layer", "ParseM3U8"):
            setattr(self, field, params.get(field))
class PushUrlsCacheResponse(AbstractModel):
    """PushUrlsCache response structure."""

    def __init__(self):
        """
        :param TaskId: Task ID of this submitted batch.
        :type TaskId: str
        :param RequestId: Unique request ID, returned with every request; supply it
            when reporting an issue.
        :type RequestId: str
        """
        for field in ("TaskId", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("TaskId", "RequestId"):
            setattr(self, field, params.get(field))
class QueryStringKey(AbstractModel):
    """Query-string component of the CacheKey.

    Note: every field may be returned as null, meaning no valid value is available.
    """

    def __init__(self):
        """
        :param Switch: on | off — whether the query string is part of the CacheKey.
        :type Switch: str
        :param Reorder: Whether query parameters are re-sorted.
        :type Reorder: str
        :param Action: includeAll | excludeAll | includeCustom | excludeCustom —
            use/ignore selected url parameters. (The source doc listed excludeAll
            twice; excludeCustom is presumably the intended fourth value.)
        :type Action: str
        :param Value: Url parameters to use/ignore, separated by ';'.
        :type Value: str
        """
        for field in ("Switch", "Reorder", "Action", "Value"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("Switch", "Reorder", "Action", "Value"):
            setattr(self, field, params.get(field))
class Quota(AbstractModel):
    """Available amounts and quotas for purge/prefetch."""

    def __init__(self):
        """
        :param Batch: Maximum number of entries per submitted batch.
        :type Batch: int
        :param Total: Daily submission quota.
        :type Total: int
        :param Available: Remaining submittable quota for today.
        :type Available: int
        :param Area: Region the quota applies to.
        :type Area: str
        """
        for field in ("Batch", "Total", "Available", "Area"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("Batch", "Total", "Available", "Area"):
            setattr(self, field, params.get(field))
class RangeOriginPull(AbstractModel):
    """Range-based (partial) origin-pull configuration; enabled by default."""

    def __init__(self):
        """
        :param Switch: Range-based origin-pull switch: on (enabled) or off (disabled).
        :type Switch: str
        """
        self.Switch = None

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        self.Switch = params.get("Switch")
class Referer(AbstractModel):
    """Referer blocklist/allowlist configuration; disabled by default."""

    def __init__(self):
        """
        :param Switch: Referer blocklist/allowlist switch: on (enabled) or off (disabled).
        :type Switch: str
        :param RefererRules: Referer blocklist/allowlist rules.
            Note: this field may be returned as null.
        :type RefererRules: list of RefererRule
        """
        self.Switch = None
        self.RefererRules = None

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        self.Switch = params.get("Switch")
        raw_rules = params.get("RefererRules")
        if raw_rules is not None:
            rules = []
            for item in raw_rules:
                rule = RefererRule()
                rule._deserialize(item)
                rules.append(rule)
            self.RefererRules = rules
class RefererRule(AbstractModel):
    """Referer blocklist/allowlist rule, scoped to specific resources."""

    def __init__(self):
        """
        :param RuleType: Rule scope: all (all files), file (given file extensions),
            directory (given paths), path (given absolute paths).
        :type RuleType: str
        :param RulePaths: Matching content for RuleType: "*" for all; extensions
            such as jpg, txt for file; paths such as /xxx/test/ for directory;
            absolute paths such as /xxx/test.html for path.
        :type RulePaths: list of str
        :param RefererType: Referer list type: whitelist or blacklist.
        :type RefererType: str
        :param Referers: Referer content list.
        :type Referers: list of str
        :param AllowEmpty: Whether an empty referer is allowed:
            true (allowed) or false (not allowed).
        :type AllowEmpty: bool
        """
        for field in ("RuleType", "RulePaths", "RefererType", "Referers", "AllowEmpty"):
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        for field in ("RuleType", "RulePaths", "RefererType", "Referers", "AllowEmpty"):
            setattr(self, field, params.get(field))
class RegionMapRelation(AbstractModel):
    """Association between a region-map ID and its sub-region IDs."""

    def __init__(self):
        """
        :param RegionId: Region ID.
        :type RegionId: int
        :param SubRegionIdList: List of sub-region IDs.
        :type SubRegionIdList: list of int
        """
        self.RegionId = None
        self.SubRegionIdList = None

    def _deserialize(self, params):
        """Fill attributes from the *params* dict."""
        self.RegionId = params.get("RegionId")
        self.SubRegionIdList = params.get("SubRegionIdList")
class ReportData(AbstractModel):
"""CDN报表数据
"""
def __init__(self):
"""
:param ResourceId: 项目ID/域名ID。
:type ResourceId: str
:param Resource: 项目名称/域名。
:type Resource: str
:param Value: 流量总和/带宽最大值,单位分别为bytes,bps。
:type Value: int
:param Percentage: 单个资源占总体百分比。
:type Percentage: float
:param BillingValue: 计费流量总和/计费带宽最大值,单位分别为bytes,bps。
:type BillingValue: int
:param BillingPercentage: 计费数值占总体百分比。
:type BillingPercentage: float
| |
from __future__ import print_function
from .utils import *
from .types import *
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
import time
import math
import logging
class VehicleClient:
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
if (ip == ""):
ip = "127.0.0.1"
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = timeout_value, pack_encoding = 'utf-8', unpack_encoding = 'utf-8')
# ----------------------------------- Common vehicle APIs ---------------------------------------------
def reset(self):
"""
Reset the vehicle to its original starting state
Note that you must call `enableApiControl` and `armDisarm` again after the call to reset
"""
self.client.call('reset')
def ping(self):
"""
If connection is established then this call will return true otherwise it will be blocked until timeout
Returns:
bool:
"""
return self.client.call('ping')
def getClientVersion(self):
return 1 # sync with C++ client
def getServerVersion(self):
return self.client.call('getServerVersion')
def getMinRequiredServerVersion(self):
return 1 # sync with C++ client
def getMinRequiredClientVersion(self):
return self.client.call('getMinRequiredClientVersion')
# basic flight control
def enableApiControl(self, is_enabled, vehicle_name = ''):
"""
Enables or disables API control for vehicle corresponding to vehicle_name
Args:
is_enabled (bool): True to enable, False to disable API control
vehicle_name (str, optional): Name of the vehicle to send this command to
"""
self.client.call('enableApiControl', is_enabled, vehicle_name)
def isApiControlEnabled(self, vehicle_name = ''):
"""
Returns true if API control is established.
If false (which is default) then API calls would be ignored. After a successful call to `enableApiControl`, `isApiControlEnabled` should return true.
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
bool: If API control is enabled
"""
return self.client.call('isApiControlEnabled', vehicle_name)
def armDisarm(self, arm, vehicle_name = ''):
"""
Arms or disarms vehicle
Args:
arm (bool): True to arm, False to disarm the vehicle
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
bool: Success
"""
return self.client.call('armDisarm', arm, vehicle_name)
def simPause(self, is_paused):
"""
Pauses simulation
Args:
is_paused (bool): True to pause the simulation, False to release
"""
self.client.call('simPause', is_paused)
def simIsPause(self):
"""
Returns true if the simulation is paused
Returns:
bool: If the simulation is paused
"""
return self.client.call("simIsPaused")
def simContinueForTime(self, seconds):
"""
Continue the simulation for the specified number of seconds
Args:
seconds (float): Time to run the simulation for
"""
self.client.call('simContinueForTime', seconds)
def getHomeGeoPoint(self, vehicle_name = ''):
"""
Get the Home location of the vehicle
Args:
vehicle_name (str, optional): Name of vehicle to get home location of
Returns:
GeoPoint: Home location of the vehicle
"""
return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint', vehicle_name))
def confirmConnection(self):
"""
Checks state of connection every 1 sec and reports it in Console so user can see the progress for connection.
"""
if self.ping():
print("Connected!")
else:
print("Ping returned false!")
server_ver = self.getServerVersion()
client_ver = self.getClientVersion()
server_min_ver = self.getMinRequiredServerVersion()
client_min_ver = self.getMinRequiredClientVersion()
ver_info = "Client Ver:" + str(client_ver) + " (Min Req: " + str(client_min_ver) + \
"), Server Ver:" + str(server_ver) + " (Min Req: " + str(server_min_ver) + ")"
if server_ver < server_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim server is of older version and not supported by this client. Please upgrade!")
elif client_ver < client_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim client is of older version and not supported by this server. Please upgrade!")
else:
print(ver_info)
print('')
def simSwapTextures(self, tags, tex_id = 0, component_id = 0, material_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
tags (str): string of "," or ", " delimited tags to identify on which actors to perform the swap
tex_id (int, optional): indexes the array of textures assigned to each actor undergoing a swap
If out-of-bounds for some object's texture set, it will be taken modulo the number of textures that were available
component_id (int, optional):
material_id (int, optional):
Returns:
list[str]: List of objects which matched the provided tags and had the texture swap perfomed
"""
return self.client.call("simSwapTextures", tags, tex_id, component_id, material_id)
# time-of-day control
def simSetTimeOfDay(self, is_enabled, start_datetime = "", is_start_datetime_dst = False, celestial_clock_speed = 1, update_interval_secs = 60, move_sun = True):
"""
Control the position of Sun in the environment
Sun's position is computed using the coordinates specified in `OriginGeopoint` in settings for the date-time specified in the argument,
else if the string is empty, current date & time is used
Args:
is_enabled (bool): True to enable time-of-day effect, False to reset the position to original
start_datetime (str, optional): Date & Time in %Y-%m-%d %H:%M:%S format, e.g. `2018-02-12 15:20:00`
is_start_datetime_dst (bool, optional): True to adjust for Daylight Savings Time
celestial_clock_speed (float, optional): Run celestial clock faster or slower than simulation clock
E.g. Value 100 means for every 1 second of simulation clock, Sun's position is advanced by 100 seconds
so Sun will move in sky much faster
update_interval_secs (float, optional): Interval to update the Sun's position
move_sun (bool, optional): Whether or not to move the Sun
"""
self.client.call('simSetTimeOfDay', is_enabled, start_datetime, is_start_datetime_dst, celestial_clock_speed, update_interval_secs, move_sun)
# weather
def simEnableWeather(self, enable):
"""
Enable Weather effects. Needs to be called before using `simSetWeatherParameter` API
Args:
enable (bool): True to enable, False to disable
"""
self.client.call('simEnableWeather', enable)
def simSetWeatherParameter(self, param, val):
"""
Enable various weather effects
Args:
param (WeatherParameter): Weather effect to be enabled
val (float): Intensity of the effect, Range 0-1
"""
self.client.call('simSetWeatherParameter', param, val)
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the ImageType members
def simGetImage(self, camera_name, image_type, vehicle_name = ''):
"""
Get a single image
Returns bytes of png format image which can be dumped into abinary file to create .png image
`string_to_uint8_array()` can be used to convert into Numpy unit8 array
See https://microsoft.github.io/AirSim/image_apis/ for details
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Name of the vehicle with the camera
Returns:
Binary string literal of compressed png image
"""
# todo: in future remove below, it's only for compatibility to pre v1.2
camera_name = str(camera_name)
# because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately.
result = self.client.call('simGetImage', camera_name, image_type, vehicle_name)
if (result == "" or result == "\0"):
return None
return result
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the ImageType members
def simGetImages(self, requests, vehicle_name = ''):
"""
Get multiple images
See https://microsoft.github.io/AirSim/image_apis/ for details and examples
Args:
requests (list[ImageRequest]): Images required
vehicle_name (str, optional): Name of vehicle associated with the camera
Returns:
list[ImageResponse]:
"""
responses_raw = self.client.call('simGetImages', requests, vehicle_name)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
    def simRunConsoleCommand(self, command):
        """
        Allows the client to execute a command in Unreal's native console, via an API.
        Affords access to the countless built-in commands such as "stat unit", "stat fps", "open [map]", adjust any config settings, etc. etc.
        Allows the user to create bespoke APIs very easily, by adding a custom event to the level blueprint, and then calling the console command "ce MyEventName [args]". No recompilation of AirSim needed!

        Args:
            command (str): Desired Unreal Engine Console command to run

        Returns:
            bool: Success
        """
        return self.client.call('simRunConsoleCommand', command)
# gets the static meshes in the unreal scene
def simGetMeshPositionVertexBuffers(self):
"""
Returns the static meshes that make up the scene
See https://microsoft.github.io/AirSim/meshes/ for details and how to use this
Returns:
list[MeshPositionVertexBuffersResponse]:
"""
responses_raw = self.client.call('simGetMeshPositionVertexBuffers')
return [MeshPositionVertexBuffersResponse.from_msgpack(response_raw) for response_raw in responses_raw]
    def simGetCollisionInfo(self, vehicle_name = ''):
        """
        Args:
            vehicle_name (str, optional): Name of the Vehicle to get the info of

        Returns:
            CollisionInfo: decoded collision state reported by the simulator
        """
        return CollisionInfo.from_msgpack(self.client.call('simGetCollisionInfo', vehicle_name))
    def simSetVehiclePose(self, pose, ignore_collison, vehicle_name = ''):
        """
        Set the pose of the vehicle

        If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values

        Args:
            pose (Pose): Desired Pose of the vehicle
            ignore_collison (bool): Whether to ignore any collision or not.
                NOTE: the parameter name is misspelled ("collison") but is
                kept as-is because renaming would break callers that pass
                it by keyword.
            vehicle_name (str, optional): Name of the vehicle to move
        """
        self.client.call('simSetVehiclePose', pose, ignore_collison, vehicle_name)
def simGetVehiclePose(self, vehicle_name = ''):
"""
Args:
vehicle_name (str, | |
from elevate import elevate
elevate()
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5 import uic
from pxpowersh import PxPowershell
import hjson
import sys
import os
from collections import OrderedDict
from random import randrange
from threading import Thread
import requests
import re
from urllib.parse import quote, unquote
from tkinter import messagebox
import tkinter as tk
# Hidden Tk root so messagebox dialogs can be shown before the Qt app exists.
root = tk.Tk()
root.withdraw()

# Connectivity pre-check: the tool downloads installers and packages, so bail
# out early with a dialog if the network is unreachable.
try:
    # A timeout is required — without one a filtered network could hang the
    # startup forever. Catch RequestException (not just ConnectionError) so
    # timeouts and SSL failures also reach the error dialog instead of crashing.
    requests.get('https://1.1.1.1', timeout=10)
except requests.exceptions.RequestException:
    messagebox.showerror('Connection error', 'Could not connect to the internet. Please check your connection and try again.')
    sys.exit()

# Mirror all print() output into a log file; UTF-8 because status text
# contains non-ASCII characters such as '…'.
sys.stdout = open('log.txt', 'w', encoding='utf-8')
def remove_special_chars(s):
    """Sanitize *s* into a Windows-safe filename fragment.

    Drops characters forbidden in Windows filenames, collapses runs of
    whitespace to single spaces, replaces a literal '...' with the
    ellipsis character and strips trailing dots.
    """
    forbidden = "\\/:*?<>|"
    kept = ''.join(ch for ch in s if ch not in forbidden)
    collapsed = ' '.join(kept.split())
    return collapsed.replace('...', '…').rstrip('.')
def web_download(url, path):
    """Stream *url* to *path* in 8 KiB chunks; no-op if *path* already exists."""
    if os.path.exists(path):
        return
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(path, 'wb') as out_file:
            for block in response.iter_content(chunk_size=8192):
                out_file.write(block)
class UI(QMainWindow):
    """
    Main application window.

    Loads the Qt Designer layout from design.ui, populates the four tweak
    trees (debloat / optimize / ui / install) from data.hjson and, on demand,
    runs the selected tweaks through a background PowerShell session.
    """

    def __init__(self):
        super(UI, self).__init__()
        uic.loadUi("design.ui", self)

        # find the widgets in the xml file
        self.debloatTree = self.findChild(QtWidgets.QTreeWidget, "treeWidget_2")
        self.optimizeTree = self.findChild(QtWidgets.QTreeWidget, "treeWidget_3")
        self.uicustomizationTree = self.findChild(QtWidgets.QTreeWidget, "treeWidget_4")
        self.installprogramsTree = self.findChild(QtWidgets.QTreeWidget, "treeWidget_5")
        self.stackedWidget = self.findChild(QtWidgets.QStackedWidget, "stackedWidget")
        self.run_button = self.findChild(QtWidgets.QPushButton, "pushButton")
        self.install_programs_button = self.findChild(QtWidgets.QPushButton, "pushButton_2")
        self.log_list = self.findChild(QtWidgets.QListWidget, "listWidget")
        self.create_restore_point = self.findChild(QtWidgets.QCheckBox, "checkBox")
        self.status_label = self.findChild(QtWidgets.QLabel, "label_2")
        self.progress_bar = self.findChild(QtWidgets.QProgressBar, "progressBar")
        self.debloat_select_default = self.findChild(QtWidgets.QPushButton, "pushButton_3")
        self.debloat_unselect_all = self.findChild(QtWidgets.QPushButton, "pushButton_4")
        self.debloat_unselect_uwp = self.findChild(QtWidgets.QPushButton, "pushButton_9")
        self.optimize_select_default = self.findChild(QtWidgets.QPushButton, "pushButton_5")
        self.optimize_unselect_all = self.findChild(QtWidgets.QPushButton, "pushButton_6")
        self.ui_select_default = self.findChild(QtWidgets.QPushButton, "pushButton_7")
        self.ui_unselect_all = self.findChild(QtWidgets.QPushButton, "pushButton_8")
        self.debloat_essential = self.findChild(QtWidgets.QCheckBox, "checkBox_2")
        self.optimize_essential = self.findChild(QtWidgets.QCheckBox, "checkBox_3")
        self.ui_essential = self.findChild(QtWidgets.QCheckBox, "checkBox_4")

        # set default column widths
        self.optimizeTree.setColumnWidth(0, 400)
        self.installprogramsTree.setColumnWidth(0, 250)

        # load data (use a context manager so the file handle is not leaked)
        with open('data.hjson') as f:
            self.data = dict(hjson.load(f))
        self.treeitems = {'debloat': [], 'optimize': [], 'ui': [], 'install': []}

        # load ui
        self.loadui(self.debloatTree, self.data['Debloat'], 'debloat')
        self.loadui(self.optimizeTree, self.data['Optimization'], 'optimize', columns=['recommend'])
        self.loadui(self.uicustomizationTree, self.data['UI Customization'], 'ui')
        self.loadui(self.installprogramsTree, self.data['Install Programs'], 'install', columns=['description'])

        # expand trees
        self.optimizeTree.expandToDepth(0)
        self.uicustomizationTree.expandToDepth(0)
        self.installprogramsTree.expandToDepth(0)

        # set connections
        self.run_button.clicked.connect(lambda: self.start(None))
        self.install_programs_button.clicked.connect(lambda: self.start(['install']))
        self.debloat_select_default.clicked.connect(lambda: self.group_select('debloat', True))
        self.debloat_unselect_all.clicked.connect(lambda: self.group_select('debloat', False))
        self.optimize_select_default.clicked.connect(lambda: self.group_select('optimize', True))
        self.optimize_unselect_all.clicked.connect(lambda: self.group_select('optimize', False))
        self.ui_select_default.clicked.connect(lambda: self.group_select('ui', True))
        self.ui_unselect_all.clicked.connect(lambda: self.group_select('ui', False))
        self.debloat_unselect_uwp.clicked.connect(self.unselect_uwp)

        # show the ui
        self.show()

    def unselect_uwp(self):
        """Re-select the debloat defaults, then uncheck every UWP-package item."""
        self.group_select('debloat', True)
        for item in self.treeitems['debloat']:
            if item['value'].get('uwppackage'):
                item['treeitem'].setCheckState(0, QtCore.Qt.Unchecked)

    def group_select(self, section, defaults=True):
        """
        Check or uncheck all items of *section*.

        Args:
            section (str): One of 'debloat', 'optimize', 'ui', 'install'.
            defaults (bool): True restores each item's default check state
                (and ticks the section's essential checkbox); False clears all.
        """
        essentials_check = {'debloat': self.debloat_essential, 'optimize': self.optimize_essential, 'ui': self.ui_essential}
        for item in self.treeitems[section]:
            if defaults:
                item['treeitem'].setCheckState(0, QtCore.Qt.Checked if item['value'].get('default') else QtCore.Qt.Unchecked)
            else:
                item['treeitem'].setCheckState(0, QtCore.Qt.Unchecked)
        if essentials_check.get(section):
            essentials_check[section].setChecked(defaults)

    def start(self, sections=None):
        """
        Switch to the progress page and run the selected tweaks.

        Args:
            sections (list[str] | None): Sections to run; None means all.
        """
        # get items to run
        if sections is None:
            sections = ['debloat', 'optimize', 'ui', 'install']
        treeitems = {}
        for key in self.treeitems.keys():
            if key in sections:
                treeitems[key] = self.treeitems[key]
        self.stackedWidget.setCurrentIndex(1)
        # min == max == 0 puts the bar into indeterminate ("busy") mode
        self.progress_bar.setMinimum(0)
        self.progress_bar.setMaximum(0)
        self.progress_bar.setValue(0)
        QtWidgets.QApplication.processEvents()
        # run in a worker thread so the UI event loop stays responsive
        t = Thread(target=self.run, args=(treeitems,))
        # BUG FIX: this was 't.daeomon', which silently set an unused
        # attribute and left the worker thread non-daemonic.
        t.daemon = True
        t.start()
        while t.is_alive():
            self.log_list.scrollToBottom()
            QtWidgets.QApplication.processEvents()
        # change progress bar to a finished, determinate state
        self.progress_bar.setMinimum(0)
        self.progress_bar.setMaximum(1)
        self.progress_bar.setValue(1)
        QtWidgets.QApplication.processEvents()
        if len(sections) == len(self.treeitems): # if all sections were ran
            if self.error_output:
                self.log('Error', append_dots=False, add_to_list=False, process_events=True)
                messagebox.showerror('Amelio - Error', self.error_output)
                self.error_output = False
            else:
                self.log('Restarting explorer')
                print(self.px.run('stop-process -name explorer -force'))
                self.log('Complete.', append_dots=False, process_events=True)
                if messagebox.askyesno('Amelio - Complete', 'Operation has completed. Would you like to restart now?'):
                    self.px.run('shutdown /r /t 0')
                    sys.exit()
        else:
            self.log('Complete.', append_dots=False, process_events=True)
            messagebox.showinfo('Amelio - Complete', 'Operation has completed.')
        self.stackedWidget.setCurrentIndex(0)
        QtWidgets.QApplication.processEvents()

    def download_installer(self, download_url, args=None):
        """
        Download an installer to %TMP% and return the shell command to run it.

        A random numeric suffix is appended to the filename to avoid clashes
        with previous downloads.
        """
        name = remove_special_chars(unquote(download_url.split('/')[-1]))
        self.log('Downloading '+name)
        name = name.split('.')
        name = '.'.join(name[:-1]) + ''.join([str(randrange(10)) for _ in range(10)]) + '.' + name[-1]
        path = os.path.join(os.environ['tmp'], name)
        web_download(download_url, path)
        return f'cmd /c "{path}" ' + (args if args else '')

    def get_essential(self, section):
        """Return the list of essential commands configured for *section*."""
        commands = []
        if self.data[section].get('essential'):
            if self.data[section]['essential'].get('command'):
                for i in self.data[section]['essential']['command'].split('\n'):
                    commands.append(i)
            if self.data[section]['essential'].get('sophia'):
                # Sophia script expects its function names as a quoted list
                commands.append('./scripts/Sophia/Sophia.ps1 -Functions "' + '", "'.join(self.data[section]['essential']['sophia'].split('\n')) + '"')
        return commands

    def log(self, x, append_dots=True, add_to_list=True, process_events=False):
        """
        Write a status line to stdout, the status label and (optionally) the log list.

        Args:
            x (str): Message text.
            append_dots (bool): Append '...' unless the text already ends with
                a dot or contains a colon.
            add_to_list (bool): Also append to the visible log list.
            process_events (bool): Pump the Qt event loop after updating.
        """
        if append_dots: x += (('' if ':' in x else '...') if not x.endswith('.') else '')
        print(x)
        self.status_label.setText(x)
        if add_to_list:
            self.log_list.addItem(x)
            self.log_list.scrollToBottom()
        if process_events:
            QtWidgets.QApplication.processEvents()

    def run_shutup10(self, ooconfig):
        """
        Write *ooconfig* to a temporary O&O ShutUp10 config file and return
        the command that applies it silently.
        """
        name = 'ooshutup10' + ''.join([str(randrange(10)) for _ in range(10)]) + '.cfg'
        path = os.path.join(os.environ['tmp'], name)
        with open(path, 'w') as f:
            # OOSU10 config format: one "<setting>\t<+|->" line per entry
            for key, value in ooconfig.items():
                f.write(key + '\t' + ('+' if value else '-') + '\n')
        return f'cmd /c scripts\Shutup10\OOSU10.exe "{path}" /quiet'

    def run(self, treeitems):
        """
        Worker-thread entry point: apply every checked tweak in *treeitems*.

        Builds the full command list (essentials, per-item commands, package
        installs, O&O ShutUp10 config, restore point) and executes it through
        a PowerShell session. Failures are reported via self.error_output.
        """
        self.log_list.clear()
        self.error_output = False
        run_all_sections = len(treeitems) == len(self.treeitems)
        self.log('Starting powershell process')
        try:
            self.px = PxPowershell(debug=True)
            self.px.start_process()
        except Exception:  # was a bare except, which also swallowed SystemExit/KeyboardInterrupt
            self.error_output = 'Could not start powershell. Please close out of any other applications and try again.'
            return
        self.log('Setting powershell execution policy')
        self.px.run(self.data['SetExecutionPolicy'])
        try:
            # installing choco
            self.log('Checking if choco is installed')
            # NOTE(review): a long reply to 'choco -v' is assumed to be an
            # error message (choco missing) rather than a version string —
            # confirm against pxpowersh output format.
            if len(self.px.run('choco -v', timeout=9999).strip()) > 15:
                self.log('Installing choco')
                self.px.run(self.data['InstallChoco'], timeout=9999)
            if self.optimize_essential.isChecked():
                # BUG FIX: copy the template — ooconfig.update() below used to
                # mutate the dict stored in self.data across runs.
                ooconfig = dict(self.data['ooconfig'])
            else:
                ooconfig = {}
            # list of commands
            commands = []
            # initialize lists for packages
            uwppackages = []
            chocopackages = {}
            pippackages = []
            # other functions list
            otherfuncslist = []
            # get essential commands
            if run_all_sections:
                self.log('Getting essential commands')
                # BUG FIX: each section's essentials are gated on its own
                # checkbox — 'Optimization' and 'UI Customization' were swapped.
                for x, y in {'Debloat': self.debloat_essential.isChecked(),
                             'Optimization': self.optimize_essential.isChecked(),
                             'UI Customization': self.ui_essential.isChecked()}.items():
                    if y: commands.append({'Running essential commands: '+x: self.get_essential(x)})
            # get selected commands
            for section_items in treeitems.values():
                for item in section_items:
                    if item['treeitem'].checkState(0):
                        if item['value'].get('command'): # commands
                            commands.append({'Running: '+item['name']: item['value']['command'].split('\n')})
                        if item['value'].get('uwppackage'): # uwp packages to remove
                            uwppackages.append(item['value']['uwppackage'])
                        elif item['value'].get('choco'): # choco packages to install
                            chocopackages.update({item['name']: item['value']['choco'].split('\n')})
                        elif item['value'].get('pip'): # pip packages to install
                            pippackages.append(item['name'].lower())
                        elif item['value'].get('ooconfig'): # ooconfig
                            ooconfig.update(item['value']['ooconfig'])
                        elif item['value'].get('download'): # download installer files
                            commands.append({'Installing '+item['name']: [self.download_installer(item['value']['download'], item['value'].get('args'))]})
                        elif item['value'].get('run_in_file'): # handled in python, see other_functions()
                            otherfuncslist.append(item['value']['flag'])
            # o&o shutup10 config
            if len(ooconfig) != 0 and run_all_sections:
                commands.insert(0, {'Running O&O Shutup10 config': [self.run_shutup10(ooconfig)]})
            # adding package commands
            if len(uwppackages) > 0: commands.insert(0, {'Removing bloatware UWP packages': ['./scripts/Windows10Debloater/DebloatWindows.ps1 "'+ '\" \"'.join(uwppackages)+ '\"']})
            if len(pippackages) > 0: commands.append({'Installing pip packages': ['pip install '+ ' '.join(pippackages)]})
            if len(chocopackages) > 0:
                for name, chocopackage in chocopackages.items():
                    commands.append({f'Installing {name}': ['choco install '+ ' '.join(chocopackage) + ' -y --no-progress --no-color -r' ]})
            # create restore point
            if self.create_restore_point.isChecked():
                commands.insert(0, {'Creating restore point': [self.data['CreateRestorePoint']]})
            # final commands
            for item in commands:
                for name, commandlist in item.items():
                    self.log(name)
                    for command in commandlist:
                        print(command)
                        print(self.px.run(command, timeout=9999))
            # run this after config
            for flag in otherfuncslist:
                self.other_functions(flag)
        except Exception as e:
            self.error_output = f'An error occured while running.\n\nMore details:\n{e}'
            return

    def loadui(self, parent, items, section: str, columns: list = None):
        """
        Recursively populate a tree widget from a data.hjson section.

        Args:
            parent: Tree widget (or tree item) to attach children to.
            items (dict): Section data; keys are item names.
            section (str): Which self.treeitems bucket to register items in.
            columns (list | None): Extra data keys shown as extra columns.
        """
        # Avoid a shared mutable default argument.
        if columns is None:
            columns = []
        for key, value in items.items():
            # ignore essential commands section
            if key != 'essential':
                # a value whose members are all dicts is a sub-section
                if all(type(x) in [OrderedDict, dict] for x in value.values()):
                    sectionItem = QtWidgets.QTreeWidgetItem(parent, [key])
                    self.loadui(sectionItem, value, section, columns)
                else:
                    self.treeitems[section].append({
                        'treeitem': QtWidgets.QTreeWidgetItem(parent, [key] + [('' if value.get(x) is None else str(value[x])) for x in columns]),
                        'name': key,
                        'value': value
                    })
                    if value.get('default'):
                        self.treeitems[section][-1]['treeitem'].setCheckState(0, QtCore.Qt.Checked)
                    else:
                        self.treeitems[section][-1]['treeitem'].setCheckState(0, QtCore.Qt.Unchecked)
                    if value.get('img'):
                        icon = QtGui.QIcon()
                        icon.addPixmap(QtGui.QPixmap(f"img/{value['img']}"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                        self.treeitems[section][-1]['treeitem'].setIcon(0, icon)
                    if value.get('children'):
                        self.loadui(self.treeitems[section][-1]['treeitem'], value['children'], section, columns)

    def other_functions(self, flag):
        '''
        Other commands will be ran here that cannot be ran in powershell

        set this in the json:
        run_in_file: true
        flag: "flag"
        '''
        # set ultimate performance
        # this will be changed to switch cases once 3.10 is released and fully supported
        if flag == 'ultimateperformance':
            self.log('Setting ultimate performance mode')
            # duplicate the hidden scheme only if it is not already present
            if os.popen('powercfg /L | findstr "Ultimate Performance"').readline().strip() == '':
                os.system('powercfg -duplicatescheme e9a42b02-d5df-448d-aa00-03f14749eb61')
            # get ultimate performance scheme id
            scheme_id = os.popen('powercfg /L | findstr "Ultimate Performance"').readlines()[0].strip().split()[3]
            self.px.run(f'powercfg /S {scheme_id}')
            return
        # theme patcher
        elif flag == 'themepatcher':
            self.log('Downloading Secure UXTheme')
            path = os.path.join(os.environ['userprofile'], 'Desktop', 'SecureUxTheme')
            if not os.path.exists(path):
                os.mkdir(path)
            web_download('https://cdn.discordapp.com/attachments/714922631693860956/858886114642231346/ThemeTool.exe', os.path.join(path, 'ThemeTool.exe'))
            web_download('https://cdn.discordapp.com/attachments/714922631693860956/858886114453225482/SecureUxTheme.dll', os.path.join(path, 'SecureUxTheme.dll'))
            # ^^^^^ files compiled by me
            # original project: https://github.com/namazso/SecureUxTheme
            web_download('https://raw.githubusercontent.com/angelsix/youtube/develop/Windows%2010%20Dark%20Theme/Windows/Tools/MakeAppsUseDarkTheme.reg', os.path.join(path, 'Use Windows Dark Mode.reg'))
            # some themes will automatically set windows back to light mode, this will create a shortcut to change it back
            return
        # after dark cc v2 theme
        elif flag == 'afterdarktheme':
            self.log('Installing After Dark CC v2')
            path = os.path.join('C:\\Windows', 'Resources', 'Themes')
            for folder_path in [os.path.join(path, 'After Dark CC'),
                                os.path.join(path, 'After Dark CC', 'Shell'),
                                os.path.join(path, 'After Dark CC', 'Shell', 'NormalColor'),
                                os.path.join(path, 'After Dark CC', 'Shell', 'NormalColor', 'en-US')]:
                if not os.path.exists(folder_path):
                    os.mkdir(folder_path)
            web_download('https://raw.githubusercontent.com/angelsix/youtube/develop/Windows%2010%20Dark%20Theme/Windows/Themes/After%20Dark%20CC%20(Creators%20Update%201709)/Show%20Commandbar/After%20Dark%20CC%20v2.theme',
                        os.path.join(path, 'After Dark CC v2.theme'))
            web_download('https://raw.githubusercontent.com/angelsix/youtube/develop/Windows%2010%20Dark%20Theme/Windows/Themes/After%20Dark%20CC%20(Creators%20Update%201709)/Show%20Commandbar/After%20Dark%20CC/After%20Dark%20CC2.msstyles',
                        os.path.join(path, 'After Dark CC', 'After Dark CC2.msstyles'))
            web_download('https://raw.githubusercontent.com/angelsix/youtube/develop/Windows%2010%20Dark%20Theme/Windows/Themes/After%20Dark%20CC%20(Creators%20Update%201709)/Show%20Commandbar/After%20Dark%20CC/Shell/NormalColor/shellstyle.dll',
                        os.path.join(path, 'After Dark CC', 'Shell', 'NormalColor', 'shellstyle.dll'))
            web_download('https://raw.githubusercontent.com//angelsix/youtube/develop/Windows%2010%20Dark%20Theme/Windows/Themes/After%20Dark%20CC%20(Creators%20Update%201709)/Show%20Commandbar/After%20Dark%20CC/Shell/NormalColor/en-US/shellstyle.dll.mui',
                        os.path.join(path, 'After Dark CC', 'Shell', 'NormalColor', 'en-US', 'shellstyle.dll.mui'))
            return
        # get the latest sandboxie
        elif flag == 'sandboxie-plus':
            self.log('Installing Sandboxie-Plus')
            github_data = requests.get('https://api.github.com/repos/sandboxie-plus/Sandboxie/releases/latest').json()
            for asset in github_data['assets']:
                if 'Plus-x64' in asset['browser_download_url']:
                    print(self.px.run(self.download_installer(asset['browser_download_url'], '/VERYSILENT'), timeout=9999))
            return
| |
# DeepWEST/bpti_md.py
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def create_vectors(x):
    """
    Extract periodic box information from the string representation of an
    OpenMM ``Vec3`` triple and return it as a list of component tuples.

    Parameters
    ----------
    x:
        Object whose ``str()`` looks like
        ``[Vec3(x=8.0, y=0.0, z=0.0), Vec3(...), Vec3(...)]``.

    Returns
    -------
    list[tuple[float, ...]]
        The numeric components grouped into ``len(values) // 3`` sized chunks
        (three 3-tuples for a standard 9-component box).
    """
    text = str(x).replace("Vec3", "")
    # BUG FIX: the previous pattern (\d*\.?\d+) dropped minus signs and
    # exponents, silently corrupting negative components of triclinic boxes
    # and values printed in scientific notation.
    values = [float(v) for v in re.findall(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?", text)]
    values = tuple(values)
    n = int(len(values) / 3)
    return [values[i * n: (i + 1) * n] for i in range((len(values) + n - 1) // n)]
def prepare_bpti():
    """
    Prepares the Bovine pancreatic trypsin inhibitor
    system for Molecular Dynamics (MD) simulations. The
    function downloads the pdb structure from
    http://ambermd.org/tutorials/advanced/tutorial22/files/5PTI-DtoH-dry.pdb
    and parameterizes it with the Amber ff14SB force field
    and TIP4P-Ew water (the tleap script below), staging all
    generated inputs into the system_inputs_bpti directory.
    """
    os.system("curl -O http://ambermd.org/tutorials/advanced/tutorial22/files/5PTI-DtoH-dry.pdb")
    os.system("rm -rf system_inputs_bpti")
    # Removes any existing directory named system_inputs_bpti
    os.system("mkdir system_inputs_bpti")
    # Creates a directory named system_inputs_bpti
    cwd = os.getcwd()
    target_dir = cwd + "/" + "system_inputs_bpti"
    # save the tleap script to file
    # (tleap ignores the leading whitespace of each script line; the three
    # `bond` commands recreate BPTI's disulfide bridges)
    with open("input_TIP4P.leap", "w") as f:
        f.write(
            """
    source leaprc.protein.ff14SB
    loadOff solvents.lib
    loadOff tip4pbox.off
    loadOff tip4pewbox.off
    source leaprc.water.tip4pew
    HOH = TP4
    pdb = loadpdb 5PTI-DtoH-dry.pdb
    bond pdb.55.SG pdb.5.SG
    bond pdb.30.SG pdb.51.SG
    bond pdb.14.SG pdb.38.SG
    charge pdb
    addions2 pdb Cl- 6
    charge pdb
    solvatebox pdb TIP4PEWBOX 12.0
    saveamberparm pdb system_TIP4P.prmtop system_TIP4P.inpcrd
    saveamberparm pdb system_TIP4P.parm7 system_TIP4P.rst7
    savepdb pdb system_TIP4P.pdb
    quit
    """
        )
    os.system("tleap -f input_TIP4P.leap")
    os.system("rm -rf leap.log")
    # Stage every generated input file into system_inputs_bpti, then remove
    # the working copies from the current directory.
    shutil.copy(cwd + "/" + "system_TIP4P.inpcrd", target_dir + "/" + "system_TIP4P.inpcrd")
    shutil.copy(cwd + "/" + "system_TIP4P.parm7", target_dir + "/" + "system_TIP4P.parm7")
    shutil.copy(cwd + "/" + "system_TIP4P.pdb", target_dir + "/" + "system_TIP4P.pdb")
    shutil.copy(cwd + "/" + "system_TIP4P.prmtop", target_dir + "/" + "system_TIP4P.prmtop")
    shutil.copy(cwd + "/" + "system_TIP4P.rst7", target_dir + "/" + "system_TIP4P.rst7")
    shutil.copy(cwd + "/" + "input_TIP4P.leap", target_dir + "/" + "input_TIP4P.leap")
    shutil.copy(cwd + "/" + "5PTI-DtoH-dry.pdb", target_dir + "/" + "5PTI-DtoH-dry.pdb")
    os.system("rm -rf system_TIP4P.inpcrd")
    os.system("rm -rf system_TIP4P.parm7")
    os.system("rm -rf system_TIP4P.pdb")
    os.system("rm -rf system_TIP4P.rst7")
    os.system("rm -rf system_TIP4P.prmtop")
    os.system("rm -rf input_TIP4P.leap")
    os.system("rm -rf 5PTI-DtoH-dry.pdb")
def simulated_annealing(
    parm="system_TIP4P.prmtop",
    rst="system_TIP4P.inpcrd",
    annealing_output_pdb="system_annealing_output.pdb",
    annealing_steps=100000,
    pdb_freq=100000,
    starting_temp=0,
    target_temp=300,
    temp_incr=3,
):
    """
    Performs simulated annealing of the system from
    0 K to 300 K (default) using the OpenMM MD engine and
    saves the last frame of the simulation to be
    accessed by the next simulation.

    Parameters
    ----------
    parm: str
        System's topology file
    rst: str
        System's coordinate file
    annealing_output_pdb: str
        System's output trajectory file
    annealing_steps: int
        Annealing steps at each temperature jump
    pdb_freq: int
        Trajectory to be saved after every pdb_freq steps
    starting_temp: int
        Initial temperature of Simulated Annealing
    target_temp: int
        Final temperature of Simulated Annealing
    temp_incr: int
        Temperature increase for every step
    """
    prmtop = AmberPrmtopFile(parm)
    inpcrd = AmberInpcrdFile(rst)
    annealing_system = prmtop.createSystem(nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds)
    # Thermostat starts at 0 K; the loop below raises it in temp_incr jumps.
    annealing_integrator = LangevinIntegrator(0 * kelvin, 1 / picosecond, 2 * femtoseconds)
    total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
    annealing_temp_range = int((target_temp / temp_incr) + 1)  # NOTE(review): computed but unused
    annealing_platform = Platform.getPlatformByName("CUDA")
    annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
    annealing_simulation = Simulation(prmtop.topology, annealing_system, annealing_integrator, annealing_platform, annealing_properties)
    annealing_simulation.context.setPositions(inpcrd.positions)
    if inpcrd.boxVectors is not None:
        annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
    annealing_simulation.minimizeEnergy()
    annealing_simulation.reporters.append(PDBReporter(annealing_output_pdb, pdb_freq))
    # A second reporter that fires only at total_steps captures the final frame
    # for the follow-up NPT run.
    simulated_annealing_last_frame = (annealing_output_pdb[:-4] + "_last_frame.pdb")
    annealing_simulation.reporters.append(PDBReporter(simulated_annealing_last_frame, total_steps))
    annealing_simulation.reporters.append(StateDataReporter(stdout, pdb_freq, step=True, time=True, potentialEnergy=True, totalSteps=total_steps, temperature=True, progress=True, remainingTime=True, speed=True, separator="\t"))
    temp = starting_temp
    while temp <= target_temp:
        annealing_integrator.setTemperature(temp * kelvin)
        if temp == starting_temp:
            annealing_simulation.step(annealing_steps)
            annealing_simulation.saveState("annealing.state")
        else:
            # Resume from the saved state so each temperature continues the
            # same trajectory.
            annealing_simulation.loadState("annealing.state")
            annealing_simulation.step(annealing_steps)
        temp += temp_incr
    state = annealing_simulation.context.getState()
    print(state.getPeriodicBoxVectors())
    annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
    print(annealing_simulation_box_vectors)
    # Persist the final box vectors for the NPT stage.
    with open("annealing_simulation_box_vectors.pkl", "wb") as f:
        pk.dump(annealing_simulation_box_vectors, f)
    print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration_bpti(
    parm="system_TIP4P.prmtop",
    npt_output_pdb="system_npt_output.pdb",
    pdb_freq=500000,
    npt_steps=5000000,
    target_temp=300,
    npt_pdb="system_annealing_output_last_frame.pdb",
):
    """
    Performs NPT equilibration MD of the system
    using OpenMM MD engine and saves the last
    frame of the simulation to be accessed by
    the next simulation.

    Parameters
    ----------
    parm: str
        System's topology file
    npt_output_pdb: str
        System's output trajectory file
    pdb_freq: int
        Trajectory to be saved after every pdb_freq steps
    npt_steps: int
        NPT simulation steps
    target_temp: int
        Temperature for MD simulation
    npt_pdb: str
        Last frame of the previous (annealing) simulation
    """
    npt_init_pdb = PDBFile(npt_pdb)
    prmtop = AmberPrmtopFile(parm)
    npt_system = prmtop.createSystem(
        nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds)
    # Monte Carlo barostat makes the ensemble NPT (pressure control at 25 bar).
    barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
    npt_system.addForce(barostat)
    npt_integrator = LangevinIntegrator(target_temp * kelvin, 1 / picosecond, 2 * femtoseconds)
    npt_platform = Platform.getPlatformByName("CUDA")
    npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
    npt_simulation = Simulation(prmtop.topology, npt_system, npt_integrator, npt_platform, npt_properties)
    npt_simulation.context.setPositions(npt_init_pdb.positions)
    npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
    # Restore the box vectors persisted by the annealing stage.
    with open("annealing_simulation_box_vectors.pkl", "rb") as f:
        annealing_simulation_box_vectors = pk.load(f)
    annealing_simulation_box_vectors = create_vectors(annealing_simulation_box_vectors)
    npt_simulation.context.setPeriodicBoxVectors(annealing_simulation_box_vectors[0], annealing_simulation_box_vectors[1], annealing_simulation_box_vectors[2])
    npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
    npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
    # Reporter firing only at npt_steps captures the final frame for NVT.
    npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
    npt_simulation.reporters.append(StateDataReporter(stdout, pdb_freq, step=True, time=True, potentialEnergy=True, totalSteps=npt_steps, temperature=True, progress=True, remainingTime=True, speed=True, separator="\t"))
    npt_simulation.minimizeEnergy()
    npt_simulation.step(npt_steps)
    npt_simulation.saveState("npt_simulation.state")
    state = npt_simulation.context.getState()
    print(state.getPeriodicBoxVectors())
    npt_simulation_box_vectors = state.getPeriodicBoxVectors()
    print(npt_simulation_box_vectors)
    # Persist the equilibrated box vectors for the NVT stage.
    with open("npt_simulation_box_vectors.pkl", "wb") as f:
        pk.dump(npt_simulation_box_vectors, f)
    print("Finished NPT Simulation")
def nvt_equilibration_bpti(
    parm="system_TIP4P.prmtop",
    nvt_output_pdb="system_nvt_output.pdb",
    pdb_freq=500000,
    nvt_steps=5000000,
    target_temp=300,
    nvt_pdb="system_npt_output_last_frame.pdb",
):
    """
    Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
    frame of the simulation to be accessed by
    the next simulation.

    Parameters
    ----------
    parm: str
        System's topology file
    nvt_output_pdb: str
        System's output trajectory file
    pdb_freq: int
        Trajectory to be saved after every pdb_freq steps
    nvt_steps: int
        NVT simulation steps
    target_temp: int
        Temperature for MD simulation
    nvt_pdb: str
        Last frame of the previous (NPT) simulation
    """
    nvt_init_pdb = PDBFile(nvt_pdb)
    prmtop = AmberPrmtopFile(parm)
    # No barostat here — constant volume (NVT) ensemble.
    nvt_system = prmtop.createSystem(
        nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds)
    nvt_integrator = LangevinIntegrator(target_temp * kelvin, 1 / picosecond, 2 * femtoseconds)
    nvt_platform = Platform.getPlatformByName("CUDA")
    nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
    nvt_simulation = Simulation(prmtop.topology, nvt_system, nvt_integrator, nvt_platform, nvt_properties)
    nvt_simulation.context.setPositions(nvt_init_pdb.positions)
    nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
    # Restore the box vectors persisted by the NPT stage.
    with open("npt_simulation_box_vectors.pkl", "rb") as f:
        npt_simulation_box_vectors = pk.load(f)
    npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
    nvt_simulation.context.setPeriodicBoxVectors(npt_simulation_box_vectors[0], npt_simulation_box_vectors[1], npt_simulation_box_vectors[2])
    nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
    nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
    # Reporter firing only at nvt_steps captures the final frame for production.
    nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
    nvt_simulation.reporters.append(StateDataReporter(stdout, pdb_freq, step=True, time=True, potentialEnergy=True, totalSteps=nvt_steps, temperature=True, progress=True, remainingTime=True, speed=True, separator="\t"))
    nvt_simulation.minimizeEnergy()
    nvt_simulation.step(nvt_steps)
    nvt_simulation.saveState("nvt_simulation.state")
    state = nvt_simulation.context.getState()
    print(state.getPeriodicBoxVectors())
    nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
    print(nvt_simulation_box_vectors)
    with open("nvt_simulation_box_vectors.pkl", "wb") as f:
        pk.dump(nvt_simulation_box_vectors, f)
    print("Finished NVT Simulation")
def run_equilibration_bpti():
    """
    Runs systematic simulated annealing followed by
    NPT and NVT equilibration MD simulation.

    Input files are copied from system_inputs_bpti into a fresh
    equilibration_bpti directory, the three stages run there, and the
    staged copies are removed afterwards.
    """
    cwd = os.getcwd()
    target_dir = cwd + "/" + "equilibration_bpti"
    os.system("rm -rf equilibration_bpti")
    os.system("mkdir equilibration_bpti")
    # Stage the prepared inputs into the equilibration directory.
    staged_files = (
        "system_TIP4P.inpcrd",
        "system_TIP4P.parm7",
        "system_TIP4P.pdb",
        "system_TIP4P.prmtop",
        "system_TIP4P.rst7",
        "5PTI-DtoH-dry.pdb",
        "input_TIP4P.leap",
    )
    for fname in staged_files:
        shutil.copy(cwd + "/" + "system_inputs_bpti" + "/" + fname, target_dir + "/" + fname)
    os.chdir(target_dir)
    simulated_annealing()
    npt_equilibration_bpti()
    nvt_equilibration_bpti()
    # Clean up the staged copies (same files/commands as before, kept in the
    # original removal order).
    for fname in ("system_TIP4P.inpcrd", "system_TIP4P.parm7", "system_TIP4P.pdb",
                  "system_TIP4P.rst7", "system_TIP4P.prmtop", "5PTI-DtoH-dry.pdb",
                  "input_TIP4P.leap"):
        os.system("rm -rf " + fname)
    os.chdir(cwd)
def create_bpti_md():
    """
    Prepares starting structures for Amber MD simulations.
    All input files required to run Amber MD simulations are
    placed in the bpti_md directory.

    The NVT last frame is cleaned with pdb4amber, a clashing hydrogen
    record is stripped, and tleap regenerates the final topology and
    coordinate files.
    """
    cwd = os.getcwd()
    target_dir = cwd + "/" + "bpti_md"
    os.system("rm -rf bpti_md")
    os.system("mkdir bpti_md")
    shutil.copy(cwd + "/" + "equilibration_bpti" + "/" + "system_nvt_output_last_frame.pdb", target_dir + "/" + "system_nvt_output_last_frame.pdb")
    os.chdir(target_dir)
    os.system("pdb4amber -i system_nvt_output_last_frame.pdb -o intermediate_temp.pdb")
    # Drop pdb4amber's auxiliary outputs; only the cleaned PDB is needed.
    os.system("rm -rf intermediate_temp_renum.txt")
    os.system("rm -rf intermediate_temp_sslink")
    os.system("rm -rf intermediate_temp_nonprot.pdb")
    # Filter out lines containing any of these tokens.
    # NOTE(review): "H ARG A 1" presumably removes an N-terminal hydrogen
    # that tleap would reject — confirm against the pdb4amber output.
    remove_words = ["H ARG A 1"]
    with open("intermediate_temp.pdb") as oldfile, open("intermediate.pdb", "w") as newfile:
        for line in oldfile:
            if not any(word in line for word in remove_words):
                newfile.write(line)
    # Save the tleap script to file
    with open("final_input_TIP4P.leap", "w") as f:
        f.write(
            """
    source leaprc.protein.ff14SB
    source leaprc.water.tip4pew
    pdb = loadpdb intermediate.pdb
    charge pdb
    saveamberparm pdb system_final.prmtop system_final.inpcrd
    saveamberparm pdb system_final.parm7 system_final.rst7
    savepdb pdb system_final.pdb
    quit
    """
        )
    os.system("tleap -f final_input_TIP4P.leap")
    os.system("rm -rf leap.log")
    os.system("rm -rf leap.log")
    # NOTE(review): the line above duplicates the previous cleanup command.
    os.system("rm -rf intermediate.pdb")
    os.system("rm -rf intermediate_temp.pdb")
    os.system("rm -rf system_nvt_output_last_frame.pdb")
    os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "bpti_md"
shutil.copy(cwd + "/" + "equilibration_bpti" + "/" + "nvt_simulation_box_vectors.pkl", target_dir + | |
<reponame>piermenti-sfracellozzi/Simple-Ds<gh_stars>1-10
# Tree:
class TreeNode:
    """A node of a generic n-ary tree."""

    def __init__(self, data):
        self.data = data
        self.children = []  # child TreeNode instances, insertion order
        self.parent = None  # set by add_child(); None for the root

    def add_child(self, child):
        """Attach child under this node and record the back-reference."""
        child.parent = self
        self.children.append(child)

    def get_level(self):
        """Return the depth of this node (the root is at level 0)."""
        depth = 0
        ancestor = self.parent
        while ancestor:
            depth += 1
            ancestor = ancestor.parent
        return depth

    def print_tree(self):
        """Print this subtree, indenting three spaces per level."""
        indent = " " * self.get_level() * 3
        marker = indent + "|__" if self.parent else ""
        print(marker + str(self.data))
        for child in self.children:
            child.print_tree()
# Binary Search Tree:
class BinarySearchTreeNode:
    """A node of a binary search tree holding orderable, unique values."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

    def represent(self):
        """Crude textual dump of the subtree (right child printed first)."""
        if self.data is None:
            return "The tree is empty."
        print(f"[{self.data}]")
        if self.right:
            print("|_")
            self.right.represent()
        if self.left:
            print("_|")
            self.left.represent()

    def add_child(self, data):
        """Insert data into the subtree; duplicates are rejected."""
        if data == self.data:
            return "Binary search tree cant have duplicates."
        if data < self.data:
            if self.left:
                self.left.add_child(data)
            else:
                self.left = BinarySearchTreeNode(data)
        else:
            if self.right:
                self.right.add_child(data)
            else:
                self.right = BinarySearchTreeNode(data)

    def in_order_traversal(self):
        """Return all values of the subtree in ascending order."""
        elements = []
        if self.left:
            elements += self.left.in_order_traversal()
        elements.append(self.data)
        if self.right:
            elements += self.right.in_order_traversal()
        return elements

    def search(self, val):
        """Return True if val is present in the subtree, else False."""
        if self.data == val:
            return True
        if val < self.data:
            if self.left:
                # BUGFIX: the recursive result was previously dropped, so
                # searching the left subtree always returned None.
                return self.left.search(val)
            return False
        if self.right:
            return self.right.search(val)
        return False

    def delete(self, val):
        """Delete val from the subtree and return the new subtree root."""
        if val < self.data:
            if self.left:
                self.left = self.left.delete(val)
        elif val > self.data:
            if self.right:
                self.right = self.right.delete(val)
        else:
            if self.left is None and self.right is None:
                return None
            elif self.left is None:
                return self.right
            elif self.right is None:
                return self.left
            # two children: replace this value with the in-order successor
            min_val = self.right.find_min()
            self.data = min_val
            self.right = self.right.delete(min_val)
        return self

    def find_max(self):
        """Return the largest value in the subtree."""
        if self.right is None:
            return self.data
        return self.right.find_max()

    def find_min(self):
        """Return the smallest value in the subtree."""
        if self.left is None:
            return self.data
        return self.left.find_min()
class Node:
    """A node of a singly linked list."""

    def __init__(self, data=None, next=None):
        self.data = data  # payload
        self.next = next  # following node, or None at the tail


class DoublyLinkedNode:
    """A node of a doubly linked list."""

    def __init__(self, data=None, next=None, previous=None):
        self.data = data          # payload
        self.next = next          # following node, or None at the tail
        self.previous = previous  # preceding node, or None at the head


class LinkedList:
    """A singly linked list with index-based insertion/removal helpers."""

    # exact labels historically returned by type_all()
    _TYPE_ALL_LABELS = {
        int: "Integer", str: "String", bool: "Boolean", float: "Float",
        complex: "Complex", list: "List", tuple: "Tuple", dict: "Dictionary",
        range: "Range", set: "Set", frozenset: "Frozen Set",
        memoryview: "MemoryView", bytes: "Bytes", bytearray: "ByteArray",
    }
    # exact labels historically printed by type_each()
    _TYPE_EACH_LABELS = {
        int: "Integer", str: "String", bool: "Boolean", float: "Float",
        complex: "Complex", list: "List", tuple: "Tuple", dict: "Dict",
        range: "Range", set: "Set", frozenset: "frozenset",
        memoryview: "Memoryview", bytes: "bytes", bytearray: "bytearray",
    }

    def __init__(self):
        self.head = None  # first node, or None when empty

    def insert_at_beginning(self, data):
        """Prepend data in O(1)."""
        self.head = Node(data, self.head)

    def insert_at_end(self, data):
        """Append data in O(n)."""
        if self.head is None:
            self.head = Node(data, None)
            return
        itr = self.head
        while itr.next:
            itr = itr.next
        itr.next = Node(data, None)

    def insert_values(self, data_list):
        """Replace the list contents with the items of data_list."""
        self.head = None
        for data in data_list:
            self.insert_at_end(data)

    def represent(self):
        """Print the list as "[a] --> [b] --> ..." (or an empty notice)."""
        if self.head is None:
            print("Linked list is empty.")
            return
        print(" --> ".join("[" + str(d) + "]" for d in self.toarray()))

    def get_length(self):
        """Return the number of nodes in O(n)."""
        count = 0
        itr = self.head
        while itr:
            count += 1
            itr = itr.next
        return count

    def toarray(self):
        """Return the payloads as a Python list, head first."""
        output_array = []
        itr = self.head
        while itr:
            output_array.append(itr.data)
            itr = itr.next
        return output_array

    def remove_at(self, index):
        """Remove the node at index; raise IndexError when out of range."""
        if index < 0 or index >= self.get_length():
            raise IndexError("Invalid Index.")
        if index == 0:
            self.head = self.head.next
            return
        itr = self.head
        for _ in range(index - 1):
            itr = itr.next
        itr.next = itr.next.next

    def remove_at_beginning(self):
        """Remove the head node; return a notice string when empty."""
        if self.head is None:
            return "Nothing to remove."
        self.remove_at(0)

    def remove_at_end(self):
        """Remove the tail node; return a notice string when empty."""
        if self.head is None:
            return "Nothing to remove."
        self.remove_at(self.get_length() - 1)

    def insert_at(self, index, data):
        """Insert data at position index; raise IndexError when out of range."""
        if index < 0 or index > self.get_length():
            raise IndexError("Invalid Index.")
        if index == 0:
            self.insert_at_beginning(data)
            return
        itr = self.head
        for _ in range(index - 1):
            itr = itr.next
        itr.next = Node(data, itr.next)

    def type_all(self):
        """Return a label for the element type when the list is homogeneous.

        Returns "Various" for mixed contents and None (after printing a
        notice) when the list is empty.
        """
        if self.head is None:
            # BUGFIX: previously fell through and reported "Integer" for an
            # empty list.
            print("The linked list is empty.")
            return None
        kinds = set(type(d) for d in self.toarray())
        if len(kinds) == 1:
            return self._TYPE_ALL_LABELS.get(kinds.pop(), "Various")
        return "Various"

    def type_each(self):
        """Print a list with a type label for each element."""
        if self.head is None:
            # BUGFIX: previously also printed an empty label list after the
            # notice.
            print("The linked list is empty.")
            return
        labels = [self._TYPE_EACH_LABELS[type(d)]
                  for d in self.toarray()
                  if type(d) in self._TYPE_EACH_LABELS]
        print(labels)
class DoublyLinkedList:
    """A doubly linked list; nodes carry both next and previous links."""

    def __init__(self):
        # First node of the list, or None when the list is empty.
        self.head = None
def insert_at_beginning(self, data):
node = DoublyLinkedNode(data, self.head, None)
self.head = node
def insert_at_end(self, data):
if self.head is None:
self.head = DoublyLinkedNode(data, None, None)
return
itr = self.head
while itr.next:
itr = itr.next
itr.next = DoublyLinkedNode(data, None, itr.previous)
def type_all(self):
if self.head is None:
print("The linked list is empty.")
types = []
count_int = 0
count_str = 0
count_bool = 0
count_float = 0
count_complex = 0
count_list = 0
count_tuple = 0
count_dict = 0
count_range = 0
count_set = 0
count_bytes = 0
count_bytearray = 0
count_memoryview = 0
count_frozenset = 0
itr = self.head
while itr:
types.append(type(itr.data))
itr = itr.next
for i in types:
if i == int:
count_int += 1
elif i == str:
count_str += 1
elif i == bool:
count_bool += 1
elif i == float:
count_float += 1
elif i == complex:
count_complex += 1
elif i == list:
count_list += 1
elif i == tuple:
count_tuple += 1
elif i == dict:
count_dict += 1
elif i == range:
count_range += 1
elif i == set:
count_set += 1
elif i == frozenset:
count_frozenset += 1
elif i == bytes:
count_bytes += 1
elif i == bytearray:
count_bytearray += 1
elif i == memoryview:
count_memoryview += 1
if count_int == len(types):
return "Integer"
elif count_str == len(types):
return "String"
elif count_bool == len(types):
return "Boolean"
elif count_float == len(types):
return "Float"
elif count_complex == len(types):
return "Complex"
elif count_list == len(types):
return "List"
elif count_tuple == len(types):
return "Tuple"
elif count_dict == len(types):
return "Dictionary"
elif count_range == len(types):
return "Range"
elif count_set == len(types):
return "Set"
elif count_frozenset == len(types):
return "Frozen Set"
elif count_memoryview == len(types):
return "MemoryView"
elif count_bytes == len(types):
return "Bytes"
elif count_bytearray == len(types):
return "ByteArray"
else:
return "Various"
def type_each(self):
if self.head is None:
print("The linked list is empty.")
types = []
types2 = []
itr = self.head
while itr:
types.append(type(itr.data))
itr = itr.next
for i in types:
if i == int:
types2.append("Integer")
elif i == str:
types2.append("String")
elif i == bool:
types2.append("Boolean")
elif i == float:
types2.append("Float")
elif i == complex:
types2.append("Complex")
elif i == list:
types2.append("List")
elif i == tuple:
types2.append("Tuple")
elif i | |
(err_q or np.quaternion(1, 0, 0, 0))
light_gl_v = tools.q_times_v(err_q.conj() * SystemModel.sc2gl_q.conj(), light_v)
# new way to discretize light, consistent with real fdb inplementation
if discretize_tol:
dlv, _ = tools.discretize_v(light_gl_v, discretize_tol, lat_range=(-math.pi/2, math.radians(90 - self.min_elong)))
err_angle = tools.angle_between_v(light_gl_v, dlv)
light_gl_v = dlv
return light_gl_v, err_angle
    def light_rel_dir(self, err_q=False, discretize_tol=False):
        """ direction of light relative to spacecraft in s/c coords

        :param err_q: optional attitude error quaternion; falsy -> identity
        :param discretize_tol: deprecated and must be falsy (asserted below)
        :return: (light direction vector in s/c frame, err_angle or False)
        """
        assert not discretize_tol, 'discretize_tol deprecated at light_rel_dir function'
        light_v = tools.normalize_v(self.asteroid.position(self.time.value))
        sc_q = self.spacecraft_q
        err_q = (err_q or np.quaternion(1, 0, 0, 0))
        # old, better way to discretize light, based on asteroid rotation axis, now not in use
        # (dead code: the assert above guarantees discretize_tol is falsy)
        if discretize_tol:
            ast_q = self.asteroid_q
            light_ast_v = tools.q_times_v(ast_q.conj(), light_v)
            dlv, _ = tools.discretize_v(light_ast_v, discretize_tol)
            err_angle = tools.angle_between_v(light_ast_v, dlv)
            light_v = tools.q_times_v(ast_q, dlv)
        return tools.q_times_v(err_q.conj() * sc_q.conj(), light_v),\
               err_angle if discretize_tol else False
    def solar_elongation(self, real=False):
        """Return (elongation, direction) angles in radians.

        :param real: use the "real" (ground-truth) time and attitude instead
            of the current estimate
        """
        ast_v = self.asteroid.position(self.time.real_value if real else self.time.value)
        sc_q = self.real_spacecraft_q if real else self.spacecraft_q
        elong, direc = tools.solar_elongation(ast_v, sc_q)
        if not BATCH_MODE and DEBUG:
            print('elong: %.3f | dir: %.3f' % (
                math.degrees(elong), math.degrees(direc)))
        return elong, direc
def rel_rot_err(self):
return tools.angle_between_q(
self.sc_asteroid_rel_q(),
self.real_sc_asteroid_rel_q())
def lat_pos_err(self):
real_pos = self.real_spacecraft_pos
err = np.subtract(self.spacecraft_pos, real_pos)
return math.sqrt(err[0]**2 + err[1]**2) / abs(real_pos[2])
def dist_pos_err(self):
real_d = self.real_spacecraft_pos[2]
return abs(self.spacecraft_pos[2] - real_d) / abs(real_d)
def calc_visibility(self, pos=None):
if pos is None:
pos = self.spacecraft_pos
if isinstance(pos, np.ndarray):
pos = pos.reshape((-1, 3))
return_array = True
else:
pos = np.array([pos], shape=(1, 3))
return_array = False
rad = self.asteroid.mean_radius * 0.001
xt = np.abs(pos[:, 2]) * math.tan(math.radians(self.cam.x_fov) / 2)
yt = np.abs(pos[:, 2]) * math.tan(math.radians(self.cam.y_fov) / 2)
# xm = np.clip((xt - (abs(pos[0])-rad))/rad/2, 0, 1)
# ym = np.clip((yt - (abs(pos[1])-rad))/rad/2, 0, 1)
xm = 1 - np.minimum(1, (np.maximum(0, pos[:, 0] + rad - xt) + np.maximum(0, rad - pos[:, 0] - xt)) / rad / 2)
ym = 1 - np.minimum(1, (np.maximum(0, pos[:, 1] + rad - yt) + np.maximum(0, rad - pos[:, 1] - yt)) / rad / 2)
visib = xm * ym * 100
return visib if return_array else visib[0]
    def export_state(self, filename):
        """ saves state in an easy to access format

        Writes a tab-separated table with one header row and one row each for
        the 'initial' (estimated) and 'real' states: asteroid quaternion,
        spacecraft quaternion, asteroid->s/c vector and sun->asteroid vector.
        """
        qn = ('w', 'x', 'y', 'z')
        vn = ('x', 'y', 'z')
        lines = [['type'] + ['ast_q' + i for i in qn] + ['sc_q' + i for i in qn]
                 + ['ast_sc_v' + i for i in vn] + ['sun_ast_v' + i for i in vn]]
        for t in ('initial', 'real'):
            # if settings.USE_ICRS, all in solar system barycentric equatorial frame
            ast_q = self.asteroid.rotation_q(self.time.value)
            sc_q = self.spacecraft_q
            ast_sc_v = tools.q_times_v(sc_q, self.spacecraft_pos)
            sun_ast_v = self.asteroid.position(self.time.value)
            lines.append((t,) + tuple('%f'%f for f in (tuple(ast_q.components) + tuple(sc_q.components)
                                                       + tuple(ast_sc_v) + tuple(sun_ast_v))))
            # NOTE(review): swapping here makes the second iteration read the
            # "real" values and restores the original state after the loop —
            # presumably intended; confirm against swap_values_with_real_vals.
            self.swap_values_with_real_vals()
        with open(filename, 'w') as f:
            f.write('\n'.join(['\t'.join(l) for l in lines]))
    def save_state(self, filename, printout=False):
        """ Save the current parameter values (and their "real" counterparts,
        when set) as an .lbl file in configparser format.

        :param filename: target path; '.lbl' is appended if missing
        :param printout: if True, write the config to stdout instead of a file
        """
        config = configparser.ConfigParser()
        filename = filename+('.lbl' if len(filename)<5 or filename[-4:]!='.lbl' else '')
        # read any existing file first so unrelated sections are preserved
        config.read(filename)
        if not config.has_section('main'):
            config.add_section('main')
        if not config.has_section('real'):
            config.add_section('real')
        for n, p in self.get_params(all=True):
            config.set('main', n, str(p.value))
            if p.real_value is not None:
                config.set('real', n, str(p.real_value))
        if self.asteroid.real_position is not None:
            config.set('real', 'sun_asteroid_pos', str(self.asteroid.real_position))
        if not printout:
            with open(filename, 'w') as f:
                config.write(f)
        else:
            config.write(sys.stdout)
def load_state(self, filename, sc_ast_vertices=False):
if not os.path.isfile(filename):
raise FileNotFoundError(filename)
config = configparser.ConfigParser()
filename = filename+('.lbl' if len(filename)<5 or filename[-4:]!='.lbl' else '')
config.read(filename)
for n, p in self.get_params(all=True):
v = float(config.get('main', n))
if n == 'time':
rp = self.asteroid.rotation_period
p.range = (v-rp/2, v+rp/2)
p.value = v
rv = config.get('real', n, fallback=None)
if rv is not None:
p.real_value = float(rv)
rv = config.get('real', 'sun_asteroid_pos', fallback=None)
if rv is not None:
self.asteroid.real_position = np.fromstring(rv[1:-1], dtype=np.float, sep=' ')
assert np.isclose(self.time.value, float(config.get('main', 'time'))), \
'Failed to set time value: %s vs %s'%(self.time.value, float(config.get('main', 'time')))
self.update_asteroid_model()
if sc_ast_vertices:
# get real relative position of asteroid model vertices
self.asteroid.real_sc_ast_vertices = self.sc_asteroid_vertices(real=True)
    @staticmethod
    def frm_conv_q(fsrc, fdst, ast=None):
        """ Quaternion converting vectors from frame fsrc to frame fdst.

        :param fsrc: source frame (one of the SystemModel *_FRAME constants)
        :param fdst: destination frame
        :param ast: asteroid object; required when ASTEROID_FRAME is involved
            (otherwise the asteroid entry is None and using it would fail)
        """
        # each entry maps its frame into the OpenGL frame; the conversion is
        # src->gl followed by the inverse of dst->gl
        fqm = {
            SystemModel.OPENGL_FRAME:np.quaternion(1,0,0,0),
            SystemModel.OPENCV_FRAME:SystemModel.cv2gl_q,
            SystemModel.SPACECRAFT_FRAME:SystemModel.sc2gl_q,
            SystemModel.ASTEROID_FRAME: None if ast is None else ast.ast2sc_q*SystemModel.sc2gl_q,
        }
        return fqm[fsrc]*fqm[fdst].conj()
    def __repr__(self):
        """Human-readable summary: parameters, solar elongation (deg) and
        asteroid rotation angle (deg)."""
        return (
            'system state:\n\t%s\n'
            + '\nsolar elongation: %s\n'
            + '\nasteroid rotation: %.2f\n'
        ) % (
            '\n\t'.join('%s = %s'%(n, p) for n, p in self.get_params(all=True)),
            tuple(map(math.degrees, self.solar_elongation())),
            math.degrees(self.asteroid.rotation_theta(self.time.value)),
        )
class Camera:
    """Simple pinhole camera model parameterized by image size and fov."""

    def __init__(self, width, height, x_fov, y_fov):
        self.width = width    # in pixels
        self.height = height  # in pixels
        self.x_fov = x_fov    # in deg
        self.y_fov = y_fov    # in deg

    def intrinsic_camera_mx(self, legacy=True):
        """Return the 3x3 intrinsic camera matrix K."""
        return Camera._intrinsic_camera_mx(self.width, self.height, self.x_fov, self.y_fov, legacy=legacy)

    def inv_intrinsic_camera_mx(self, legacy=True):
        """Return the inverse intrinsic camera matrix K^-1 (cached)."""
        return Camera._inv_intrinsic_camera_mx(self.width, self.height, self.x_fov, self.y_fov, legacy=legacy)

    @staticmethod
    def _intrinsic_camera_mx(width, height, x_fov, y_fov, legacy=True):
        x = width/2
        y = height/2
        fl_x = x / math.tan(math.radians(x_fov)/2)
        fl_y = y / math.tan(math.radians(y_fov)/2)
        # legacy=False negates the x focal length (flipped x-axis convention)
        return np.array([[fl_x * (1 if legacy else -1), 0, x],
                         [0, fl_y, y],
                         [0, 0, 1]], dtype="float")

    @staticmethod
    @lru_cache(maxsize=1)
    def _inv_intrinsic_camera_mx(w, h, xfov, yfov, legacy=True):
        return np.linalg.inv(Camera._intrinsic_camera_mx(w, h, xfov, yfov, legacy=legacy))

    def calc_xy(self, xi, yi, z_off):
        """ xi and yi are unaltered image coordinates, z_off is usually negative """
        # use pixel centers
        xh = xi + 0.5
        yh = yi + 0.5
        # BUGFIX/cleanup: removed the dead `if True:` scaffolding whose else
        # branch referenced an undefined name (zh) and could never execute.
        iK = self.inv_intrinsic_camera_mx(legacy=False)
        x_off, y_off, _ = iK.dot(np.array([xh, yh, 1])) * z_off
        return x_off, y_off

    def calc_img_xy(self, x, y, z):
        """ x, y, z are in camera frame (z typically negative), return image coordinates """
        K = self.intrinsic_camera_mx(legacy=False)
        ix, iy, iw = K.dot(np.array([x, y, z]))
        return ix / iw, iy / iw
class Asteroid(ABC):
    """Abstract base describing an asteroid's shape model, orbital elements
    and rotation state; concrete subclasses fill in the attribute values."""

    ast2sc_q = None  # depends on the shape model coordinate frame

    def __init__(self, *args, shape_model=None, **kwargs):
        """Initialize every metadata field to None; overriding classes are
        expected to assign real values (and load the shape model)."""
        super(Asteroid, self).__init__()
        self.name = None                 # (NOT IN USE)
        self.image_db_path = None
        self.target_model_file = None
        self.hires_target_model_file = None
        self.hires_target_model_file_textures = False
        # shape model related
        self.render_smooth_faces = False  # when rendering shape model, smooth faces instead of angular ones
        self.real_shape_model = None      # loaded at overriding class __init__
        self.real_sc_ast_vertices = None
        self.reflmod_params = None
        self.real_position = None  # transient, loaded from image metadata at iotools.lblloader
        self.max_radius = None   # in meters, maximum extent of object from asteroid frame coordinate origin
        self.mean_radius = None  # in meters
        # for cross section (probably not in (good) use)
        self.mean_cross_section = None  # in m2
        # epoch for orbital elements
        self.oe_epoch = None  # as astropy.time.Time
        # orbital elements
        self.eccentricity = None    # unitless
        self.semimajor_axis = None  # with astropy.units
        self.inclination = None     # in rads
        self.longitude_of_ascending_node = None  # in rads
        self.argument_of_periapsis = None        # in rads
        self.mean_anomaly = None                 # in rads
        # other
        self.aphelion = None        # in rads
        self.perihelion = None      # in rads
        self.orbital_period = None  # in seconds
        # rotation period
        self.rot_epoch = None          # as astropy.time.Time
        self.rotation_velocity = None  # in rad/s
        self.rotation_pm = None        # in rads
        self.axis_latitude = None      # in rads
        self.axis_longitude = None     # in rads
        self.precession_cone_radius = None  # in rads (NOT IN USE)
        self.precession_period = None       # in seconds (NOT IN USE)
        self.precession_pm = None           # in rads (NOT IN USE)
        # default values of axis that gets changed during monte-carlo simulation at testloop.py
        self.def_rotation_pm = None
        self.def_axis_latitude = None
        self.def_axis_longitude = None
    def set_defaults(self):
        """Snapshot the current rotation state as the restorable defaults."""
        self.def_rotation_pm = self.rotation_pm          # in rads
        self.def_axis_latitude = self.axis_latitude      # in rads
        self.def_axis_longitude = self.axis_longitude    # in rads
    def reset_to_defaults(self):
        """Restore the rotation state captured earlier by set_defaults()."""
        self.rotation_pm = self.def_rotation_pm          # in rads
        self.axis_latitude = self.def_axis_latitude      # in rads
        self.axis_longitude = self.def_axis_longitude    # in rads
@property
def rotation_period(self):
return 2 * math.pi / self.rotation_velocity
    @lru_cache(maxsize=1)
    def rot_epoch_unix(self):
        """Rotation epoch converted to a unix timestamp, in seconds.

        NOTE(review): lru_cache on an instance method keys on self and keeps
        that instance alive; with maxsize=1 the cache also thrashes when
        multiple instances are used — confirm single-instance usage.
        """
        return (self.rot_epoch - Time(0, format='unix')).sec
def rotation_theta(self, timestamp):
dt = timestamp - self.rot_epoch_unix()
theta = (self.rotation_pm + self.rotation_velocity * dt) % (2 * math.pi)
return theta
    def rotation_q(self, timestamp):
        """Quaternion orienting the asteroid at the given unix timestamp:
        the rotation axis is aimed per axis latitude/longitude, then the body
        is spun around it by the current phase angle."""
        theta = self.rotation_theta(timestamp)
        # TODO: use precession info
        # orient z axis correctly, rotate around it
        return tools.ypr_to_q(self.axis_latitude, self.axis_longitude, theta)
hmm_group_config_path = os.path.join(self.dbCAN_HMMS_DIR, 'dbCAN-categories.txt')
HMM_fam_config_dir = os.path.join(self.dbCAN_HMMS_DIR, 'dbCAN-fams')
HMM_fam_input_dir = os.path.join(self.output_dir, 'HMMs')
with open(hmm_group_config_path, 'r', 0) as hmm_group_config_handle:
for hmm_group_config_line in hmm_group_config_handle.readlines():
hmm_group_config_line = hmm_group_config_line.rstrip()
hmm_group = hmm_group_config_line.split("\t")[0]
all_HMM_groups_order.append(hmm_group)
all_HMM_ids[hmm_group] = []
HMM_fam_config_path = os.path.join(HMM_fam_config_dir, 'dbCAN-' + hmm_group + '.txt')
with open(HMM_fam_config_path, 'r', 0) as hmm_fam_config_handle:
for hmm_fam_config_line in hmm_fam_config_handle.readlines():
hmm_fam_config_line = hmm_fam_config_line.rstrip()
hmm_fam_config = hmm_fam_config_line.split("\t")
hmm_fam_id = hmm_fam_config[0]
if len(hmm_fam_config) > 1:
input_HMM_descs[hmm_fam_id] = hmm_fam_config[1]
else:
input_HMM_descs[hmm_fam_id] = hmm_fam_id
all_HMM_ids[hmm_group].append(hmm_fam_id)
all_HMM_ids_order.append(hmm_fam_id)
#### get the specific input HMM ids requested
##
input_HMM_ids = dict()
for hmm_group in all_HMM_groups_order:
input_HMM_ids[hmm_group] = []
input_field = 'input_dbCAN_' + hmm_group + 'ids'
if input_field in params and params[input_field] != None and len(params[input_field]) > 0:
only_none_found = True
for HMM_fam in params[input_field]:
if HMM_field == 'none':
continue
only_none_found = False
input_HMM_ids[hmm_group].append(HMM_fam)
if only_none_found:
input_HMM_ids[hmm_group] = []
else: # default: use all
input_HMM_ids[hmm_group] = all_HMM_ids[hmm_group]
# check for failed input file creation
#
if not appropriate_sequence_found_in_many_input:
self.log(invalid_msgs, "no protein sequences found in '" + input_many_name + "'")
# input data failed validation. Need to return
#
if len(invalid_msgs) > 0:
# load the method provenance from the context object
#
self.log(console, "SETTING PROVENANCE") # DEBUG
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects'] = []
# provenance[0]['input_ws_objects'].append(input_one_ref)
provenance[0]['input_ws_objects'].append(input_many_ref)
provenance[0]['service'] = 'kb_hmmer'
provenance[0]['method'] = search_tool_name + '_Search'
# build output report object
#
self.log(console, "BUILDING REPORT") # DEBUG
report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
reportObj = {
'objects_created': [],
'text_message': report
}
reportName = 'hmmer_report_' + str(uuid.uuid4())
ws = workspaceService(self.workspaceURL, token=ctx['token'])
report_obj_info = ws.save_objects({
#'id':info[6],
'workspace': params['workspace_name'],
'objects': [
{
'type': 'KBaseReport.Report',
'data': reportObj,
'name': reportName,
'meta': {},
'hidden': 1,
'provenance': provenance # DEBUG
}
]
})[0]
self.log(console, "BUILDING RETURN OBJECT")
returnVal = {'report_name': reportName,
'report_ref': str(report_obj_info[6]) + '/' + str(report_obj_info[0]) + '/' + str(report_obj_info[4]),
}
self.log(console, search_tool_name + "_Search DONE")
return [returnVal]
#### Iterate through categories and make separate Search HITs for each category
##
hmm_groups_used = []
for hmm_group in all_HMM_groups_order:
if hmm_group not in input_HMM_ids or input_HMM_ids[hmm_group] == None or len(input_HMM_ids[hmm_group]) == 0:
continue
else:
hmm_groups_used.append(hmm_group)
# Group loop
total_hit_cnts = dict()
accepted_hit_cnts = dict()
hit_cnt_by_genome_and_model = dict()
hit_accept_something = dict()
output_hit_TAB_file_paths = dict()
output_hit_MSA_file_paths = dict()
objects_created_refs_coalesce = dict()
objects_created_refs_by_hmm_id = dict()
for hmm_group_i, hmm_group in enumerate(all_HMM_groups_order):
self.log(console, "PROCESSING HMM GROUP: " + hmm_group) # DEBUG
hit_accept_something[hmm_group] = False
if hmm_group not in hmm_groups_used:
for hmm_id in all_HMM_ids[hmm_group]:
total_hit_cnts[hmm_id] = 0
accepted_hit_cnts[hmm_id] = 0
continue
## iterate through HMMs and scan input_many DBs
#
output_filtered_fasta_file_paths = []
output_hits_flags = []
coalesced_sequenceObjs = []
coalesce_featureIds_element_ordering = []
coalesce_featureIds_genome_ordering = []
html_report_chunks = []
# HMM loop
for hmm_i, hmm_id in enumerate(input_HMM_ids[hmm_group]):
self.log(console, "PROCESSING HMM: " + hmm_id) # DEBUG
# init hit counts
total_hit_cnts[hmm_id] = 0
accepted_hit_cnts[hmm_id] = 0
html_report_chunks.append(None)
# set paths
#
hmmer_dir = os.path.join(self.output_dir, hmm_id) # this must match above
if not os.path.exists(hmmer_dir):
os.makedirs(hmmer_dir)
HMM_file_path = os.path.join(hmmer_dir, hmm_id + ".hmm")
# create HMM file
with open(HMM_file_path, 'w', 0) as hmm_handle:
hmm_handle.write("\n".join(HMM_bufs[hmm_id]) + "\n")
if not os.path.isfile(HMM_file_path):
raise ValueError("HMMER_BUILD failed to create HMM file '" + HMM_file_path + "'")
elif not os.path.getsize(HMM_file_path) > 0:
raise ValueError("HMMER_BUILD created empty HMM file '" + HMM_file_path + "'")
### Construct the HMMER_SEARCH command
#
# SYNTAX (from http://eddylab.org/software/hmmer3/3.1b2/Userguide.pdf)
#
# hmmsearch --tblout <TAB_out> -A <MSA_out> --noali --notextw -E <e_value> -T <bit_score> <hmmfile> <seqdb>
#
hmmer_search_bin = self.HMMER_SEARCH
hmmer_search_cmd = [hmmer_search_bin]
# check for necessary files
if not os.path.isfile(hmmer_search_bin):
raise ValueError("no such file '" + hmmer_search_bin + "'")
if not os.path.isfile(HMM_file_path):
raise ValueError("no such file '" + HMM_file_path + "'")
elif not os.path.getsize(HMM_file_path):
raise ValueError("empty file '" + HMM_file_path + "'")
if not os.path.isfile(many_forward_reads_file_path):
raise ValueError("no such file '" + many_forward_reads_file_path + "'")
elif not os.path.getsize(many_forward_reads_file_path):
raise ValueError("empty file '" + many_forward_reads_file_path + "'")
output_hit_TAB_file_path = os.path.join(hmmer_dir, hmm_id + '.hitout.txt')
output_hit_MSA_file_path = os.path.join(hmmer_dir, hmm_id + '.msaout.txt')
output_filtered_fasta_file_path = os.path.join(hmmer_dir, hmm_id + '.output_filtered.fasta')
output_hit_TAB_file_paths[hmm_id] = output_hit_TAB_file_path
output_hit_MSA_file_paths[hmm_id] = output_hit_MSA_file_path
output_filtered_fasta_file_paths.append(output_filtered_fasta_file_path)
# this is command for basic search mode
hmmer_search_cmd.append('--tblout')
hmmer_search_cmd.append(output_hit_TAB_file_path)
hmmer_search_cmd.append('-A')
hmmer_search_cmd.append(output_hit_MSA_file_path)
hmmer_search_cmd.append('--noali')
hmmer_search_cmd.append('--notextw')
hmmer_search_cmd.append('-E') # can't use -T with -E, so we'll use -E
hmmer_search_cmd.append(str(params['e_value']))
hmmer_search_cmd.append(HMM_file_path)
hmmer_search_cmd.append(many_forward_reads_file_path)
# options
#if 'maxaccepts' in params:
# if params['maxaccepts']:
# hmmer_search_cmd.append('-max_target_seqs')
# hmmer_search_cmd.append(str(params['maxaccepts']))
# Run HMMER, capture output as it happens
#
#self.log(console, 'RUNNING HMMER_SEARCH:')
#self.log(console, ' '+' '.join(hmmer_search_cmd))
#report += "\n"+'running HMMER_SEARCH:'+"\n"
#report += ' '+' '.join(hmmer_search_cmd)+"\n"
p = subprocess.Popen(hmmer_search_cmd,
cwd=self.output_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False)
while True:
line = p.stdout.readline()
if not line:
break
#self.log(console, line.replace('\n', ''))
p.stdout.close()
p.wait()
#self.log(console, 'return code: ' + str(p.returncode))
if p.returncode != 0:
raise ValueError('Error running HMMER_SEARCH, return code: ' + str(p.returncode) +
'\n\n' + '\n'.join(console))
# Check for output
if not os.path.isfile(output_hit_TAB_file_path):
raise ValueError("HMMER_SEARCH failed to create TAB file '" + output_hit_TAB_file_path + "'")
elif not os.path.getsize(output_hit_TAB_file_path) > 0:
raise ValueError("HMMER_SEARCH created empty TAB file '" + output_hit_TAB_file_path + "'")
if not os.path.isfile(output_hit_MSA_file_path):
raise ValueError("HMMER_SEARCH failed to create MSA file '" + output_hit_MSA_file_path + "'")
elif not os.path.getsize(output_hit_MSA_file_path) > 0:
#raise ValueError("HMMER_SEARCH created empty MSA file '"+output_hit_MSA_file_path+"'")
#self.log(console,"HMMER_SEARCH created empty MSA file '"+output_hit_MSA_file_path+"'")
self.log(console, "\tHMMER_SEARCH: No hits")
continue
# DEBUG
#self.log(console, "DEBUG: output_hit_TAB_file_path: '"+str(output_hit_TAB_file_path))
#self.log(console, "DEBUG: output_hit_MSA_file_path: '"+str(output_hit_MSA_file_path))
#report = "TAB:\n\n"
#with open (output_hit_TAB_file_path, 'r') as output_handle:
# for line in output_handle:
# report += line+"\n"
#report += "\n\nMSA:\n\n"
#with open (output_hit_MSA_file_path, 'r') as output_handle:
# for line in output_handle:
# report += line+"\n"
#self.log(console, report)
# Get hit beg and end positions from Stockholm format MSA output
#
#self.log(console, 'PARSING HMMER SEARCH MSA OUTPUT')
hit_beg = dict()
hit_end = dict()
longest_alnlen = dict()
with open(output_hit_MSA_file_path, 'r', 0) as output_hit_MSA_file_handle:
for MSA_out_line in output_hit_MSA_file_handle.readlines():
MSA_out_line = MSA_out_line.strip()
if MSA_out_line.startswith('#=GS '):
hit_rec = re.sub('#=GS ', '', MSA_out_line)
hit_rec = re.sub('\s+.*?$', '', hit_rec)
hit_range = re.sub('^.*\/', '', hit_rec)
hit_id = re.sub('\/[^\/]+$', '', hit_rec)
(beg_str, end_str) = hit_range.split('-')
beg = int(beg_str)
end = int(end_str)
this_alnlen = abs(end - beg) + 1
if hit_id in hit_beg:
if this_alnlen > longest_alnlen[hit_id]:
hit_beg[hit_id] = int(beg_str)
hit_end[hit_id] = int(end_str)
longest_alnlen[hit_id] = this_alnlen
#self.log(console, "ADDING HIT_BEG for "+hit_id) # DEBUG
else:
hit_beg[hit_id] = int(beg_str)
hit_end[hit_id] = int(end_str)
longest_alnlen[hit_id] = this_alnlen
#self.log(console, "ADDING HIT_BEG for "+hit_id) # DEBUG
# Measure length of hit sequences
#
#self.log(console, 'MEASURING HIT GENES LENGTHS')
hit_seq_len = dict()
with open(many_forward_reads_file_path, 'r', 0) as many_forward_reads_file_handle:
last_id = None
last_buf = ''
for fasta_line in many_forward_reads_file_handle.readlines():
fasta_line = fasta_line.strip()
if fasta_line.startswith('>'):
if last_id != None:
id_untrans = last_id
# BLAST seems to make this translation now when id format has simple 'kb|blah' format
id_trans = re.sub('\|', ':', id_untrans)
#if id_untrans in hit_order or id_trans in hit_order:
if id_untrans in hit_beg or id_trans in hit_beg:
hit_seq_len[id_untrans] = len(last_buf)
#self.log(console, "ADDING HIT_SEQ_LEN for "+id_untrans) # DEBUG
header = re.sub('^>', '', fasta_line)
last_id = re.sub('\s+.*?$', '', header)
last_buf = ''
else:
last_buf += fasta_line
if last_id != None:
id_untrans = last_id
# BLAST seems to make this translation now when id format has simple 'kb|blah' format
id_trans = re.sub('\|', ':', id_untrans)
#if id_untrans in hit_order or id_trans in hit_order:
if id_untrans in hit_beg or id_trans in hit_beg:
hit_seq_len[id_untrans] = len(last_buf)
#self.log(console, "ADDING HIT_SEQ_LEN for "+id_untrans) # DEBUG
### Parse the HMMER tabular output and store ids to filter many set to make filtered object to save back to KBase
#
#self.log(console, 'PARSING HMMER SEARCH TAB OUTPUT')
hit_seq_ids = dict()
accept_fids = dict()
output_hit_TAB_file_handle = open(output_hit_TAB_file_path, "r", 0)
output_aln_buf = output_hit_TAB_file_handle.readlines()
output_hit_TAB_file_handle.close()
accepted_hit_cnt = 0
high_bitscore_line = dict()
high_bitscore_score = dict()
#high_bitscore_ident = dict()
#longest_alnlen = dict()
hit_order = []
hit_buf = []
#header_done = False
for line in output_aln_buf:
if line.startswith('#'):
#if not header_done:
# hit_buf.append(line)
continue
#header_done = True
#self.log(console,'HIT LINE: '+line) # DEBUG
hit_info = re.split('\s+', line)
hit_seq_id = hit_info[0]
hit_accession = hit_info[1]
query_name = hit_info[2]
query_accession = hit_info[3]
hit_e_value = float(hit_info[4])
hit_bitscore = float(hit_info[5])
hit_bias = float(hit_info[6])
hit_e_value_best_dom = float(hit_info[7])
| |
<reponame>AlainDaccache/Quantropy
from datetime import timedelta
from matilda.fundamental_analysis.supporting_metrics import *
from matilda.fundamental_analysis.financial_statements import *
'''
Profitability ratios measure a company’s ability to generate income relative to revenue, balance sheet assets, operating costs, and equity.
'''
def net_profit_margin(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The net profit margin compares net income to net sales, i.e. how much of each dollar of revenue is kept as profit.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Net Profit Margin} = \\frac{\\text{Net Income}}{\\text{Net Sales}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return net_income(**kwargs) / net_sales(**kwargs)
def gross_profit_margin(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The gross margin ratio compares the gross profit of a company to its net sales to show how much profit a company makes after paying its cost of goods sold.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Gross Margin Ratio} = \\frac{\\text{Gross Profit}}{\\text{Net Sales}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return gross_profit(**kwargs) / net_sales(**kwargs)
def operating_profit_margin(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The operating margin ratio compares the operating income of a company to its net sales to determine operating efficiency.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Operating Margin Ratio} = \\frac{\\text{Operating Income}}{\\text{Net Sales}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return operating_income(**kwargs) / net_sales(**kwargs)
def return_on_assets(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The return on assets ratio measures how efficiently a company is using its assets to generate profit.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Return on Assets} = \\frac{\\text{Net Income}}{\\text{Total Assets}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return net_income(**kwargs) / total_assets(**kwargs)
def return_on_equity(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The return on equity ratio measures how efficiently a company is using its equity to generate profit.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Return on Equity} = \\frac{\\text{Net Income}}{\\text{Shareholder's equity}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return net_income(**kwargs) / total_shareholders_equity(**kwargs)
def return_on_net_assets(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The return on net assets ratio compares net income to fixed assets plus working capital.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Return on Net Assets} = \\frac{\\text{Net Income}}{\\text{Fixed Assets + Working Capital}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    net_assets = net_property_plant_equipment(**kwargs) + net_working_capital(**kwargs)
    return net_income(**kwargs) / net_assets
def return_on_invested_capital(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY',
                               invested_capital_operating_approach=True):
    """
    The return on invested capital ratio compares after-tax operating profit (NOPAT) to the capital invested in the business.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :param invested_capital_operating_approach: forwarded to invested_capital() as its operating_approach flag.
    :return: .. math:: \\text{Return on Invested Capital} = \\frac{\\text{NOPAT}}{\\text{Invested Capital}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    nopat = net_operating_profit_after_tax(**kwargs)
    return nopat / invested_capital(operating_approach=invested_capital_operating_approach, **kwargs)
def return_on_capital_employed(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    The return on capital employed ratio compares EBIT to the capital the company puts to work.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Return on Capital Employed} = \\frac{\\text{EBIT}}{\\text{Capital Employed}}
    """
    kwargs = dict(stock=stock, date=date, lookback_period=lookback_period, period=period)
    return earnings_before_interest_and_taxes(**kwargs) / capital_employed(**kwargs)
def cash_flow_return_on_investment(stock, date=None, lookback_period: timedelta = timedelta(days=0),
                                   period: str = 'FY'):
    """
    Not implemented yet: placeholder that always returns None.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Cash Flow Return on Investment} = \\frac{\\text{Cash Flow}}{\\text{Market Recapitalisation}}
    """
    pass
def efficiency_ratio(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'FY'):
    """
    Not implemented yet: placeholder that always returns None.

    :param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
    :param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, datetime.now() is used.
    :param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
    :param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
    :return: .. math:: \\text{Efficiency Ratio} = \\frac{\\text{Non-Interest Expense}}{\\text{Revenue}}
    """
    pass
def net_gearing(stock, date=None, lookback_period: timedelta = | |
self.loc: DistributionIF = FloatConst(loc)
elif isinstance(loc, int):
self.loc = FloatConst(float(loc))
else:
self.loc = loc
if isinstance(scale, float):
self.scale: DistributionIF = FloatConst(scale)
elif isinstance(loc, int):
self.scale = FloatConst(float(scale))
else:
self.scale = scale
return
def __repr__(self) -> str:
return (f"{self.__class__.__name__}"
f"({repr(self.a)}, {repr(self.loc)}, {repr(self.scale)})")
def get(self) -> float:
""" Return a random value from gamma distribution.
Examples:
>>> for _ in range(100):
... x = Gamma(1, 0, 0.627).get()
... assert isinstance(x, float)
... assert 0 <= x
"""
a = self.a.get()
loc = self.loc.get()
scale = self.scale.get()
rv = stats.gamma.rvs(
a=a,
loc=loc,
scale=scale,
size=None,
)
return float(rv)
def get_many(self, n: int) -> List[float]:
""" Return n random values from the gamma distribution.
Examples:
>>> for x in Gamma(1, 1, 100).get_many(100):
... assert isinstance(x, float)
... assert 1 <= x
"""
a = self.a.get()
loc = self.loc.get()
scale = self.scale.get()
rv = stats.gamma.rvs(
a=a,
loc=loc,
scale=scale,
size=n,
)
return rv.tolist()
def seq(self, n: int) -> List[float]:
""" Return n quantiles from the gamma distribution.
Examples:
>>> for x in Gamma(1, 5, 1).seq(100):
... assert isinstance(x, float)
... assert 5 <= x
"""
a = self.a.get()
loc = self.loc.get()
scale = self.scale.get()
quantiles = np.linspace(0.01, 0.99, num=n)
return (stats.gamma
.ppf(quantiles, a=a, loc=loc, scale=scale)
.astype(float)
.tolist())
class Normal(PDF):
    """Normal (Gaussian) distribution wrapper around ``scipy.stats.norm``."""

    def __init__(
        self,
        loc: DistributionIF,
        scale: DistributionIF
    ) -> None:
        """ Take a random float from the normal distribution.

        :param loc: mean; a plain number or a distribution to draw it from.
        :param scale: standard deviation; a plain number or a distribution.
        """
        if isinstance(loc, float):
            self.loc: DistributionIF = FloatConst(loc)
        elif isinstance(loc, int):
            self.loc = FloatConst(float(loc))
        else:
            self.loc = loc
        if isinstance(scale, float):
            self.scale: DistributionIF = FloatConst(scale)
        elif isinstance(scale, int):
            # BUG FIX: previously tested `isinstance(loc, int)`, so an int
            # `scale` was only wrapped when `loc` also happened to be an int.
            self.scale = FloatConst(float(scale))
        else:
            self.scale = scale
        return

    def __repr__(self) -> str:
        # BUG FIX: the second fragment was missing its f-prefix, so the repr
        # printed the literal text "({repr(self.loc)}, {repr(self.scale)})".
        return (f"{self.__class__.__name__}"
                f"({repr(self.loc)}, {repr(self.scale)})")

    def get(self) -> float:
        """ Return a random value from normal distribution.

        Examples:
            >>> for _ in range(100):
            ...     x = Normal(0, 0.627).get()
            ...     assert isinstance(x, float)
        """
        loc = self.loc.get()
        scale = self.scale.get()
        rv = stats.norm.rvs(
            loc=loc,
            scale=scale,
            size=None,
        )
        # Coerce the scipy scalar to a plain Python float.
        return float(rv)

    def get_many(self, n: int) -> List[float]:
        """ Return n random values from the normal distribution.

        Examples:
            >>> for x in Normal(1, 100).get_many(100):
            ...     assert isinstance(x, float)
        """
        loc = self.loc.get()
        scale = self.scale.get()
        rv = stats.norm.rvs(
            loc=loc,
            scale=scale,
            size=n,
        )
        return rv.tolist()

    def seq(self, n: int) -> List[float]:
        """ Return n quantiles from the normal distribution.

        Examples:
            >>> for x in Normal(5, 1).seq(100):
            ...     assert isinstance(x, float)
        """
        loc = self.loc.get()
        scale = self.scale.get()
        quantiles = np.linspace(0.01, 0.99, num=n)
        return (stats.norm
                .ppf(quantiles, loc=loc, scale=scale)
                .astype(float)
                .tolist())
class Range(PMF):
    """Discrete uniform distribution over the closed interval [min, max]."""

    def __init__(self, min: Distribution[int], max: Distribution[int]) -> None:
        """ Take a random int between min and max.

        :param min: inclusive lower bound (plain int or distribution).
        :param max: inclusive upper bound (plain int or distribution).
        """
        # NOTE: the parameter names shadow the builtins, but they are part of
        # the public keyword interface and are therefore kept.
        if isinstance(min, int):
            self.min: Distribution[int] = IntConst(min)
        else:
            self.min = min
        if isinstance(max, int):
            self.max: Distribution[int] = IntConst(max)
        else:
            self.max = max
        return

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({repr(self.min)}, {repr(self.max)})"

    def get(self) -> int:
        """ Return a random integer between min and max.

        Examples:
            >>> for _ in range(100):
            ...     x = Range(0, 5).get()
            ...     assert isinstance(x, int)
            ...     assert 0 <= x <= 5
        """
        min_ = self.min.get()
        max_ = self.max.get() + 1
        # BUG FIX: rvs() returns a numpy integer scalar, which fails the
        # documented `isinstance(x, int)` contract; coerce like Gamma/Normal
        # coerce their results to float().
        return int(stats.randint.rvs(min_, max_))

    def get_many(self, n: int) -> List[int]:
        """ Return n random values between min and max.

        Examples:
            >>> for x in Range(0, 10).get_many(100):
            ...     assert isinstance(x, int)
            ...     assert 0 <= x <= 10
        """
        min_ = self.min.get()
        max_ = self.max.get() + 1
        rv = stats.randint.rvs(min_, max_, size=n)
        return rv.tolist()

    def seq(self, n: int) -> List[int]:
        """ Return up to n unique quantiles between min and max.

        Examples:
            >>> for x in Range(0, 5).seq(100):
            ...     assert isinstance(x, int)
            ...     assert 0 <= x <= 5
        """
        min_ = self.min.get()
        max_ = self.max.get() + 1
        li = list(range(min_, max_))
        return self._find_boundaries(li, n)

    @staticmethod
    def _inner_boundaries(li: List[int], p: int) -> List[int]:
        # Pick p - 1 roughly evenly spaced interior elements of li.
        boundaries = list()
        li = list(li)
        while p > 1 and len(li) > 0:
            i = len(li) // p
            boundaries.append(li[i])
            p -= 1
            li = li[i + 1:]
        return boundaries

    @classmethod
    def _find_boundaries(cls, li: List[int], n: int) -> List[int]:
        # Reduce li to at most n representative elements, keeping the ends.
        if len(li) <= n:
            return li
        elif n == 1:
            return [li[len(li) // 2]]
        head = li[0]
        tail = li[-1]
        n -= 1
        if n < 2:
            return [head, tail]
        else:
            return [head] + cls._inner_boundaries(li[1: -1], n) + [tail]
class Binomial(PMF):
    """Binomial distribution wrapper around ``scipy.stats.binom``."""

    def __init__(
        self,
        n: Distribution[int],
        p: Distribution[float],
        loc: Distribution[int] = IntConst(0),
    ) -> None:
        """ Take a random integer from a binomial distribution.

        :param n: number of trials (plain int or distribution).
        :param p: success probability (plain float or distribution).
        :param loc: shift applied to the result (plain int or distribution).
        """
        if isinstance(n, int):
            self.n: Distribution[int] = IntConst(n)
        else:
            self.n = n
        if isinstance(loc, int):
            self.loc: Distribution[int] = IntConst(loc)
        else:
            self.loc = loc
        if isinstance(p, float):
            self.p: DistributionIF = FloatConst(p)
        else:
            self.p = p
        return

    def __repr__(self) -> str:
        return (f"{self.__class__.__name__}"
                f"({repr(self.n)}, {repr(self.p)}, {repr(self.loc)})")

    def get(self) -> int:
        """ Return a random integer from the binomial distribution.

        Examples:
            >>> for _ in range(100):
            ...     x = Binomial(3, 0.5, 0).get()
            ...     assert isinstance(x, int)
        """
        n = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        # BUG FIX: rvs() returns a numpy integer scalar, which fails the
        # documented `isinstance(x, int)` contract; coerce to a plain int.
        return int(stats.binom.rvs(n, p, loc=loc, size=None))

    def get_many(self, n: int) -> List[int]:
        """ Return n random integers from a binomial distribution.

        Examples:
            >>> for x in Binomial(3, 0.5, 1).get_many(100):
            ...     assert isinstance(x, int)
            ...     assert 1 <= x
        """
        n_ = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        rv = stats.binom.rvs(n_, p, loc=loc, size=n)
        return rv.tolist()

    def seq(self, n: int) -> List[int]:
        """ Return up to n unique quantiles from a binomial distribution.

        Examples:
            >>> for x in Binomial(3, 0.2, 1).seq(10):
            ...     assert isinstance(x, int), "not int"
            ...     assert 1 <= x <= 4, "domain wrong"
        """
        n_ = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        quantiles = np.linspace(0.01, 0.99, n)
        return (np.unique(stats.binom.ppf(quantiles, n_, p, loc=loc))
                .astype(int)
                .tolist())
class NegBinomial(PMF):
    """Negative binomial distribution wrapper around ``scipy.stats.nbinom``."""

    def __init__(
        self,
        n: Distribution[int],
        p: Distribution[float],
        loc: Distribution[int] = IntConst(0),
    ) -> None:
        """ Take a random int from the negative binomial distribution.

        :param n: number of successes (plain int or distribution); floats are rejected.
        :param p: success probability (plain float or distribution).
        :param loc: shift applied to the result (plain int or distribution).
        :raises ValueError: if n is given as a float.
        """
        if isinstance(n, int):
            self.n: Distribution[int] = IntConst(n)
        elif isinstance(n, float):
            raise ValueError(n)
        else:
            self.n = n
        if isinstance(loc, int):
            self.loc: Distribution[int] = IntConst(loc)
        else:
            self.loc = loc
        if isinstance(p, float):
            self.p: Distribution[float] = FloatConst(p)
        else:
            self.p = p
        return

    def __repr__(self) -> str:
        return (f"{self.__class__.__name__}"
                f"({repr(self.n)}, {repr(self.p)}, {repr(self.loc)})")

    def get(self) -> int:
        """ Return a random integer from the negative binomial distribution.

        Examples:
            >>> for _ in range(100):
            ...     x = NegBinomial(3, 0.5, 0).get()
            ...     assert isinstance(x, int)
            ...     assert x >= 0
        """
        n = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        # BUG FIX: rvs() returns a numpy integer scalar, which fails the
        # documented `isinstance(x, int)` contract; coerce to a plain int.
        return int(stats.nbinom.rvs(n, p, loc=loc, size=None))

    def get_many(self, n: int) -> List[int]:
        """ Return n random values from the negative binomial distribution.

        Examples:
            >>> for x in NegBinomial(3, 0.5, 1).get_many(100):
            ...     assert isinstance(x, int)
            ...     assert 1 <= x
        """
        n_ = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        rv = stats.nbinom.rvs(n_, p, loc=loc, size=n)
        return rv.tolist()

    def seq(self, n: int) -> List[int]:
        """ Return up to n unique quantiles from the negative binomial distribution.

        Examples:
            >>> for x in NegBinomial(3, 0.2, 1).seq(10):
            ...     assert isinstance(x, int), "not int"
            ...     assert 1 <= x
        """
        n_ = self.n.get()
        loc = self.loc.get()
        p = self.p.get()
        quantiles = np.linspace(0.01, 0.99, n)
        return (np.unique(stats.nbinom.ppf(quantiles, n_, p, loc=loc))
                .astype(int)
                .tolist())
class HyperGeometric(PMF):
def __init__(
self,
n_success: Distribution[int],
n: Distribution[int],
population: Distribution[int],
loc: Distribution[int] = IntConst(0),
) -> None:
""" Take a random int from the hypergeometric distribution.
"""
if isinstance(n_success, int):
self.n_success: Distribution[int] = IntConst(n_success)
elif isinstance(n_success, float):
raise ValueError(n_success)
else:
self.n_success = n_success
if isinstance(n, int):
self.n: Distribution[int] = IntConst(n)
elif isinstance(n, float):
raise ValueError(n)
else:
self.n = n
if isinstance(population, int):
self.population: Distribution[int] = IntConst(population)
elif isinstance(population, float):
raise ValueError(population)
else:
self.population = population
if isinstance(loc, int):
self.loc: Distribution[int] = IntConst(loc)
else:
self.loc = loc
return
def __repr__(self) -> str:
return (f"{self.__class__.__name__}"
f"({repr(self.n_success)}, {repr(self.n)}, "
f"{repr(self.population)}, {repr(self.loc)})")
def get(self) -> int:
""" Return a random integer from the hypergeometric distribution.
Examples:
>>> for _ in range(100):
... x = HyperGeometric(7, 12, 20, 0).get()
... assert isinstance(x, int)
... assert x >= 0
"""
n_success = self.n_success.get()
n = self.n.get()
population = self.population.get()
loc = self.loc.get()
return stats.hypergeom.rvs(
M=population,
| |
import csv
import sys
import re
from collections import OrderedDict
###################### globals ##########################
DB_DIR = "./files/"
META_FILE = "./files/metadata.txt"
AGGREGATE = ["min", "max", "sum", "avg", "count", "distinct"]
###################### functions ##########################
def make_schema(filename):
    ''' Read the table schema from a metadata file into a dict.

    The metadata file is a sequence of blocks of the form:
        <begin_table>
        <table name>
        <column name>
        ...
        <end_table>

    :param filename: path of the metadata file.
    :return: dict mapping each table name to the list of its column names.
    '''
    try:
        with open(filename) as f:
            lines = f.readlines()
    except OSError:
        # Narrowed from a bare `except:` over the whole body, which also
        # swallowed programming errors; only a real I/O failure aborts now.
        sys.exit("Couldn't read metadata file.")
    out = {}
    # flag: 0 = outside a table block, 1 = next line is the table name,
    # 2 = reading column names.
    flag = 0
    for x in lines:
        stripped = x.strip()
        if stripped == "<end_table>":
            flag = 0
        elif flag == 1:
            cur = stripped
            out[cur] = []
            flag = 2
        elif flag == 2:
            out[cur].append(stripped)
        if stripped == "<begin_table>":
            flag = 1
    return out
def populate_fields(tName):
    ''' Read every row of a CSV file into a list of lists.

    :param tName: path of the CSV file to read.
    :return: list of rows, each row a list of string cell values.
    '''
    try:
        with open(tName, 'r') as f:
            return list(csv.reader(f))
    except OSError:
        # Narrowed from a bare `except:` so only genuine I/O failures abort.
        sys.exit("Couldn't open file")
def join_tables(c_names,t_names,schema):
    '''Print the cross join (cartesian product) of two tables, projected on c_names.

    Mutates its arguments in place: temporary "full" and "basic" entries are
    added to `schema` (and deleted at the end), and `t_names` is replaced by
    the single entry ["full"].

    :param c_names: requested column names, or ['*'] for every column.
    :param t_names: two-element list of table names (swapped internally).
    :param schema: dict mapping table name -> list of column names.
    '''
    print("in join_tables ")
    try:
        # t_names.reverse()
        t_names[0],t_names[1] = t_names[1],t_names[0]
        # "basic" = unqualified column names, "full" = table.column names.
        schema["basic"] = schema[t_names[1]] + schema[t_names[0]]
        schema["full"] = []
        for i in schema[t_names[0]]:
            schema["full"].append(t_names[0] + '.' + i)
        for i in schema[t_names[1]]:
            schema["full"].append(t_names[1] + '.' + i)
        fetched_data = []
        list1 = populate_fields(DB_DIR + t_names[0] + '.csv')
        list2 = populate_fields(DB_DIR + t_names[1] + '.csv')
        # Collapse t_names down to the synthetic "full" table.
        t_names.remove(t_names[0])
        t_names.remove(t_names[0])
        t_names.insert(0,"full")
        # Cartesian product of the two row sets.
        for item1 in list1:
            for item2 in list2:
                fetched_data.append(item2 + item1)
        if(c_names[0] == '*' and len(c_names) == 1):
            c_names = schema[t_names[0]]
        # print header
        for i in range(len(c_names)):
            print(c_names[i], end=" ")
        print("\n")
        print("schema[basic]", schema['basic'])
        for data in fetched_data:
            for col in c_names:
                if '.' in col:
                    # Qualified name: look up in the "full" schema.
                    print(data[schema[t_names[0]].index(col)], end = "\t")
                elif schema['basic'].count(col) > 1:
                    # Unqualified name present in both tables is ambiguous.
                    sys.exit("\nError: Column occurs in more than one table. Please flag!\n\n")
                else:
                    print(data[schema["basic"].index(str(col))], end = "\t")
            print("\n")
        del schema['full']
        del schema['basic']
    except:
        # NOTE(review): this bare except also catches the SystemExit raised by
        # the inner sys.exit above, replacing its specific message — confirm.
        sys.exit("Error! Please flag syntax")
def processQuery(query,schema):
    '''Parse one SQL SELECT statement and dispatch it to the right executor.

    Handles plain projection, '*', DISTINCT, a single aggregate func(col),
    WHERE filters, and two-table joins; routes to processWhere,
    processWhereJoin, whereJoinAggregate, join_tables, distinctMany,
    aggregate or selectColumns accordingly.

    :param query: raw query string, optionally ';'-terminated.
    :param schema: dict mapping table name -> list of column names.
    '''
    # print("in processQuery")
    # try:
    if "from" not in query:
        sys.exit("Incorrect Syntax: No FROM keyword exists in query.")
    # Collapse runs of spaces and keep only the text before the first ';'.
    query = (re.sub(' +',' ',query)).strip();
    query = query.split(';')
    query = query[0]
    # else:
    item_1 = query.split('from');
    # removing the space before "from" in query
    item_1[0] = (re.sub(' +',' ',item_1[0])).strip();
    items = []
    items.append(0)
    if "select" not in item_1[0].lower():
        sys.exit("Incorrect Syntax: query should begin with the SELECT keyword.")
    items.append(item_1[0][7:]) # appending the part after select to items
    items[1] = (re.sub(' +',' ',items[1])).strip();
    # Rebuild items[1] as ["select", ("distinct" if present,) <column list>].
    l = []
    l.append("select")
    if "distinct" in items[1]:
        # Drop the leading "distinct " (9 characters) from the column list.
        items[1] = items[1][9:]
        l.append("distinct")
    l.append(items[1])
    items[1] = l
    item_new = ""
    if "distinct" in items[1][1]:
        # DISTINCT query: remember the keyword, shift the column list up.
        item_new = items[1][1];
        item_new = (re.sub(' +',' ',item_new)).strip()
        items[1][1] = items[1][2]
    # Split the FROM part on WHERE: items[2][0] = tables, items[2][1] = condition.
    item_1[1] = (re.sub(' +',' ',item_1[1])).strip();
    temp = item_1[1].split('where');
    colStr = items[1][1];
    colStr = (re.sub(' +',' ',colStr)).strip()
    items.append(temp)
    c_names = colStr.split(',');
    # storing the column names in c_names after removing spaces
    for i in range(len(c_names)):
        c_names[c_names.index(c_names[i])] = (re.sub(' +',' ',c_names[i])).strip();
    tableStr = items[2][0]
    tableStr = (re.sub(' +',' ',tableStr)).strip();
    t_names = tableStr.split(',')
    for i in range(len(t_names)):
        t_names[t_names.index(t_names[i])] = (re.sub(' +',' ',t_names[i])).strip();
    for i in range(len(t_names)):
        if t_names[i] not in schema.keys():
            sys.exit("Error: Table doesn't exist! Please flag.")
    # res collects column tokens that look like aggregate calls, e.g. "max(a)".
    res = [i for i in c_names if '(' in i]
    if len(t_names) == 1:
        if len(items[2]) > 1:
            # Single table with a WHERE clause.
            items[2][0] = (re.sub(' +',' ',items[2][0])).strip()
            items[2][1] = (re.sub(' +',' ',items[2][1])).strip()
            processWhere(items[2][1],c_names,t_names,schema)
            return
    else:
        if len(items[2]) > 1 and len(res) == 0:
            # Two tables, WHERE clause, no aggregate.
            items[2][0] = (re.sub(' +',' ',items[2][0])).strip()
            items[2][1] = (re.sub(' +',' ',items[2][1])).strip()
            processWhereJoin(items[2][1],c_names,t_names,schema)
            return
        elif len(items[2]) > 1 and len(res) == 1:
            # Two tables, WHERE clause, exactly one aggregate column.
            items[2][0] = (re.sub(' +',' ',items[2][0])).strip()
            items[2][1] = (re.sub(' +',' ',items[2][1])).strip()
            if not len(c_names) == 1:
                sys.exit("Incorrect syntax!\n")
            names = []
            a1 = c_names[0].split('(')
            names.append((re.sub(' +',' ',a1[0])).strip())
            # NOTE(review): a1[1][0] keeps only the FIRST character after '(',
            # truncating multi-character column names — confirm intent.
            names.append(a1[1][0])
            if names[1] not in schema[t_names[0]] and names[1] not in schema[t_names[1]]:
                sys.exit("Error: Column doesn't exist in the table\n")
            elif names[1] == '*':
                sys.exit("Error: Please flag sytax!\n")
            whereJoinAggregate(items[2][1],names[0],names[1],t_names,schema)
            return
        join_tables(c_names,t_names,schema)
        return
    if item_new == "distinct":
        # print("here")
        distinctMany(c_names,t_names,schema)
        return
    if len(c_names) == 1:
        if '(' in c_names[0] and ')' in c_names[0]:
            # Single-table aggregate with no WHERE clause.
            names = []
            a1 = c_names[0].split('(')
            names.append((re.sub(' +',' ',a1[0])).strip())
            # NOTE(review): same first-character truncation as above.
            names.append(a1[1][0])
            if names[1] not in schema[t_names[0]]:
                sys.exit("Error: Column doesn't exist in the table\n")
            if names[1] == '*':
                sys.exit("Error: Please flag sytax!\n")
            colList = []
            tName = DB_DIR + t_names[0] + '.csv'
            fetched_data = populate_fields(tName)
            for data in fetched_data:
                colList.append(int(data[schema[t_names[0]].index(names[1])]))
            aggregate(names[0],colList,t_names[0],schema)
            return
        elif '(' in c_names[0] or ')' in c_names[0]:
            sys.exit("Syntax error")
    selectColumns(c_names,t_names,schema);
    # except:
    #     sys.exit("Error! Please flag syntax")
def whereJoinAggregate(condition, operation, col,t_names,schema):
    '''Cross-join two tables, filter rows by `condition`, aggregate column `col`.

    Mutates `schema` (temporary "full"/"basic" entries, removed at the end)
    and `t_names` (replaced by ["full"]) in place.

    :param condition: WHERE clause text, tokenized on single spaces.
    :param operation: aggregate function name ("min"/"max"/"sum"/"avg"/...).
    :param col: column to aggregate (possibly table-qualified).
    :param t_names: two-element list of table names.
    :param schema: dict mapping table name -> list of column names.
    '''
    try:
        whr = condition.split(" ")
        fetched_data = []
        schema["full"] = []
        list1 = []
        list2 = []
        t_names[0], t_names[1] = t_names[1], t_names[0]
        # "full" = table-qualified column names, "basic" = unqualified ones.
        for i in range(len(schema[t_names[0]])):
            schema["full"].append(t_names[0] + '.' + schema[t_names[0]][i])
        for i in range(len(schema[t_names[1]])):
            schema["full"].append(t_names[1] + '.' + schema[t_names[1]][i])
        schema["basic"] = schema[t_names[1]] + schema[t_names[0]]
        list1 = populate_fields(DB_DIR + t_names[0] + '.csv')
        list2 = populate_fields(DB_DIR + t_names[1] + '.csv')
        # Cartesian product of the two row sets.
        for item1 in list1:
            for item2 in list2:
                fetched_data.append(item2 + item1)
        t_names.remove(t_names[0])
        t_names.remove(t_names[0])
        t_names.insert(0,"full")
        print(col,"\n")
        colList = []
        for data in fetched_data:
            # SECURITY NOTE(review): evaluate() builds a Python expression
            # from the user-supplied WHERE clause which is then eval()'d —
            # arbitrary-code-execution risk on untrusted input.
            check_cond = evaluate(whr,t_names,schema,data)
            if eval(check_cond):
                if '.' in col:
                    colList.append(data[schema[t_names[0]].index(col)])
                elif schema["basic"].count(col) > 1:
                    sys.exit("Error: Column exists in both the columns. Please specify as table_name.col_name")
                else:
                    colList.append(data[schema["basic"].index(col)])
        # NOTE(review): other call sites pass a table-name string here, but
        # this passes the list `t_names` — confirm aggregate()'s expectation.
        aggregate(operation,colList,t_names,schema)
        del schema['full']
        del schema['basic']
    except:
        # NOTE(review): bare except also swallows the inner sys.exit message.
        sys.exit("Error: Please flag syntax.")
def divide_chunks(l, n):
    '''Yield successive chunks of at most n items from list l.

    BUG FIX: the original stepped by n + 1 while slicing n items, silently
    dropping every (n+1)-th element; it also ended with `return l`, whose
    value is discarded in a generator. Stepping by n yields every element
    exactly once.

    :param l: sequence to split.
    :param n: maximum chunk size (must be >= 1).
    '''
    for i in range(0, len(l), n):
        yield l[i:i + n]
def evaluate(whr,t_names,schema,data):
    '''Build a Python boolean expression string from tokenized WHERE clause `whr`.

    Column tokens are replaced by the row's cell values, '=' becomes '==',
    and/or are lowercased; the caller eval()'s the result per row.

    SECURITY NOTE(review): the returned string is eval()'d by callers, so any
    unrecognized token from the user's query is passed through verbatim —
    arbitrary-code-execution risk on untrusted input.

    :param whr: WHERE clause tokens; MUTATED in place (see below).
    :param t_names: single-element list with the (possibly synthetic) table name.
    :param schema: dict mapping table name -> list of column names.
    :param data: one row of cell values, aligned with schema[t_names[0]].
    :return: expression string such as "3==3 and 1==2".
    '''
    check_cond = ""
    # check_cond.append("")
    # First pass: canonicalize alphabetic tokens to full column names.
    # NOTE(review): `if i in j` is a substring match, so a token matching part
    # of several columns is rewritten to the LAST one containing it, and the
    # list is mutated while being iterated — confirm intent.
    for i in whr:
        if i.isalpha():
            for j in schema[t_names[0]]:
                if i in j:
                    whr[whr.index(i)] = j
    # whr = list(divide_chunks(whr, 3))
    # Second pass: substitute values and translate SQL operators to Python.
    for i in whr:
        if i in schema[t_names[0]] :
            check_cond += data[schema[t_names[0]].index(i)]
        elif i.lower() == 'and' or i.lower() == 'or':
            check_cond += ' ' + i.lower() + ' '
        elif i == '=':
            # SQL equality '=' becomes Python '=='.
            check_cond += i*2
        else:
            check_cond += i
    return check_cond
def selectColumns(c_names,t_names,schema):
    '''Project the requested columns of a single table and print them.

    :param c_names: requested column names (['*'] alone selects every column).
    :param t_names: single-element list holding the table name.
    :param schema: dict mapping table name -> list of column names.
    '''
    try:
        if c_names[0] == '*' and len(c_names) == 1:
            c_names = schema[t_names[0]]
        tName = DB_DIR + t_names[0] + '.csv'
        fetched_data = populate_fields(tName)
        for i in c_names:
            if i not in schema[t_names[0]]:
                sys.exit("Error: Field doesn't exist for this table. Please flag the query.")
        print_cols(c_names,t_names,schema)
        print_res(fetched_data,c_names,t_names,schema)
    except Exception:
        # BUG FIX: was a bare `except:`, which also caught the SystemExit
        # raised by the inner sys.exit above and replaced its specific
        # message with this generic one.
        sys.exit('Error: Check syntax')
def processWhere(condition,c_names,t_names,schema):
    '''Filter a single table by `condition`; print rows or apply one aggregate.

    If c_names[0] looks like "func(col)", the matching cells are collected
    and passed to aggregate(); otherwise the matching rows are printed.

    :param condition: WHERE clause text, tokenized on single spaces.
    :param c_names: requested column names (['*'] alone selects every column).
    :param t_names: single-element list holding the table name.
    :param schema: dict mapping table name -> list of column names.
    '''
    try:
        whr = condition.split(" ")
        if(len(c_names) == 1 and c_names[0] == '*'):
            c_names = schema[t_names[0]]
        fetched_data = populate_fields(DB_DIR + t_names[0] + '.csv')
        flag = False
        x = c_names[0]
        if('(' in x and ')' in x):
            # Aggregate form: split "func(col)" into function and column.
            names = []
            a1 = x.split('(')
            a1[1] = (re.sub(' +',' ',a1[1])).split(')')
            del(a1[1][1])
            names.append((re.sub(' +',' ',a1[0])).strip())
            c_names = a1[1]
            print_cols(c_names,t_names,schema)
            tName = DB_DIR + t_names[0] + '.csv'
            fetched_data = populate_fields(tName)
            x_data = []
            for data in fetched_data:
                # SECURITY NOTE(review): eval of a user-derived expression.
                check_cond = evaluate(whr,t_names,schema,data)
                for col in c_names:
                    if eval(check_cond):
                        x_data.append(data[schema[t_names[0]].index(col)])
            # NOTE(review): passes the raw a1[0] rather than the stripped
            # names[0] as the function name — confirm intent.
            aggregate(a1[0], x_data, t_names[0], schema)
            return
        elif '(' in x or ')' in x:
            sys.exit("Please flag syntax.")
        else:
            # Plain projection of matching rows.
            print_cols(c_names,t_names,schema)
            for data in fetched_data:
                check_cond = evaluate(whr,t_names,schema,data)
                for col in c_names:
                    if eval(check_cond):
                        flag = 'checked'
                        print(data[schema[t_names[0]].index(col)],end="\t")
                if flag:
                    # At least one cell printed: end the row.
                    flag = False
                    print("\n")
    except:
        # NOTE(review): bare except also swallows the inner sys.exit message.
        sys.exit("Error! Please flag syntax")
def processWhereJoin(condition,c_names,t_names,schema):
    '''Cross-join two tables, filter by `condition`, and print the projection.

    Mutates `schema` (temporary "full"/"basic" entries, removed at the end)
    and `t_names` (reversed, then replaced by ["full"]) in place.

    :param condition: WHERE clause text, tokenized on single spaces.
    :param c_names: requested column names (['*'] alone selects every column).
    :param t_names: two-element list of table names.
    :param schema: dict mapping table name -> list of column names.
    '''
    try:
        list1 = []
        list2 = []
        t_names.reverse()
        fetched_data = []
        whr = condition.split(" ")
        list1 = populate_fields(DB_DIR + t_names[0] + '.csv')
        list2 = populate_fields(DB_DIR + t_names[1] + '.csv')
        # Cartesian product of the two row sets.
        for item1 in list1:
            for item2 in list2:
                fetched_data.append(item2 + item1)
        # "full" = table-qualified column names, "basic" = unqualified ones.
        schema["full"] = []
        for i in range(len(schema[t_names[1]])):
            schema["full"].append(t_names[1] + '.' + schema[t_names[1]][i])
        for i in range(len(schema[t_names[0]])):
            schema["full"].append(t_names[0] + '.' + schema[t_names[0]][i])
        schema["basic"] = schema[t_names[1]] + schema[t_names[0]]
        t_names.remove(t_names[0])
        t_names.remove(t_names[0])
        t_names.insert(0,"full")
        flag = False
        if(len(c_names) == 1 and c_names[0] == '*'):
            c_names = schema[t_names[0]]
        # Header row.
        for i in c_names:
            print(i, end="\t")
        print("\n")
        for data in fetched_data:
            # SECURITY NOTE(review): eval of a user-derived expression.
            check_cond = evaluate(whr,t_names,schema,data)
            for col in c_names:
                if eval(check_cond):
                    flag = 'checked'
                    if '.' in col:
                        print(data[schema[t_names[0]].index(col)], end="\t")
                    elif schema["basic"].count(col) > 1:
                        sys.exit("Error: Column exists in both the columns. Please specify as table_name.col_name")
                    else:
                        print(data[schema["basic"].index(col)],end="\t")
            if flag:
                # At least one cell printed: end the row.
                flag = False
                print("\n")
            else:
                pass
        del schema['full']
        del schema['basic']
    except:
        # NOTE(review): bare except also swallows the inner sys.exit message.
        sys.exit("Error! Please flag syntax.\n")
def aggregate(func,colList,t_name,schema,c_name=None):
    '''Apply an aggregate function to a column of values and print the result.

    :param func: one of "min", "max", "sum", "avg", "distinct" (case-insensitive).
    :param colList: the column's values (numeric strings or ints); converted
                    to int IN PLACE.
    :param t_name: table name, forwarded to distinct().
    :param schema: schema dict, forwarded to distinct().
    :param c_name: column name for the "distinct" branch. New optional
                   parameter — the original referenced an undefined global
                   `c_name` here, so that branch always raised NameError.
    '''
    try:
        for i in range(len(colList)):
            colList[i] = int(colList[i])
    except ValueError:
        # Narrowed from a bare `except:` — only a non-numeric cell aborts.
        sys.exit("error")
    if len(colList) == 0:
        print("\n")
    else:
        # Literal names instead of the opaque AGGREGATE[i] index lookups.
        op = func.lower()
        if op == "sum":
            print(sum(colList))
        elif op == "avg":
            print(sum(colList)/len(colList))
        elif op == "distinct":
            distinct(colList,c_name,t_name,schema);
        elif op == "max":
            print(max(colList))
        elif op == "min":
            print(min(colList))
        else :
            print("Error: Check the aggregate func!\n")
def distinct(colList,c_name,t_name,schema):
try:
check_cond = t_name + '.' + c_name
print(str(check_cond))
colList = list(OrderedDict.fromkeys(colList))
size_colList = len(colList)
for col in range(size_colList):
print(colList[col],"\n")
| |
'observation_id': 70,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [70],
'message': 'Redecentralization of the Web',
'observation_id': 71,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [71],
'message': 'Redecentralization of the Web',
'observation_id': 72,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [72],
'message': 'Redecentralization of the Web',
'observation_id': 73,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [73],
'message': 'Redecentralization of the Web',
'observation_id': 74,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [74],
'message': 'Redecentralization of the Web',
'observation_id': 75,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [75],
'message': 'Redecentralization of the Web',
'observation_id': 76,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [76],
'message': 'Redecentralization of the Web',
'observation_id': 77,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [77],
'message': 'Redecentralization of the Web',
'observation_id': 78,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [78],
'message': 'Redecentralization of the Web',
'observation_id': 79,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [79],
'message': 'Redecentralization of the Web',
'observation_id': 80,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [80],
'message': 'Redecentralization of the Web',
'observation_id': 81,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [81],
'message': 'Redecentralization of the Web',
'observation_id': 82,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [82],
'message': 'Redecentralization of the Web',
'observation_id': 83,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [83],
'message': 'Redecentralization of the Web',
'observation_id': 84,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [84],
'message': 'Redecentralization of the Web',
'observation_id': 85,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [85],
'message': 'Redecentralization of the Web',
'observation_id': 86,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [86],
'message': 'Redecentralization of the Web',
'observation_id': 87,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [87],
'message': 'Redecentralization of the Web',
'observation_id': 88,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [88],
'message': 'Redecentralization of the Web',
'observation_id': 89,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [89],
'message': 'Redecentralization of the Web',
'observation_id': 90,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [90],
'message': 'Redecentralization of the Web',
'observation_id': 91,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [91],
'message': 'Redecentralization of the Web',
'observation_id': 92,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [92],
'message': 'Redecentralization of the Web',
'observation_id': 93,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [93],
'message': 'Redecentralization of the Web',
'observation_id': 94,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [94],
'message': 'Redecentralization of the Web',
'observation_id': 95,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [95],
'message': 'Redecentralization of the Web',
'observation_id': 96,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [96],
'message': 'Redecentralization of the Web',
'observation_id': 97,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [97],
'message': 'Redecentralization of the Web',
'observation_id': 98,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [98],
'message': 'Redecentralization of the Web',
'observation_id': 99,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [99],
'message': 'Redecentralization of the Web',
'observation_id': 100,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [100],
'message': 'Redecentralization of the Web',
'observation_id': 101,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [101],
'message': 'Redecentralization of the Web',
'observation_id': 102,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [102],
'message': 'Redecentralization of the Web',
'observation_id': 103,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [103],
'message': 'Redecentralization of the Web',
'observation_id': 104,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [104],
'message': 'Redecentralization of the Web',
'observation_id': 105,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [105],
'message': 'Redecentralization of the Web',
'observation_id': 106,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [106],
'message': 'Redecentralization of the Web',
'observation_id': 107,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [107],
'message': 'Redecentralization of the Web',
'observation_id': 108,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [108],
'message': 'Redecentralization of the Web',
'observation_id': 109,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [109],
'message': 'Redecentralization of the Web',
'observation_id': 110,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [110],
'message': 'Redecentralization of the Web',
'observation_id': 111,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [111],
'message': 'Redecentralization of the Web',
'observation_id': 112,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [112],
'message': 'Redecentralization of the Web',
'observation_id': 113,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [113],
'message': 'Redecentralization of the Web',
'observation_id': 114,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [114],
'message': 'Redecentralization of the Web',
'observation_id': 115,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [115],
'message': 'Redecentralization of the Web',
'observation_id': 116,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [116],
'message': 'Redecentralization of the Web',
'observation_id': 117,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [117],
'message': 'Redecentralization of the Web',
'observation_id': 118,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [118],
'message': 'Redecentralization of the Web',
'observation_id': 119,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [119],
'message': 'Redecentralization of the Web',
'observation_id': 120,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [120],
'message': 'Redecentralization of the Web',
'observation_id': 121,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [121],
'message': 'Redecentralization of the Web',
'observation_id': 122,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [122],
'message': 'Redecentralization of the Web',
'observation_id': 123,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [123],
'message': 'Redecentralization of the Web',
'observation_id': 124,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [124],
'message': 'Redecentralization of the Web',
'observation_id': 125,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [125],
'message': 'Redecentralization of the Web',
'observation_id': 126,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [126],
'message': 'Redecentralization of the Web',
'observation_id': 127,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [127],
'message': 'Redecentralization of the Web',
'observation_id': 128,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [128],
'message': 'Redecentralization of the Web',
'observation_id': 129,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [129],
'message': 'Redecentralization of the Web',
'observation_id': 130,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [130],
'message': 'Redecentralization of the Web',
'observation_id': 131,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [131],
'message': 'Redecentralization of the Web',
'observation_id': 132,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [132],
'message': 'Redecentralization of the Web',
'observation_id': 133,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [133],
'message': 'Redecentralization of the Web',
'observation_id': 134,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [134],
'message': 'Redecentralization of the Web',
'observation_id': 135,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [135],
'message': 'Redecentralization of the Web',
'observation_id': 136,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [136],
'message': 'Redecentralization of the Web',
'observation_id': 137,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [137],
'message': 'Redecentralization of the Web',
'observation_id': 138,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [138],
'message': 'Redecentralization of the Web',
'observation_id': 139,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [139],
'message': 'Redecentralization of the Web',
'observation_id': 140,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [140],
'message': 'Redecentralization of the Web',
'observation_id': 141,
| |
<gh_stars>0
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .adc import _adc
from .dac import _quantize_dac
from .w2g import w2g
# Autograd-friendly entry points for the custom DAC/ADC quantization ops.
quantize_input = _quantize_dac.apply
quantize_weight = _quantize_dac.apply
adc = _adc.apply
from .learnable_quantize import Quantizer_train, lq_weight
# Smoothing factor for running statistics -- not consumed in the visible
# portion of this file; TODO confirm where it is used.
MOVING_AVERAGES_FACTOR = 0.9
# Small epsilon constant -- presumably a numeric guard; verify against callers.
EPS = 0.0001
# Standard-normal PPF at 0.75; scales the initial quantization basis in
# crxb_Conv2d.__init__.
NORM_PPF_0_75 = 0.6745
class crxb_Conv2d(nn.Conv2d):
"""
This is the custom conv layer that takes non-ideal effects of ReRAM crossbar into account. It has three functions.
    1) emulate the DAC at the input of the crossbar and quantize the input and weight tensors.
2) map the quantized tensor to the ReRAM crossbar arrays and include non-ideal effects such as noise, ir drop, and
SAF.
    3) emulate the ADC at the output of the crossbar and convert the current back to digital number
to the input of next layers
Args:
ir_drop(bool): switch that enables the ir drop calculation.
device(torch.device): device index to select. It’s a no-op if this argument is a negative integer or None.
gmax(float): maximum conductance of the ReRAM.
        gmin(float): minimum conductance of the ReRAM.
gwire(float): conductance of the metal wire.
gload(float): load conductance of the ADC and DAC.
scaler_dw(float): weight quantization scaler to reduce the influence of the ir drop.
vdd(float): supply voltage.
enable_stochastic_noise(bool): switch to enable stochastic_noise.
freq(float): operating frequency of the ReRAM crossbar.
temp(float): operating temperature of ReRAM crossbar.
crxb_size(int): size of the crossbar.
quantize(int): quantization resolution of the crossbar.
enable_SAF(bool): switch to enable SAF
enable_ec_SAF(bool): switch to enable SAF error correction.
"""
    def __init__(self, in_channels, out_channels, kernel_size, ir_drop, device, gmax, gmin, gwire,
                 gload, scaler_dw=1, vdd=3.3, stride=1, padding=0, dilation=1, enable_noise=True,
                 freq=10e6, temp=300, groups=1, bias=True, crxb_size=64, quantize=8, enable_SAF=False,
                 enable_ec_SAF=False):
        """Set up the crossbar-mapped convolution layer.

        Sizes the crossbar tiling for the flattened weight matrix, stores the
        hardware constants (conductance range, DAC/ADC levels, noise model
        parameters) and initializes the learnable weight-quantizer basis,
        level codes and thresholds.
        """
        super(crxb_Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                          stride, padding, dilation, groups, bias)
        assert self.groups == 1, "currently not support grouped convolution for custom conv"
        self.ir_drop = ir_drop
        self.device = device
        ################## Crossbar conversion #############################
        self.crxb_size = crxb_size
        self.enable_ec_SAF = enable_ec_SAF
        # Index buffer over output channels; stored as a non-trainable Parameter
        # so it moves with the module across devices.
        self.nchout_index = nn.Parameter(torch.arange(self.out_channels), requires_grad=False)
        weight_flatten = self.weight.view(self.out_channels, -1)
        # Number of crossbar tiles and zero-padding along rows (inputs) and
        # columns (output channels) needed to fit the flattened weight.
        self.crxb_row, self.crxb_row_pads = self.num_pad(
            weight_flatten.shape[1], self.crxb_size)
        self.crxb_col, self.crxb_col_pads = self.num_pad(
            weight_flatten.shape[0], self.crxb_size)
        # Output spatial size is computed lazily on the first forward pass.
        self.h_out = None
        self.w_out = None
        self.w_pad = (0, self.crxb_row_pads, 0, self.crxb_col_pads)
        self.input_pad = (0, 0, 0, self.crxb_row_pads)
        weight_padded = F.pad(weight_flatten, self.w_pad,
                              mode='constant', value=0)
        # Reshape to [crxb_col, crxb_row, crxb_size, crxb_size]: one tile per
        # (column-block, row-block) pair; only its shape is used below (w2g).
        weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
                                         self.crxb_row, self.crxb_size).transpose(1, 2)
        ################# Hardware conversion ##############################
        # weight and input levels
        self.qbit = quantize
        self.n_lvl = 2 ** quantize
        self.h_lvl = (self.n_lvl - 2) / 2
        # ReRAM cells
        self.Gmax = gmax  # max conductance
        self.Gmin = gmin  # min conductance
        self.delta_g = (self.Gmax - self.Gmin)  # conductance step
        self.w2g = w2g(self.delta_g, Gmin=self.Gmin, G_SA0=self.Gmax,
                       G_SA1=self.Gmin, weight_shape=weight_crxb.shape, enable_SAF=enable_SAF)
        self.Gwire = gwire
        self.Gload = gload
        # DAC
        self.Vdd = vdd  # unit: volt
        self.delta_v = self.Vdd / (self.n_lvl - 1)
        # Running sums / counter for input and output quantization deltas,
        # accumulated during training and averaged at inference (see forward).
        self.delta_in_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
        self.delta_out_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
        self.counter = nn.Parameter(torch.Tensor(1), requires_grad=False)
        self.scaler_dw = scaler_dw
        ################ Stochastic Conductance Noise setup #########################
        # parameters setup
        self.enable_stochastic_noise = enable_noise
        self.freq = freq  # operating frequency
        self.kb = 1.38e-23  # Boltzmann const
        self.temp = temp  # temperature in kelvin
        self.q = 1.6e-19  # electron charge
        self.tau = 0.5  # Probability of RTN
        self.a = 1.662e-7  # RTN fitting parameter
        self.b = 0.0015  # RTN fitting parameter
        # =================== learn-able Quantized Weight ===================
        x = self.weight
        oc, ic, k1, k2 = x.shape
        nbit = self.qbit
        n = oc * k1 * k2
        init_basis = []
        # Per-channel power-of-two basis scaled by an He-style factor
        # (NORM_PPF_0_75 * sqrt(2/n)), split across the nbit bit planes.
        base = NORM_PPF_0_75 * ((2. / n) ** 0.5) / (2 ** (nbit - 1))
        for j in range(nbit):
            init_basis.append([(2 ** j) * base for i in range(oc)])
        # Extra basis row: per-output-channel weight mean (offset term).
        init_basis.append(x.mean((1, 2, 3)).tolist())
        num_levels = 2 ** nbit
        # initialize level multiplier
        # binary code of each level:
        # shape: [num_levels, nbit+1]
        init_level_multiplier = []
        for i in range(num_levels):
            level_multiplier_i = [0. for j in range(nbit)]
            level_number = i
            for j in range(nbit):
                # Encode level i in {-1, +1} bits (0 bits map to -1).
                binary_code = level_number % 2
                if binary_code == 0:
                    binary_code = -1
                level_multiplier_i[j] = float(binary_code)
                level_number = level_number // 2
            # Trailing 1 multiplies the offset row of the basis.
            level_multiplier_i.append(1.)
            init_level_multiplier.append(level_multiplier_i)
        # initialize threshold multiplier
        # shape: [num_levels-1, num_levels]
        # [[0,0,0,0,0,0,0.5,0.5]
        # [0,0,0,0,0,0.5,0.5,0,]
        # [0,0,0,0,0.5,0.5,0,0,]
        # ...
        # [0.5,0.5,0,0,0,0,0,0,]]
        init_thrs_multiplier = []
        for i in range(1, num_levels):
            # Each threshold is the midpoint of two adjacent levels.
            thrs_multiplier_i = [0. for j in range(num_levels)]
            thrs_multiplier_i[i - 1] = 0.5
            thrs_multiplier_i[i] = 0.5
            init_thrs_multiplier.append(thrs_multiplier_i)
        # [nbit + 1, oc]
        basis = torch.tensor(init_basis, dtype=torch.float32, requires_grad=False).to(device)
        self.register_buffer("basis", basis)
        # [2**nbit, nbit + 1] or [num_levels, nbit + 1]
        self.level_codes = torch.tensor(init_level_multiplier).to(device)
        # [num_levels-1, num_levels]
        self.thrs_multiplier = torch.tensor(init_thrs_multiplier).to(device)
        # print(self.level_codes.is_cuda, self.thrs_multiplier.is_cuda)
        # [num_levels, oc]
        self.register_buffer('levels', torch.matmul(self.level_codes, basis))
        # [num_levels-1, oc]
        self.register_buffer('thrs', torch.matmul(self.thrs_multiplier, self.levels))
def num_pad(self, source, target):
crxb_index = math.ceil(source / target)
num_padding = crxb_index * target - source
return crxb_index, num_padding
def forward(self, input):
# 1. input data and weight quantization
with torch.no_grad():
self.delta_w = self.weight.abs().max() * self.scaler_dw
if self.training:
self.counter.data += 1
self.delta_x = input.abs().max() / self.h_lvl
self.delta_in_sum.data += self.delta_x
else:
self.delta_x = self.delta_in_sum.data / self.counter.data
input_clip = F.hardtanh(input, min_val=-self.h_lvl * self.delta_x.item(),
max_val=self.h_lvl * self.delta_x.item())
input_quan = quantize_input(
input_clip, self.delta_x) * self.delta_v # convert to voltage
# weight_quan = quantize_weight(self.weight, self.delta_w)
# =================== train learn-able Quantized Weight start ===================
x = self.weight
oc, ic, k1, k2 = x.shape
nbit = self.qbit
num_levels = self.n_lvl
with torch.no_grad():
pass
if self.training:
# wq, levels, threholds = QuantizedWeight(self.weight, oc*k1*k2, self.qbit, True)
# weight_quan_temp = wq - self.weight
basis_t, levels_t, thrs_t, level_codes_t = Quantizer_train(x, self.basis, self.level_codes,
self.thrs_multiplier, nbit, self.training)
# self.basis = basis_t.transpose(1, 0)
self.levels = levels_t.transpose(1, 0)
self.thrs = thrs_t.transpose(1, 0)
self.level_codes_channelwise = level_codes_t.transpose(1, 0)
# calculate output y and its binary code
# y [K, K, iC, oC]
# bits_y [K x K x iC, oC, nbit + 1]
y = lq_weight.apply(x, self.levels, self.thrs, nbit)
# # [oC x iC x K x K] -> [K x K x iC x oC]
# xp = x.permute((3, 2, 1, 0))
# reshape_x = torch.reshape(xp, [-1, oc])
# bits_y = torch.full([reshape_x.shape[0], oc, nbit + 1], -1.)
# zero_bits_y = torch.zeros_like(bits_y)
#
# # [K x K x iC x oC] [1, oC]
# for i in torch.arange(num_levels - 1):
# g = torch.ge(xp, thrs[i])
# # [K, K, iC, oC] + [1, oC], [K, K, iC, oC] => [K, K, iC, oC]
# # [K x K x iC, oC, nbit + 1]
# bits_y = torch.where(g.view(-1, oc, 1), zero_bits_y + level_codes_channelwise[i + 1], bits_y)
# return y.permute(3, 2, 1, 0), levels.permute(1, 0), thrs.permute(1, 0)
weight_quan = y / self.delta_w
# =================== train learn-able Quantized Weight end===================
# 2. Perform the computation between input voltage and weight conductance
if self.h_out is None and self.w_out is None:
self.h_out = int(
(input.shape[2] - self.kernel_size[0] + 2 * self.padding[0]) / self.stride[0] + 1)
self.w_out = int(
(input.shape[3] - self.kernel_size[0] + 2 * self.padding[0]) / self.stride[0] + 1)
# 2.1 flatten and unfold the weight and input
input_unfold = F.unfold(input_quan, kernel_size=self.kernel_size[0],
dilation=self.dilation, padding=self.padding,
stride=self.stride)
# print(weight_quan.shape, self.out_channels)
weight_flatten = weight_quan.contiguous().view(self.out_channels, -1)
# 2.2. add paddings
weight_padded = F.pad(weight_flatten, self.w_pad,
mode='constant', value=0)
input_padded = F.pad(input_unfold, self.input_pad,
mode='constant', value=0)
# 2.3. reshape to crxb size
input_crxb = input_padded.view(input.shape[0], 1, self.crxb_row,
self.crxb_size, input_padded.shape[2])
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
# convert the floating point weight into conductance pair values
G_crxb = self.w2g(weight_crxb)
# 2.4. compute matrix multiplication followed by reshapes
# this block is for introducing stochastic noise into ReRAM conductance
if self.enable_stochastic_noise:
rand_p = nn.Parameter(torch.Tensor(G_crxb.shape),
requires_grad=False)
rand_g = nn.Parameter(torch.Tensor(G_crxb.shape),
requires_grad=False)
if self.device.type == "cuda":
rand_p = rand_p.cuda()
rand_g = rand_g.cuda()
with torch.no_grad():
input_reduced = (input_crxb.norm(p=2, dim=0).norm(p=2, dim=3).unsqueeze(dim=3)) / \
(input_crxb.shape[0] * input_crxb.shape[3])
grms = torch.sqrt(
G_crxb * self.freq * (4 * self.kb * self.temp + 2 * self.q * input_reduced) / (input_reduced ** 2) \
+ (G_crxb / 3 / 128 / 128) ** 2) # G_crxb / 3 / 128
grms[torch.isnan(grms)] = 0
grms[grms.eq(float('inf'))] = 0
rand_p.uniform_()
rand_g.normal_(0, 1)
G_p = G_crxb * (self.b * G_crxb + self.a) / (G_crxb - (self.b * G_crxb + self.a))
G_p[rand_p.ge(self.tau)] = 0
G_g = grms * rand_g
G_crxb += (G_g.cuda() + G_p)
# this block is to calculate the ir drop of the crossbar
if self.ir_drop:
from .IR_solver import IrSolver
crxb_pos = IrSolver(Rsize=self.crxb_size,
Csize=self.crxb_size,
Gwire=self.Gwire,
| |
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:901814",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1550957",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1659175",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1659960",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1291711",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294557",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1297517",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1312784",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:2046528",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1293964",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1049906",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1086720",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1094131",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1251783",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294594",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294602",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1291867",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1664631",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1664658",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1789740",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1190448",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294370",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294372",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1537194",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1537340",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1809636",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1249715",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1249717",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1250907",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1291961",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294322",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294365",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294366",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1294376",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1297410",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1297950",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1298348",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1310475",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "rxcui:1310483",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3249838",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2961230",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C1966623",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2961550",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710420",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2962545",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2962729",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2962733",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710394",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3249809",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2979106",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2979108",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2979177",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710401",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2370127",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2370174",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2586469",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710316",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3245048",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3709736",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C4019484",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C4237811",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2978894",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2980718",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710377",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3152278",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3153662",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3154175",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3159420",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3160222",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3163535",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C3192005",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710423",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710408",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710392",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0715994",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0715389",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0715391",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C1329338",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0697421",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C1642798",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C2940395",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710387",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710389",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710297",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710349",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710398",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710301",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710364",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710395",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710300",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710359",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0713131",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0715978",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0710367",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0714624",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C1631089",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0711604",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
}
},
{
"type": "associated_with",
"source_id": "umlscui:C0711606",
"target_id": "MONDO:0004979",
"attributes": {
"p_val": 0.04742451468670237
| |
import os
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import u
from libcloud.utils.py3 import httplib
from libcloud.test import MockHttp
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
class FileFixtures(object):
    """Load canned response files from the ``fixtures`` directory that
    sits next to this module."""

    def __init__(self):
        # Anchor the fixture root on this file's own location so lookups
        # work regardless of the current working directory.
        script_dir = os.path.abspath(os.path.split(__file__)[0])
        self.root = os.path.join(script_dir, 'fixtures')

    def load(self, file):
        """Return the text of *file* (relative to the fixture root).

        Raises IOError when the file does not exist.
        """
        path = os.path.join(self.root, file)
        if not os.path.exists(path):
            raise IOError(path)
        # Python 3's open() takes an encoding; Python 2's does not.
        open_kwargs = {'encoding': 'utf-8'} if PY3 else {}
        with open(path, 'r', **open_kwargs) as fh:
            return u(fh.read())
class ComputeFileFixtures(FileFixtures):
    """Fixture loader for the compute-driver tests.

    Behaviorally identical to FileFixtures; kept as a distinct type so
    test code reads clearly.
    """

    def __init__(self):
        super(ComputeFileFixtures, self).__init__()
class InvalidRequestError(Exception):
    """Raised when a mock handler receives a request whose XML root tag
    is not the one the endpoint expects."""

    def __init__(self, tag):
        message = "Invalid Request - {0}".format(tag)
        super(InvalidRequestError, self).__init__(message)
class DimensionDataMockHttp(MockHttp):
# Fixture loader shared by every handler below; all canned responses are
# XML files under the fixtures directory.
fixtures = ComputeFileFixtures()

def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
    # Simulates rejected credentials: HTTP 401 with an empty body.
    return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])

def _oec_0_9_myaccount(self, method, url, body, headers):
    # Successful account lookup.
    body = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
    body = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_myaccount_PAGINATED(self, method, url, body, headers):
    body = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_myaccount_ALLFILTERS(self, method, url, body, headers):
    body = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_base_image(self, method, url, body, headers):
    body = self.fixtures.load('oec_0_9_base_image.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
    body = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
    # Dispatches on the action name after '?' in the URL; an unknown
    # action falls through and returns body=None with HTTP 200.
    body = None
    action = url.split('?')[-1]
    if action == 'restart':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml')
    elif action == 'shutdown':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml')
    elif action == 'delete':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml')
    elif action == 'start':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml')
    elif action == 'poweroff':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
    # Same action dispatch as above, but every action answers with an
    # "operation in progress" fixture and HTTP 400.
    body = None
    action = url.split('?')[-1]
    if action == 'restart':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml')
    elif action == 'shutdown':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml')
    elif action == 'delete':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml')
    elif action == 'start':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml')
    elif action == 'poweroff':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
    # NOTE(review): this fixture filename starts with an underscore,
    # unlike its siblings -- confirm the file on disk really is named
    # '_oec_...server.xml'.
    body = self.fixtures.load(
        '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers):
    """Mock handler for the networkWithLocation endpoint.

    For POST requests the XML request body is parsed and its root tag is
    validated (raising InvalidRequestError on a mismatch) before the
    canned response is returned; other HTTP methods return the fixture
    without inspecting the body.
    """
    # BUG FIX: the original used ``method is "POST"`` -- identity
    # comparison against a string literal is implementation-dependent
    # and can be False even when the text matches.  Equality is the
    # correct check.
    if method == "POST":
        request = ET.fromstring(body)
        if request.tag != "{http://oec.api.opsource.net/schemas/network}NewNetworkWithLocation":
            raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation_NA9(self, method, url, body, headers):
    # Same fixture as the unfiltered endpoint; the NA9 filter is ignored.
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e(self, method,
                                                                                               url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1(self, method, url, body, headers):
    action = url.split('?')[-1]
    # NOTE(review): when the action is not 'delete', the incoming
    # request body (the ``body`` parameter) is echoed back unchanged --
    # confirm that is intentional.
    if action == 'delete':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
    # NOTE(review): HTTP methods other than GET/POST fall off the end
    # and return None -- presumably unreachable in these tests; verify.
    if method == 'GET':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    if method == 'POST':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule(self, method, url, body, headers):
    # Anti-affinity rule creation succeeds.
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_FAIL_EXISTING(self, method, url, body, headers):
    # Creation fails (rule already exists): fixture + HTTP 400.
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml'
    )
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418_FAIL(self, method, url, body, headers):
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml'
    )
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
    body = self.fixtures.load(
        'server.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers):
    # Validates the request's XML root tag before answering.
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_deleteServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers):
    # "Resource busy" variant: same validation, HTTP 400 response.
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_deleteServer_RESOURCEBUSY.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_rebootServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_rebootServer_RESOURCEBUSY.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
    # Datacenter filter NA3 gets its own fixture; anything else the
    # generic server list.
    if url.endswith('datacenterId=NA3'):
        body = self.fixtures.load(
            '2.4/server_server_NA3.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    body = self.fixtures.load(
        '2.4/server_server.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGESIZE50(self, method, url, body, headers):
    # Asserts the driver propagated the requested page size.
    if not url.endswith('pageSize=50'):
        raise ValueError("pageSize is not set as expected")
    body = self.fixtures.load(
        '2.4/server_server.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_EMPTY(self, method, url, body, headers):
    body = self.fixtures.load(
        'server_server_paginated_empty.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGED_THEN_EMPTY(self, method, url, body, headers):
    # Page 1 has results, page 2 is empty -- exercises pagination end.
    if 'pageNumber=2' in url:
        body = self.fixtures.load(
            'server_server_paginated_empty.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    else:
        body = self.fixtures.load(
            '2.4/server_server_paginated.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATED(self, method, url, body, headers):
    if 'pageNumber=2' in url:
        body = self.fixtures.load(
            '2.4/server_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    else:
        body = self.fixtures.load(
            '2.4/server_server_paginated.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATEDEMPTY(self, method, url, body, headers):
    body = self.fixtures.load(
        'server_server_paginated_empty.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS(self, method, url, body, headers):
    """Check that every server-list filter in the query string carries
    its expected fake value, then return the generic server fixture.

    Unknown query parameters raise ValueError; a wrong value fails the
    assertion.
    """
    expected_values = {
        'datacenterId': 'fake_loc',
        'networkId': 'fake_network',
        'networkDomainId': 'fake_network_domain',
        'vlanId': 'fake_vlan',
        'ipv6': 'fake_ipv6',
        'privateIpv4': 'fake_ipv4',
        'name': 'fake_name',
        'state': 'fake_state',
        'started': 'True',
        'deployed': 'True',
        'sourceImageId': 'fake_image',
    }
    (_, params) = url.split('?')
    for pair in params.split('&'):
        (key, value) = pair.split('=')
        if key not in expected_values:
            raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
        assert value == expected_values[key]
    body = self.fixtures.load(
        '2.4/server_server.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule(self, method, url, body, headers):
    body = self.fixtures.load(
        'server_antiAffinityRule_list.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_ALLFILTERS(self, method, url, body, headers):
    # Validates the filter parameters the driver is expected to send;
    # networkDomainId is accepted with any value.
    (_, params) = url.split('?')
    parameters = params.split('&')
    for parameter in parameters:
        (key, value) = parameter.split('=')
        if key == 'id':
            assert value == 'FAKE_ID'
        elif key == 'state':
            assert value == 'FAKE_STATE'
        elif key == 'pageSize':
            assert value == '250'
        elif key == 'networkDomainId':
            pass
        else:
            raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
    body = self.fixtures.load(
        'server_antiAffinityRule_list.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_PAGINATED(self, method, url, body, headers):
    # First page is the paginated fixture; page 2 is the plain list.
    if 'pageNumber=2' in url:
        body = self.fixtures.load(
            'server_antiAffinityRule_list.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    else:
        body = self.fixtures.load(
            'server_antiAffinityRule_list_PAGINATED.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers):
    # A request filtered to id=NA9 gets its dedicated fixture.
    if url.endswith('id=NA9'):
        body = self.fixtures.load(
            'infrastructure_datacenter_NA9.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    body = self.fixtures.load(
        'infrastructure_datacenter.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter_ALLFILTERS(self, method, url, body, headers):
    if url.endswith('id=NA9'):
        body = self.fixtures.load(
            'infrastructure_datacenter_NA9.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    body = self.fixtures.load(
        'infrastructure_datacenter.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_updateVmwareTools(self, method, url, body, headers):
    # All handlers below validate the XML root tag of the request body
    # before returning their fixture (InvalidRequestError on mismatch).
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}updateVmwareTools":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_updateVmwareTools.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}startServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_startServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}startServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_startServer_INPROGRESS.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_shutdownServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_shutdownServer_INPROGRESS.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}resetServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_resetServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])

def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers):
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'server_powerOffServer.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, | |
in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
default_sentinel = object()
obj = Variable.get(key, default_var=default_sentinel,
deserialize_json=deserialize_json)
if obj is default_sentinel:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj
@classmethod
@provide_session
def get(cls, key, default_var=None, deserialize_json=False, session=None):
    """Return the value stored under *key*.

    Falls back to *default_var* when the key is absent; if no default
    was given, raises KeyError.  When *deserialize_json* is true the
    stored text is decoded as JSON before being returned.
    """
    row = session.query(cls).filter(cls.key == key).first()
    if row is None:
        if default_var is None:
            raise KeyError('Variable {} does not exist'.format(key))
        return default_var
    return json.loads(row.val) if deserialize_json else row.val
@classmethod
@provide_session
def set(cls, key, value, serialize_json=False, session=None):
    """Store *value* under *key*, replacing any existing entry.

    The value is JSON-encoded when *serialize_json* is true, otherwise
    coerced with str().
    """
    payload = json.dumps(value) if serialize_json else str(value)
    # Delete-then-insert implements an upsert within a single flush.
    session.query(cls).filter(cls.key == key).delete()
    session.add(Variable(key=key, val=payload))
    session.flush()
def rotate_fernet_key(self):
    """Re-encrypt the stored value under the newest Fernet key.

    No-op for empty or unencrypted values.
    """
    # get_fernet() is called unconditionally, matching the original
    # behavior (it may validate/initialize the key configuration).
    fernet = get_fernet()
    if not (self._val and self.is_encrypted):
        return
    self._val = fernet.rotate(self._val.encode('utf-8')).decode()
class DagRun(Base, LoggingMixin):
"""
DagRun describes an instance of a Dag. It can be created
by the scheduler (for regular runs) or by an external trigger
"""
__tablename__ = "dag_run"
ID_PREFIX = 'scheduled__'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN))
execution_date = Column(UtcDateTime, default=timezone.utcnow)
start_date = Column(UtcDateTime, default=timezone.utcnow)
end_date = Column(UtcDateTime)
_state = Column('state', String(50), default=State.RUNNING)
run_id = Column(String(ID_LEN))
external_trigger = Column(Boolean, default=True)
conf = Column(PickleType)
dag = None
__table_args__ = (
Index('dag_id_state', dag_id, _state),
UniqueConstraint('dag_id', 'execution_date'),
UniqueConstraint('dag_id', 'run_id'),
)
def __repr__(self):
return (
'<DagRun {dag_id} @ {execution_date}: {run_id}, '
'externally triggered: {external_trigger}>'
).format(
dag_id=self.dag_id,
execution_date=self.execution_date,
run_id=self.run_id,
external_trigger=self.external_trigger)
def get_state(self):
return self._state
def set_state(self, state):
if self._state != state:
self._state = state
self.end_date = timezone.utcnow() if self._state in State.finished() else None
@declared_attr
def state(self):
return synonym('_state',
descriptor=property(self.get_state, self.set_state))
@classmethod
def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
return prefix.format(date.isoformat()[:19])
@provide_session
def refresh_from_db(self, session=None):
"""
Reloads the current dagrun from the database
:param session: database session
"""
DR = DagRun
exec_date = func.cast(self.execution_date, DateTime)
dr = session.query(DR).filter(
DR.dag_id == self.dag_id,
func.cast(DR.execution_date, DateTime) == exec_date,
DR.run_id == self.run_id
).one()
self.id = dr.id
self.state = dr.state
@staticmethod
@provide_session
def find(dag_id=None, run_id=None, execution_date=None,
state=None, external_trigger=None, no_backfills=False,
session=None):
"""
Returns a set of dag runs for the given search criteria.
:param dag_id: the dag_id to find dag runs for
:type dag_id: int, list
:param run_id: defines the the run id for this dag run
:type run_id: str
:param execution_date: the execution date
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:type no_backfills: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
DR = DagRun
qry = session.query(DR)
if dag_id:
qry = qry.filter(DR.dag_id == dag_id)
if run_id:
qry = qry.filter(DR.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
qry = qry.filter(DR.execution_date.in_(execution_date))
else:
qry = qry.filter(DR.execution_date == execution_date)
if state:
qry = qry.filter(DR.state == state)
if external_trigger is not None:
qry = qry.filter(DR.external_trigger == external_trigger)
if no_backfills:
# in order to prevent a circular dependency
from airflow.jobs import BackfillJob
qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
dr = qry.order_by(DR.execution_date).all()
return dr
@provide_session
def get_task_instances(self, state=None, session=None):
"""
Returns the task instances for this dag run
"""
TI = TaskInstance
tis = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
)
if state:
if isinstance(state, six.string_types):
tis = tis.filter(TI.state == state)
else:
# this is required to deal with NULL values
if None in state:
tis = tis.filter(
or_(TI.state.in_(state),
TI.state.is_(None))
)
else:
tis = tis.filter(TI.state.in_(state))
if self.dag and self.dag.partial:
tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
return tis.all()
@provide_session
def get_task_instance(self, task_id, session=None):
"""
Returns the task instance specified by task_id for this dag run
:param task_id: the task id
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti
def get_dag(self):
"""
Returns the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException("The DAG (.dag) for {} needs to be set"
.format(self))
return self.dag
@provide_session
def get_previous_dagrun(self, session=None):
"""The previous DagRun, if there is one"""
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date
).order_by(
DagRun.execution_date.desc()
).first()
@provide_session
def get_previous_scheduled_dagrun(self, session=None):
"""The previous, SCHEDULED DagRun, if there is one"""
dag = self.get_dag()
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == dag.previous_schedule(self.execution_date)
).first()
@provide_session
def update_state(self, session=None):
"""
Determines the overall state of the DagRun based on the state
of its TaskInstances.
:return: State
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
self.log.debug("Updating state for %s considering %s task(s)", self, len(tis))
for ti in list(tis):
# skip in db?
if ti.state == State.REMOVED:
tis.remove(ti)
else:
ti.task = dag.get_task(ti.task_id)
# pre-calculate
# db is faster
start_dttm = timezone.utcnow()
unfinished_tasks = self.get_task_instances(
state=State.unfinished(),
session=session
)
none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None
for t in unfinished_tasks)
# small speed up
if unfinished_tasks and none_depends_on_past and none_task_concurrency:
# todo: this can actually get pretty slow: one task costs between 0.01-015s
no_dependencies_met = True
for ut in unfinished_tasks:
# We need to flag upstream and check for changes because upstream
# failures/re-schedules can result in deadlock false positives
old_state = ut.state
deps_met = ut.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True),
session=session)
if deps_met or old_state != ut.current_state(session=session):
no_dependencies_met = False
break
duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000
Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)
# future: remove the check on adhoc tasks (=active_tasks)
if len(tis) == len(dag.active_tasks):
root_ids = [t.task_id for t in dag.roots]
roots = [t for t in tis if t.task_id in root_ids]
# if all roots finished and at least one failed, the run failed
if (not unfinished_tasks and
any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
self.log.info('Marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='task_failure',
session=session)
# if all roots succeeded and no unfinished tasks, the run succeeded
elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
for r in roots):
self.log.info('Marking run %s successful', self)
self.set_state(State.SUCCESS)
dag.handle_callback(self, success=True, reason='success', session=session)
# if *all tasks* are deadlocked, the run failed
elif (unfinished_tasks and none_depends_on_past and
none_task_concurrency and no_dependencies_met):
self.log.info('Deadlock; marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
session=session)
# finally, if the roots aren't done, the dag is still running
else:
self.set_state(State.RUNNING)
# todo: determine we want to use with_for_update to make sure to lock the run
session.merge(self)
session.commit()
return self.state
@provide_session
def verify_integrity(self, session=None):
"""
Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required.
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state is not State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
is_task_in_dag = task is not None
should_restore_task = is_task_in_dag and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in six.itervalues(dag.task_dict):
if task.adhoc:
continue
if task.start_date > self.execution_date and not self.is_backfill:
continue
if task.task_id not in task_ids:
Stats.incr(
"task_instance_created-{}".format(task.__class__.__name__),
1, 1)
ti = TaskInstance(task, self.execution_date)
session.add(ti)
session.commit()
@staticmethod
def get_run(session, dag_id, execution_date):
    """
    Look up the scheduled (non externally triggered) DagRun for a DAG at a
    given execution date.

    :param session: database session
    :param dag_id: DAG ID
    :type dag_id: unicode
    :param execution_date: execution date
    :type execution_date: datetime
    :return: DagRun corresponding to the given dag_id and execution date
        if one exists. None otherwise.
    :rtype: DagRun
    """
    return (
        session.query(DagRun)
        .filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False,  # noqa
            DagRun.execution_date == execution_date,
        )
        .first()
    )
@property
def is_backfill(self):
    """True when this run was created by a backfill job (run_id prefix check)."""
    from airflow.jobs import BackfillJob

    run_id = self.run_id
    return run_id is not None and run_id.startswith(BackfillJob.ID_PREFIX)
@classmethod
@provide_session
def get_latest_runs(cls, session):
    """Returns the latest DagRun for each DAG.

    :param session: database session (injected by ``provide_session``)
    :return: list of DagRun rows, one per dag_id, each being the run with
        the greatest execution_date for that DAG
    """
    # Inner query: newest execution_date per dag_id.
    subquery = (
        session
        .query(
            cls.dag_id,
            func.max(cls.execution_date).label('execution_date'))
        .group_by(cls.dag_id)
        .subquery()
    )
    # Join back on (dag_id, execution_date) to fetch the full DagRun rows.
    dagruns = (
        session
        .query(cls)
        .join(subquery,
              and_(cls.dag_id == subquery.c.dag_id,
                   cls.execution_date == subquery.c.execution_date))
        .all()
    )
    return dagruns
class | |
slippage:", "{:.4%}".format(slippage))
# check how much liquidity we have staked in the frax contract
staking = Contract("0x3EF26504dbc8Dd7B7aa3E97Bc9f3813a9FC0B4B0")
locked = staking.lockedLiquidityOf(strategy)
print("\nStrategy locked liquidity:", locked)
# Display estimated APR
print(
"\nEstimated APR with trading fees: ",
"{:.2%}".format(
((new_assets - old_assets) * (365)) / (strategy.estimatedTotalAssets())
),
)
# simulate 1 day of earnings so our LP will unlock
chain.sleep(86400)
chain.mine(1)
# turn off auto-restake since we want to withdraw after this harvest
strategy.setManagerParams(False, False, 50, {"from": gov})
harvest = strategy.harvest({"from": gov})
print("\nThe is our harvest info:", harvest.events["Harvested"])
# check on our NFT LP
real_balance = strategy.balanceOfNFTpessimistic() / (10 ** token.decimals())
virtual_balance = strategy.balanceOfNFToptimistic() / (10 ** token.decimals())
slippage = (virtual_balance - real_balance) / real_balance
print("\nHere's how much is in our NFT (pessimistic):", real_balance)
print("Here's how much is in our NFT (optimistic):", virtual_balance)
print("This is our slippage:", "{:.4%}".format(slippage))
# check how much liquidity we have staked in the frax contract
staking = Contract("0x3EF26504dbc8Dd7B7aa3E97Bc9f3813a9FC0B4B0")
locked = staking.lockedLiquidityOf(strategy)
print("\nStrategy locked liquidity:", locked)
# simulate 1 day for share price to rise
chain.sleep(86400)
chain.mine(1)
# withdraw and check on our losses (due to slippage on big swaps in/out)
tx = vault.withdraw(amount, whale, 10_000, {"from": whale})
loss = startingWhale - token.balanceOf(whale) - tradingLosses
print("Losses from withdrawal slippage:", loss / (10 ** token.decimals()))
assert vault.pricePerShare() > 10 ** token.decimals()
print("Vault share price", vault.pricePerShare() / (10 ** token.decimals()))
# simulate some trading in the uniswap pool with our whale to unbalance it
# @pytest.mark.skip(reason="currently crashes testing")
def test_simple_harvest_imbalanced_pool(
    gov,
    token,
    vault,
    strategist,
    whale,
    strategy,
    chain,
    strategist_ms,
    amount,
    accounts,
    frax,
):
    """
    Harvest flow with a deliberately imbalanced Uniswap V3 pool: deposit,
    harvest, skew the pool with large whale swaps, harvest again, and
    withdraw, checking that losses are limited to swap slippage.
    """
    # FRAX staking contract that holds our locked LP position (was duplicated
    # inline six times in the original).
    staking = Contract("0x3EF26504dbc8Dd7B7aa3E97Bc9f3813a9FC0B4B0")

    def report_position():
        # Print optimistic/pessimistic NFT valuations, the slippage between
        # them, and the liquidity locked in the FRAX staking pool.
        real_balance = strategy.balanceOfNFTpessimistic() / (10 ** token.decimals())
        virtual_balance = strategy.balanceOfNFToptimistic() / (10 ** token.decimals())
        slippage = (virtual_balance - real_balance) / real_balance
        print("\nHere's how much is in our NFT (pessimistic):", real_balance)
        print("Here's how much is in our NFT (optimistic):", virtual_balance)
        print("This is our slippage:", "{:.4%}".format(slippage))
        print("\nStrategy locked liquidity:", staking.lockedLiquidityOf(strategy))

    ## deposit to the vault after approving
    startingWhale = token.balanceOf(whale)
    token.approve(vault, 2**256 - 1, {"from": whale})
    vault.deposit(amount, {"from": whale})
    newWhale = token.balanceOf(whale)

    # harvest, store asset amount
    chain.sleep(1)
    chain.mine(1)
    report_position()

    strategy.harvest({"from": gov})
    chain.sleep(1)  # we currently lock for a day
    chain.mine(1)
    report_position()

    # have our whale trade in the uniV3 pool a bunch to generate some fees
    uni_values_token = [token.address, 500, frax.address]
    uni_values_frax = [frax.address, 500, token.address]
    uni_types = ("address", "uint24", "address")
    packed_path_token = encode_abi_packed(uni_types, uni_values_token)
    packed_path_frax = encode_abi_packed(uni_types, uni_values_frax)
    uni_router = Contract("0xE592427A0AEce92De3Edee1F18E0157C05861564")
    token.approve(uni_router, 2**256 - 1, {"from": whale})
    frax.approve(uni_router, 2**256 - 1, {"from": whale})
    print("\nLet's do some trading!")
    want_to_swap = (
        token.balanceOf(whale) / 15
    )  # whale has like $200m USDC, we don't need to do that lol
    # note that if we do enough, we will drain all FRAX, and then won't get any more rewards from the staking pool
    for i in range(3):
        exact_input = (packed_path_token, whale.address, 2**256 - 1, want_to_swap, 1)
        uni_router.exactInput(exact_input, {"from": whale})
        chain.sleep(1)
        chain.mine(1)
        print("Done with round", i)

    tradingLosses = newWhale - token.balanceOf(whale)
    print("USDC lost trading", tradingLosses / (10 ** token.decimals()))
    nft_holdings = strategy.principal()
    print(
        "\nCurrent NFT Holdings after trading\nFRAX:",
        nft_holdings[0] / 1e18,
        "\nUSDC:",
        nft_holdings[1] / 1e6,
    )
    report_position()

    old_assets = vault.totalAssets()
    assert old_assets > 0
    assert strategy.estimatedTotalAssets() > 0
    print("\nStarting vault total assets: ", old_assets / (10 ** token.decimals()))
    print(
        "Strategy total assets:",
        strategy.estimatedTotalAssets() / (10 ** token.decimals()),
    )

    # simulate one day, since that's how long we lock for
    chain.sleep(86400)
    chain.mine(1)

    # harvest, store new asset amount
    chain.sleep(1)
    harvest = strategy.harvest({"from": gov})
    # (typo fixed: was "The is our harvest info")
    print("This is our harvest info:", harvest.events["Harvested"])
    chain.sleep(1)
    new_assets = vault.totalAssets()
    # confirm we made money, or at least that we have about the same
    assert new_assets >= old_assets
    print("\nVault total assets after harvest: ", new_assets / (10 ** token.decimals()))
    print(
        "Strategy total assets:",
        strategy.estimatedTotalAssets() / (10 ** token.decimals()),
    )
    report_position()

    # Display estimated APR
    print(
        "\nEstimated APR with trading fees: ",
        "{:.2%}".format(
            ((new_assets - old_assets) * (365)) / (strategy.estimatedTotalAssets())
        ),
    )

    # simulate 1 day of earnings so our LP will unlock
    chain.sleep(86400)
    chain.mine(1)

    # turn off auto-restake since we want to withdraw after this harvest
    strategy.setManagerParams(False, False, 50, {"from": gov})
    harvest = strategy.harvest({"from": gov})
    print("\nThis is our harvest info:", harvest.events["Harvested"])
    print(
        "\nVault total assets after final harvest: ",
        new_assets / (10 ** token.decimals()),
    )
    print(
        "Strategy total assets:",
        strategy.estimatedTotalAssets() / (10 ** token.decimals()),
    )
    report_position()

    # simulate 1 day for share price to rise
    chain.sleep(86400)
    chain.mine(1)

    # withdraw and check on our losses (due to slippage on big swaps in/out)
    tx = vault.withdraw(amount, whale, 10_000, {"from": whale})
    loss = startingWhale - token.balanceOf(whale) - tradingLosses
    print("Losses from withdrawal slippage:", loss / (10 ** token.decimals()))
    assert vault.pricePerShare() > 10 ** token.decimals()
    print("Vault share price", vault.pricePerShare() / (10 ** token.decimals()))
# simulate some trading in the uniswap pool with our whale to unbalance it, then try to assess where we are with checkTrueHoldings
# @pytest.mark.skip(reason="currently reverts in testing")
def test_simple_harvest_imbalanced_pool_check_holdings(
gov,
token,
vault,
strategist,
whale,
strategy,
chain,
strategist_ms,
amount,
accounts,
frax,
):
## deposit to the vault after approving
startingWhale = token.balanceOf(whale)
token.approve(vault, 2**256 - 1, {"from": whale})
vault.deposit(amount, {"from": whale})
newWhale = token.balanceOf(whale)
# harvest, store asset amount
chain.sleep(1)
chain.mine(1)
# check on our NFT LP
real_balance = strategy.balanceOfNFTpessimistic() / (10 ** token.decimals())
virtual_balance = strategy.balanceOfNFToptimistic() / (10 ** token.decimals())
slippage = (virtual_balance - real_balance) / real_balance
print("\nHere's how much is in our NFT (pessimistic):", real_balance)
print("Here's how much is in our NFT (optimistic):", virtual_balance)
print("This is our slippage:", "{:.4%}".format(slippage))
# check how much liquidity we have staked in the frax contract
staking = Contract("0x3EF26504dbc8Dd7B7aa3E97Bc9f3813a9FC0B4B0")
locked = staking.lockedLiquidityOf(strategy)
print("\nStrategy locked liquidity:", locked)
strategy.harvest({"from": gov})
chain.sleep(1) # we currently lock for a day
chain.mine(1)
# check on our NFT LP
real_balance = strategy.balanceOfNFTpessimistic() / (10 ** token.decimals())
virtual_balance = strategy.balanceOfNFToptimistic() / (10 ** token.decimals())
slippage = (virtual_balance - real_balance) / real_balance
print("\nHere's how much is in our NFT (pessimistic):", real_balance)
print("Here's how much is in our NFT (optimistic):", virtual_balance)
print("This is our slippage:", "{:.4%}".format(slippage))
# check how much liquidity we have staked in the frax contract
staking = Contract("0x3EF26504dbc8Dd7B7aa3E97Bc9f3813a9FC0B4B0")
locked = staking.lockedLiquidityOf(strategy)
print("\nStrategy locked liquidity:", locked)
# have our whale trade in the uniV3 | |
# -*- coding: utf-8 -*-
"""AND, OR, NOR gate implementation using ANN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/shyammarjit/Deep-Learning/blob/main/AND%2C_OR%2C_NOR_gate_implementation_using_ANN.ipynb
#Implementation of Artificial Neural Network for NOR Logic Gate with 2-bit Binary Input.
"""
# Import Python Libraries
import numpy as np
from matplotlib import pyplot as plt
# Sigmoid Function Implementation
def sigmoid(z):
    """Element-wise logistic function: 1 / (1 + e^(-z))."""
    denom = 1 + np.exp(-z)
    return 1 / denom
# Initialization of the neural network parameters
# Initialized all the weights in the range of between 0 and 1
# Bias values are initialized to 0
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    """Create standard-normal weights and zero biases for a one-hidden-layer net.

    The two randn calls are kept in the original order so seeded runs are
    reproducible.
    """
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    return {
        "W1": W1,
        "b1": np.zeros((neuronsInHiddenLayers, 1)),
        "W2": W2,
        "b2": np.zeros((outputFeatures, 1)),
    }
# Forward Propagation
def forwardPropagation(X, Y, parameters):
    """Run one forward pass; return (mean cross-entropy cost, cache, output A2).

    X has shape (features, m); Y has shape (outputs, m).
    """
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)
    # Binary cross-entropy averaged over the m examples.
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2
# Backward Propagation
def backwardPropagation(X, Y, cache):
    """
    Backpropagate through the 2-layer network.

    :param X: input matrix, shape (features, m)
    :param Y: labels, shape (outputs, m)
    :param cache: tuple (Z1, A1, W1, b1, Z2, A2, W2, b2) from forwardPropagation
    :return: dict of gradients dW1/db1/dW2/db2 (plus dZ1/dZ2 intermediates)
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    # BUG FIX: db2 previously omitted the "/ m" batch average (unlike
    # dW2/dW1/db1), which made the output-bias gradient m times too large.
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    return {"dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    """Apply one gradient-descent step and return the parameter dict."""
    for key in ("W1", "W2", "b1", "b2"):
        parameters[key] = parameters[key] - learningRate * gradients["d" + key]
    return parameters
# Model to learn the NOR truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]) # NOR input
Y = np.array([[1, 0, 0, 0]]) # NOR output

# Define model parameters
neuronsInHiddenLayers = 2 # number of hidden layer neurons (2)
inputFeatures = X.shape[0] # number of input features (2)
outputFeatures = Y.shape[0] # number of output features (1)
parameters = initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures)
epoch = 100000
learningRate = 0.01
losses = np.zeros((epoch, 1))

# Full-batch gradient descent: forward pass, backprop, parameter update.
for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Evaluating the performance
plt.figure()
plt.plot(losses)
plt.title("Implementation of Artificial Neural Network for NOR Logic Gate with 2-bit Binary Input.")
plt.xlabel("EPOCHS")
plt.ylabel("Loss value")
plt.show()

# Testing
# NOTE(review): Y still holds the training labels here, so the returned cost
# is not meaningful for this new X; only the prediction A2 is used.
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]]) # NOR input
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print("INPUT: \n"+str(X))
print("OUTPUT: " + str(prediction))
"""#Implementation of Artificial Neural Network for AND Logic Gate with 2-bit Binary Input.
"""
# Import Python Libraries
import numpy as np
from matplotlib import pyplot as plt
# Sigmoid Function
def sigmoid(z):
    """Element-wise logistic activation 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-z))

# Initialization of the neural network parameters.
# Weights are drawn from a standard normal; biases start at 0.
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    b2 = np.zeros((outputFeatures, 1))
    parameters = {"W1" : W1, "b1": b1,
                  "W2" : W2, "b2": b2}
    return parameters

# Forward Propagation
def forwardPropagation(X, Y, parameters):
    """Forward pass; returns (mean cross-entropy cost, cache, output A2)."""
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2

# Backward Propagation
def backwardPropagation(X, Y, cache):
    """Backprop; returns gradients averaged over the batch of m examples."""
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    # BUG FIX: db2 previously omitted "/ m" (unlike dW2/dW1/db1), making the
    # output-bias gradient m times too large.
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    gradients = {"dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients

# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    """One gradient-descent step; mutates and returns `parameters`."""
    parameters["W1"] = parameters["W1"] - learningRate * gradients["dW1"]
    parameters["W2"] = parameters["W2"] - learningRate * gradients["dW2"]
    parameters["b1"] = parameters["b1"] - learningRate * gradients["db1"]
    parameters["b2"] = parameters["b2"] - learningRate * gradients["db2"]
    return parameters
# Model to learn the AND truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]) # AND input
Y = np.array([[0, 0, 0, 1]]) # AND output

# Define model parameters
neuronsInHiddenLayers = 2 # number of hidden layer neurons (2)
inputFeatures = X.shape[0] # number of input features (2)
outputFeatures = Y.shape[0] # number of output features (1)
parameters = initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures)
epoch = 100000
learningRate = 0.01
losses = np.zeros((epoch, 1))

# Full-batch gradient descent: forward pass, backprop, parameter update.
for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Evaluating the performance
plt.figure()
plt.plot(losses)
plt.title("Implementation of Artificial Neural Network for AND Logic Gate with 2-bit Binary Input")
plt.xlabel("EPOCHS")
plt.ylabel("Loss value")
plt.show()

# Testing
# NOTE(review): Y still holds the training labels here, so `cost` is not
# meaningful for this new X; only the prediction A2 is used.
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]]) # AND input
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print("INPUT: \n"+str(X))
print("OUTPUT: " + str(prediction))
"""#Implementation of Artificial Neural Network for OR Logic Gate with 2-bit Binary Input"""
# Import Python Libraries
import numpy as np
from matplotlib import pyplot as plt
# Sigmoid Function
def sigmoid(z):
    """Element-wise logistic activation 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-z))

# Initialization of the neural network parameters.
# Weights are drawn from a standard normal; biases start at 0.
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    b2 = np.zeros((outputFeatures, 1))
    parameters = {"W1" : W1, "b1": b1,
                  "W2" : W2, "b2": b2}
    return parameters

# Forward Propagation
def forwardPropagation(X, Y, parameters):
    """Forward pass; returns (mean cross-entropy cost, cache, output A2)."""
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
    cost = -np.sum(logprobs) / m
    return cost, cache, A2

# Backward Propagation
def backwardPropagation(X, Y, cache):
    """Backprop; returns gradients averaged over the batch of m examples."""
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    # BUG FIX: db2 previously omitted "/ m" (unlike dW2/dW1/db1), making the
    # output-bias gradient m times too large.
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, A1 * (1 - A1))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    gradients = {"dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients

# Updating the weights based on the negative gradients
def updateParameters(parameters, gradients, learningRate):
    """One gradient-descent step; mutates and returns `parameters`."""
    parameters["W1"] = parameters["W1"] - learningRate * gradients["dW1"]
    parameters["W2"] = parameters["W2"] - learningRate * gradients["dW2"]
    parameters["b1"] = parameters["b1"] - learningRate * gradients["db1"]
    parameters["b2"] = parameters["b2"] - learningRate * gradients["db2"]
    return parameters
# Model to learn the OR truth table
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]) | |
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import OrderedDict
from textwrap import dedent
from .compiler import BUILTIN
from .helpers import doc_helper, indent, Code
# Pre-bound .format callable producing the body of abstract-method stubs:
# the generated JS raises at call time.
# NOTE(review): `throw TypeError, '...'` uses the JS comma operator and
# actually throws the string, not a TypeError — confirm this is intended.
not_implemented_template = """\
throw TypeError, '`{clazz}.{name}` is an abstract method';""".format
## Packaging
def package(name, version, packages, srcs, deps):
    """Assemble the files of the generated npm package: sources, per-package
    READMEs, a root index.js importing every package, and a package.json
    listing the dependencies."""
    files = OrderedDict()
    files.update(srcs)
    # deps entries are tuples; d[1:] is (name, version) for package.json.
    dependencies = ",\n    ".join(['"%s": "%s"' % d[1:] for d in deps])
    for path, readme in packages.items():
        files["%s/README.md" % "/".join(path)] = readme
    files["index.js"] = str(Code(comment, "%s\n" % "\n".join([import_(path, (), None) for path in packages])))
    files["package.json"] = """
{
  "name": "%s",
  "version": "%s",
  "main": "index.js",
  "dependencies": {
    %s
  }
}
""" % (name, version, dependencies)
    return files
def class_file(path, name, fname):
    """File that holds a class: the package index for packaged classes,
    otherwise a standalone <fname>.js."""
    if not path:
        return "%s.js" % fname
    return "/".join(path + ["index.js"])

def function_file(path, name, fname):
    """Functions live in the same file as classes."""
    return class_file(path, name, fname)

def package_file(path, name, fname):
    """Index file of the package `name` under `path`."""
    return "/".join(path + [name, "index.js"])
def make_class_file(path, name, rtloc=BUILTIN, why="class"):
    """Create a Code buffer for a class file, bracketed by the runtime's
    plugImports/pumpImports calls (which support lazy circular imports)."""
    what = "/".join(list(path) + [name])
    head=dedent('''\
    var _qrt = require("%s/quark_runtime.js");
    _qrt.plugImports("%s");
    ''' % (rtloc, what))
    tail=dedent('''\n\
    _qrt.pumpImports("%s");
    ''' % (what))
    return Code(comment, head=head, tail=tail)

def make_function_file(path, name, mdpkg, rtloc=BUILTIN):
    """Function files get the same wrapping as class files."""
    return make_class_file(path, name, rtloc=rtloc, why="function")

def make_package_file(path, name, rtloc=BUILTIN):
    """Package index files get the same wrapping as class files."""
    return make_class_file(path, name, rtloc=rtloc, why="package")
def main_file(name):
    """File name that holds the generated entry point."""
    return "%s.js" % name

def make_main_file(name):
    """Code buffer for the entry-point file (header comment only)."""
    return Code(comment)

def main_prolog():
    """Export a call_main() hook that runs main() with the CLI arguments."""
    return "exports.call_main = function () { main(process.argv.slice(1)); }"

def main(path, name):
    """Statement invoking the program entry point."""
    return invoke_function(path, name, ()) + ";"
## Naming and imports
# Quark identifiers that must be respelled for JavaScript.
SUBS = {"self": "this", "super": "super_"}

def name(n):
    """Return the JavaScript spelling of a Quark identifier."""
    if n in SUBS:
        return SUBS[n]
    return n

def type(path, name, parameters):
    """Render a dotted type reference; generic parameters are erased."""
    return ".".join(path + [name])
def import_(path, origin, dep, seen=None, lazy=False):
    """
    Emit a require() plus re-export for package `path` as seen from `origin`.

    :param dep: external npm dependency name when the target lives in one;
        otherwise a relative require into this package tree is built.
    :param seen: optional set used to de-duplicate imports of the same root.
    :param lazy: when True, defer the require via _qrt.lazyImport so
        circular imports resolve.
    """
    qual = qualify(path, origin)
    if seen is not None:
        if qual[0] in seen:
            return ""
        seen.add(qual[0])
    extra = ""
    if dep:
        req = dep
        extra = ".%s" % qual[0]
    else:
        # Relative require: same package -> "./", otherwise climb out of origin.
        if tuple(origin) + tuple(qual) == tuple(path):
            prefix = "./"
        else:
            prefix = "../"*len(origin)
        req = prefix + qual[0] + "/index.js"
    if lazy:
        return dedent(
            """\
            var %s; _qrt.lazyImport('%s', function(){
            %s = require('%s')%s;
            exports.%s = %s;
            });
            """) % (qual[0], req,
                    qual[0], req, extra,
                    qual[0], qual[0])
    else:
        return "var %s = require('%s')%s;\nexports.%s = %s;" % (
            qual[0], req, extra, qual[0], qual[0])
def qualify(package, origin):
    """Relative package path of `package` as seen from package `origin`."""
    # Same package (or no package at all): nothing to qualify.
    if package == origin or not package:
        return []
    # Inside origin: strip the shared prefix.
    if origin and package[:len(origin)] == origin:
        return package[len(origin):]
    return package
def native_include(path, du_name):
    """Generate a require() line for a natively included JS source file."""
    assert path.endswith(".js"), path
    assert "/" not in path, (path, "Subpackage native inclusion not implemented for JS")
    module = path[:-3]  # strip the ".js" suffix
    return 'var %s = require("%s/%s");\n' % (module, du_name, path)
## Documentation
def doc(lines):
    """Format doc lines as a JSDoc block comment."""
    return doc_helper(lines, "/**", " * ", " */")

def add_doc(doc, lines):
    """Insert extra ' * ' lines just before the closing ' */' of an existing
    JSDoc block; returns "" when there is no docstring to extend."""
    if doc:
        docl = doc.splitlines()
    else:
        #docl = ["/**", " */"] # Use this if we want to output for undocumented items
        return "" # Using this because we don't want that noise
    # Allow calling with a (presumably one-line) string instead of a list
    if str(lines) == lines:
        lines = [lines]
    for line in lines:
        # Splice each new line in just above the closing delimiter.
        docl[-1:-1] = [" * " + line]
    return "\n".join(docl) + "\n"

def add_param_doc(doc, params):
    """Append a generic @param tag for every parameter."""
    return add_doc(doc, ["@param {*} %s" % param for param in params])
## Comments
def comment(stuff):
    """Wrap text in a JS block comment, with trailing newline."""
    return "/* {} */\n".format(stuff)
## Class definition
def clazz(doc, abstract, clazz, parameters, base, interfaces, static_fields, fields, constructors, methods):
    """Emit a full class definition: constructors, prototype inheritance,
    the __init_fields__ initializer, lazily-evaluated statics, and methods."""
    # Inherited fields are initialized first by chaining to the base class.
    if base: fields = [base + ".prototype.__init_fields__.call(this);"] + fields
    doc = add_doc(doc, "@class %s" % clazz)
    result = "\n// CLASS %s\n" % clazz + doc
    result += "\n".join(constructors)
    if base:
        result += "_qrt.util.inherits(%s, %s);\n" % (clazz, base)
    result += "\nfunction %s__init_fields__() {" % clazz + indent("\n".join(fields)) + "}\n"
    result += "%s.prototype.__init_fields__ = %s__init_fields__;\n" % (clazz, clazz)
    # Statics may reference other classes, so their evaluation is deferred.
    result += "\n".join("_qrt.lazyStatic(function(){%s});" % x for x in static_fields)
    result += "\n".join(methods)
    return result
def static_field(doc, clazz, type, name, value):
    """Static (class-level) field assignment; defaults to null."""
    doc = add_doc(doc, "@static")
    return "%s%s.%s = %s;" % (doc, clazz, name, value or "null")
def field(doc, clazz, type, name, value):
    """Instance-field initializer (runs inside __init_fields__)."""
    initial = value if value else "null"
    return "%sthis.%s = %s;" % (doc, name, initial)

def field_init():
    """Call that triggers field initialization from a constructor."""
    return "this.__init_fields__();"

def default_constructor(clazz):
    """Zero-argument constructor that just initializes fields, plus export."""
    template = "function %s() {\n this.__init_fields__();\n}\nexports.%s = %s;\n"
    return template % (clazz, clazz, clazz)

def constructor(doc, name, parameters, body):
    """Named constructor function with export."""
    params = ", ".join(parameters)
    return "\n%sfunction %s(%s)%s\nexports.%s = %s;\n" % (
        doc, name, params, body, name, name)
def method(doc, clazz, type, name, parameters, body):
    """Instance method: a named function wired onto the class prototype."""
    doc = add_doc(doc, ["@method %s" % name, "@memberof %s" % clazz, "@instance"])
    doc = add_param_doc(doc, parameters)
    params = ", ".join(parameters)
    full_name = "%s_%s" % (clazz, name)
    trailer = "%s.prototype.%s = %s;" % (clazz, name, full_name)
    return "\n%sfunction %s(%s)%s\n" % (doc, full_name, params, body) + trailer

def static_method(doc, clazz, type, name, parameters, body):
    """Static method: wired directly onto the class object."""
    doc = add_doc(doc, ["@memberof %s" % clazz, "@static"])
    doc = add_param_doc(doc, parameters)
    params = ", ".join(parameters)
    full_name = "%s_%s" % (clazz, name)
    trailer = "%s.%s = %s;" % (clazz, name, full_name)
    return "\n%sfunction %s(%s)%s\n" % (doc, full_name, params, body) + trailer

def abstract_method(doc, clazz, type, name, parameters):
    """Abstract method stub whose body throws at call time."""
    doc = add_doc(doc, ["@abstract", "@memberof %s" % clazz, "@instance"])
    doc = add_param_doc(doc, parameters)
    params = ", ".join(parameters)
    full_name = "%s_%s" % (clazz, name)
    trailer = "%s.prototype.%s = %s;" % (clazz, name, full_name)
    body = not_implemented_template(clazz=clazz, name=name)
    return "\n%sfunction %s(%s) { %s }\n" % (doc, full_name, params, body) + trailer
## Interface definition
def interface(doc, iface, parameters, bases, static_fields, methods):
    """Interfaces are emitted as plain classes with a default constructor
    (bases/parameters are erased)."""
    doc = add_doc(doc, "@interface")
    return clazz(doc, False, iface, parameters, None, [], static_fields, [], [default_constructor(iface)], methods)
def interface_method(doc, iface, type, name, parameters, body):
    """Interface method: concrete when a body is given, otherwise an
    abstract stub that throws at call time."""
    params = ", ".join(parameters)
    full_name = "%s_%s" % (iface, name)
    trailer = "%s.prototype.%s = %s;" % (iface, name, full_name)
    doc = add_doc(doc, ["@method %s" % name, "@memberof %s" % iface, "@instance"])
    if body is None:
        body = " { %s }" % not_implemented_template(clazz=iface, name=name)
        # BUG FIX: the @abstract tag was computed but its result discarded
        # (bare `add_doc(doc, "@abstract")`), so abstract interface methods
        # were never tagged in the generated JSDoc. Keep the updated doc.
        doc = add_doc(doc, "@abstract")
    doc = add_param_doc(doc, parameters)
    return "\n%sfunction %s(%s)%s\n" % (doc, full_name, params, body) + trailer
## Function definition
def function(doc, type, name, parameters, body):
    """Top-level function definition plus its export."""
    trailer = "exports.%s = %s;" % (name, name)
    doc = add_doc(doc, "@function")
    doc = add_param_doc(doc, parameters)
    return "\n%sfunction %s(%s)%s\n" % (doc, name, ", ".join(parameters), body) + trailer
## Parameters for methods and functions
def param(type, name, value):
    """Render a parameter, adding "= default" when a default value exists."""
    return "%s" % name if value is None else "%s = %s" % (name, value)
## Blocks
def block(statements):
    """Braced statement block, with the statements indented one level."""
    return " {%s}" % indent("\n".join(statements))
## Statements
def local(type, name, value):
    """Local variable declaration; defaults to null."""
    return "var %s = %s;" % (name, value if value else "null")

def expr_stmt(e):
    """Expression used as a statement."""
    return "{};".format(e)

def assign(lhs, rhs):
    """Assignment statement."""
    return "{} = {};".format(lhs, rhs)

def if_(pred, cons, alt):
    """if statement; the else branch is emitted only when present."""
    out = "if (%s)%s" % (pred, cons)
    if alt:
        out += " else%s" % alt
    return out

def while_(cond, body):
    """while loop."""
    return "while (%s)%s" % (cond, body)

def break_():
    """break statement."""
    return "break;"

def continue_():
    """continue statement."""
    return "continue;"

def return_(expr):
    """return statement, with or without a value."""
    if not expr:
        return "return;"
    return "return %s;" % expr
## Expressions
def class_ref(v):
    """Reference to a class by name."""
    return v

def method_ref(v):
    """Reference to a method on the current instance."""
    return "this.%s" % v

def field_ref(v):
    """Reference to a field on the current instance."""
    return "this.%s" % v

def local_ref(v):
    """Reference to a local variable."""
    return v

def invoke_function(path, name, args):
    """Call a (possibly dotted) free function."""
    target = ".".join(path + [name])
    return "%s(%s)" % (target, ", ".join(args))

def construct(clazz, args):
    """Constructor invocation."""
    return "new %s(%s)" % (clazz, ", ".join(args))

def invoke_super(clazz, base, args):
    """Chained call to the superclass constructor."""
    joined = ", ".join(["this"] + args)
    return "%s.super_.call(%s)" % (clazz, joined)

def invoke_method(expr, method, args):
    """Method call on an arbitrary expression."""
    return "(%s).%s(%s)" % (expr, method, ", ".join(args))

def invoke_method_implicit(method, args):
    """Method call on the current instance."""
    return "this.%s(%s)" % (method, ", ".join(args))

def invoke_super_method(clazz, base, method, args):
    """Call the superclass implementation of a method."""
    joined = ", ".join(["this"] + args)
    return "this.constructor.super_.prototype.%s.call(%s)" % (method, joined)

def invoke_static_method(path, clazz, method, args):
    """Static method call on a (possibly dotted) class."""
    target = ".".join(path + [clazz])
    return "%s.%s(%s)" % (target, method, ", ".join(args))

def get_static_field(path, clazz, field):
    """Read a static field from a (possibly dotted) class."""
    return "%s.%s" % (".".join(path + [clazz]), field)

def get_field(expr, field):
    """Read an instance field from an arbitrary expression."""
    return "(%s).%s" % (expr, field)

def cast(type, expr):
    """Runtime-checked cast via the Quark runtime; no-op for empty type."""
    if type == '': # TODO why does this happen?
        return expr
    assert expr
    return '_qrt.cast({expr}, function () {{ return {type}; }})'.format(
        expr=expr, type=type)
## Literals
def null():
    """JS null literal."""
    return "null"

def bool_(b):
    """Boolean literal: reuse the source token text."""
    return b.text

def number(n):
    """Numeric literal; Quark's long suffix "L" has no JS equivalent."""
    assert n.text
    text = n.text
    return text[:-1] if text[-1:] == "L" else text

def string(s):
    """String literal: rewrite \\xNN escapes as \\u00NN, which JS accepts."""
    text = s.text
    out = [text[0]]
    idx = 1
    while idx < len(text) - 1:
        if text[idx] == "\\" and text[idx + 1] == "x":
            out.append("\\u00")
            idx += 2  # skip both the backslash and the "x"
        else:
            out.append(text[idx])
            idx += 1
    out.append(text[-1])
    return "".join(out)

def list_(elements):
    """JS array literal."""
    return "[%s]" % ", ".join(elements)
def map(entries):
return | |
# -*- coding: iso-8859-1 -*-
"""
Create files (from Rugheimer metadata) that give the atmospheric profile, i.e. mixing ratio, temperature and pressure as a function of altitude.
Since the Rugheimer T/P and mixing ratio files are generated from different codes, they have different abscissa, and so different files are generated for them. Interpolation is used in our code to match the two files.
"""
import numpy as np
import pdb
import matplotlib.pyplot as plt
import scipy.stats
from scipy import interpolate as interp
import cookbook
def extract_profiles_primitive_earth_rugheimer():
    """
    Form spectra, mixing ratio files, and T/P profiles for the revised
    Rugheimer Epoch 0 (3.9 Ga) Earth models. This is to triangulate the
    sources of our differences.

    Reads the Rugheimer photochemistry output (plus ancillary metadata files
    and the Claire et al. 3.9 Ga solar model) and writes three files: a
    recomputed BOA spectrum, a mixing-ratio table, and a T/P profile.

    Fix vs. original: output files are opened with context managers so the
    handles are closed even if a write raises; the redundant function-local
    ``import cookbook`` (unused here; also imported at module level) is gone.
    """
    #####Zeroth: set value of constants, specify filenames
    filename='./Raw_Data/Rugheimer_Metadata/outchem_Ep0_A0.2_Frac1.0.dat'
    bar2Ba=1.0e6 #1 bar in Ba
    k=1.3806488e-16 #Boltzmann Constant in erg/K
    #####First, form the spectra for comparison.
    importeddata=np.genfromtxt(filename, skip_header=290, skip_footer=1277)
    #Remove the first wavelength bin which corresponds to Lyman Alpha and which does not have a bin width that fits with its neighbors.
    rugheimer_wav_centers=importeddata[1:,1]/10. #Convert wavelengths from Angstroms to nm
    rugheimer_s=importeddata[1:,4] #ratio of 4piJ(surf)/I_0
    rugheimer_s[19]=3.16548e-128 #one element of rugheimer_s has value 3.16548e-128. Python has trouble with this and imports as a NaN. Here, we manually set its value.
    ###Form wavelength bins from Rugheimer wavelength centers
    rugheimer_wav_bin_leftedges=np.zeros(len(rugheimer_wav_centers))
    rugheimer_wav_bin_rightedges=np.zeros(len(rugheimer_wav_centers))
    #First ten FUV fluxes are 5 nm (50 A) bins (email from <EMAIL>, 3/12/2015)
    rugheimer_wav_bin_leftedges[0:9]=rugheimer_wav_centers[0:9]-2.5
    rugheimer_wav_bin_rightedges[0:9]=rugheimer_wav_centers[0:9]+2.5
    #Remainder of FUV fluxes are taken from a file that sarah sent me (<EMAIL>, 3/12/2015)
    del importeddata
    importeddata=np.genfromtxt('./Raw_Data/Rugheimer_Metadata/Active_M9_Teff2300_photo.pdat', skip_header=1, skip_footer=0)
    rugheimer_wav_bin_leftedges[9:]=importeddata[:,2]*0.1 #convert A to nm
    rugheimer_wav_bin_rightedges[9:]=importeddata[:,3]*0.1 #convert A to nm
    ####Check that bins are correct:
    ###print np.sum(rugheimer_wav_centers-0.5*(rugheimer_wav_bin_leftedges+rugheimer_wav_bin_rightedges)) #0 to within 1e-12 rounding error.
    ###Rebin Claire et al input.
    #Import 0.01-nm resolution Claire et al 3.9 Ga Sun model.
    del importeddata
    importeddata=np.genfromtxt('./Raw_Data/Claire_Model/claire_youngsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata[:,0] #nm, 0.01 nm resolution
    claire_fluxes=importeddata[:,1]#erg/s/cm2/nm
    #Bin Claire et al model to resolution of Rugheimer model
    claire_fluxes_rebinned=np.zeros(len(rugheimer_wav_centers))
    claire_wav_rebinned=np.zeros(len(claire_fluxes_rebinned))#This should be redundant with rugheimer_wav_centers. We include it as a check statistic that the rebinning is proceeding appropriately.
    for ind in range(0, len(rugheimer_wav_centers)):
        min_wav=rugheimer_wav_bin_leftedges[ind]
        max_wav=rugheimer_wav_bin_rightedges[ind]
        inds=(claire_wav >= min_wav) & (claire_wav <= max_wav)
        claire_fluxes_rebinned[ind]=np.mean(claire_fluxes[inds])
        claire_wav_rebinned[ind]=np.mean(claire_wav[inds]) #check statistic.
    ###print np.sum((claire_wav_rebinned-rugheimer_wav_centers)/rugheimer_wav_centers) #check statistic. Good to within 1e-5 in all cases. Any problems caused by slight misalignment from 0.01 due to rounding error. Good enough.
    ###Compute bottom-of-atmosphere actinic flux, which is what is reported in Rugheimer+2015.
    rugheimer_ground_energies=claire_fluxes_rebinned*rugheimer_s
    #Let's print out the results
    spectable=np.zeros([len(rugheimer_wav_bin_leftedges),5])
    spectable[:,0]=rugheimer_wav_bin_leftedges
    spectable[:,1]=rugheimer_wav_bin_rightedges
    spectable[:,2]=rugheimer_wav_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=rugheimer_ground_energies
    header='Left Bin Edge (nm) Right Bin Edge (nm) Bin Center (nm) Solar Flux at Earth (erg/s/nm/cm2) 3.9 Ga BOA Intensity (erg/s/nm/cm2)\n'
    with open('./LiteratureSpectra/rugheimer_epoch0_recomputed_A0.2.dat', 'w') as f:
        f.write(header)
        np.savetxt(f, spectable, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    importeddata1=np.genfromtxt(filename, skip_header=779, skip_footer=873) #O2, O3, H2O
    importeddata2=np.genfromtxt(filename, skip_header=837, skip_footer=817) #CH4, SO2
    importeddata4=np.genfromtxt(filename, skip_header=958, skip_footer=704) #N2, CO2
    #Let's print out the results. We have established that the z values are the same, so can use a common block
    printtable=np.zeros([np.shape(importeddata1)[0],9])
    printtable[:,0]=importeddata1[:,0] #altitude in cm
    #N2 and CO2: We use the values from this block rather than block 1 because rugheimer et al force it to these values in their code, regardless of what the photochemistry code wants to do.
    printtable[:,1]=importeddata4[:,2] #N2.
    printtable[:,2]=importeddata4[:,1] #CO2
    #The rest are normal
    printtable[:,3]=importeddata1[:,3] #H2O
    printtable[:,4]=importeddata2[:,2] #CH4
    printtable[:,5]=importeddata2[:,9] #SO2
    printtable[:,6]=importeddata1[:,2] #O2
    printtable[:,7]=importeddata1[:,8] #O3
    #printtable[:,8]# H2S; left as zeros since not included in Rugheimer model
    #print np.sum(printtable[:,1:],1)
    #pdb.set_trace()
    header0='Extracted from Rugheimer outchem_Ep0_A0.2_Frac1.0.dat\n'
    header1='Z (cm) N2 CO2 H2O CH4 SO2 O2 O3 H2S \n'
    with open('./MixingRatios/rugheimer_earth_epoch0_recomputed_A0.2_mixingratios_v2.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, printtable, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Third, form the T/P profiles
    #Extract temperature and pressure profile from climate model output
    #For whatever reason the very last line of the table is doubled. We remove this.
    importeddata=np.genfromtxt(filename, skip_header=1568, skip_footer=104)
    model_z=importeddata[:-1,0] #altitude in cm
    model_t=importeddata[:-1,1] #temperature in K
    model_n=importeddata[:-1,3] #number density in cm**-3.
    model_p=importeddata[:-1,4] #pressure, in bar (based on text in draft manuscript sent to me by <NAME>)
    #Let's print out the results
    printtable=np.zeros([len(model_z)+1,4])
    printtable[1:,0]=model_z
    printtable[1:,1]=model_t
    printtable[1:,2]=model_n
    printtable[1:,3]=model_p
    #Rugheimer data file does not explicitly include t, P, n at z=0 (Surface). Our code requires z=0 data. To reconcile, we include these data manually as follows:
    printtable[0,0]=0. #z=0 case
    printtable[0,3]=1. #In the paper, p=1.0 bar at surface is specified
    printtable[0,1]=292.95 #From linear extrapolation from z=0.5 km and z=1.5 km points
    printtable[0,2]= 1.*bar2Ba/(k*292.95)#Compute number density self-consistently from temperature, pressure via Ideal Gas Law as is done elsewhere (n [cm**-3] = p [Barye]/(k*T [K])
    header0='Extracted from Rugheimer outchem_Ep0_A0.2_Frac1.0.dat\n'
    header1='Z (cm) T (K) DEN (cm**-3) P (bar) \n'
    with open('./TPProfiles/rugheimer_earth_epoch0_recomputed_A0.2_atmosphereprofile.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, printtable, delimiter=' ', fmt='%1.7e', newline='\n')
#extract_profiles_primitive_earth_rugheimer()
def extract_profiles_modern_earth_rugheimer():
    """
    Form spectra, mixing ratio files, and T/P profiles for the Rugheimer+2014
    modern Earth surface UV models. This is a test case.

    Fix vs. original: output files are opened with context managers so the
    handles are closed even if a write raises; the redundant function-local
    ``import cookbook`` (unused here; also imported at module level) is gone.
    """
    #####Zeroth: set value of constants, specify filenames
    filename='./Raw_Data/Rugheimer_Metadata/output_couple_Sun_100.dat'
    bar2Ba=1.0e6 #1 bar in Ba
    k=1.3806488e-16 #Boltzmann Constant in erg/K
    #####First, form the spectra for comparison.
    #Extract spectra from Rugheimer file
    importeddata=np.genfromtxt(filename, skip_header=286, skip_footer=102)
    #Remove the first wavelength bin which corresponds to Lyman Alpha and which does not have a bin width that fits with its neighbors.
    spec_wav=importeddata[1:,0]*0.1 #A to nm
    spec_top=importeddata[1:,1]*1.e3 #W/m^2/nm to erg/cm^2/s/nm
    spec_gnd=importeddata[1:,2]*1.e3 #W/m^2/nm to erg/cm^2/s/nm
    #two elements of the file are not importing correctly, set them manually here
    spec_gnd[23]=2.92059e-121*1.e3
    spec_gnd[24]=1.57780e-102 *1.e3
    #Next, extract the edges of the spectral bins.
    bin_left_edges=np.zeros(np.shape(spec_wav))
    bin_right_edges=np.zeros(np.shape(spec_wav))
    #first 9 bins are 5-nm (50 angstrom) wide bins (See faruv_sun.pdat)
    bin_left_edges[0:9]=spec_wav[0:9]-2.5
    bin_right_edges[0:9]=spec_wav[0:9]+2.5
    #The edges for the rest of the bins can be taken from G2V_photo.pdat:
    importeddata=np.genfromtxt('./Raw_Data/Rugheimer_Metadata/G2V_photo.pdat', skip_header=1, skip_footer=0)
    bin_left_edges[9:]=importeddata[:,2]*0.1 #convert from A to nm
    bin_right_edges[9:]=importeddata[:,3]*0.1 #convert from A to nm
    ###let's validate our bin edges by computing the bin centers and making sure the residuals aren't too high
    ##diff=(0.5*(bin_left_edges+bin_right_edges)-spec_wav)#/spec_wav
    ##print diff
    ##print np.max(np.abs(diff))
    ###this test shows very slight offsets, at the 0.05 nm level at maximum. Should not affect results given bins are >1nm in width.
    #Let's print out the results
    printtable=np.zeros([len(bin_left_edges),5])
    printtable[:,0]=bin_left_edges
    printtable[:,1]=bin_right_edges
    printtable[:,2]=spec_wav
    printtable[:,3]=spec_top
    printtable[:,4]=spec_gnd
    header='Left Bin Edge (nm) Right Bin Edge (nm) Bin Center (nm) TOA Flux (erg/s/nm/cm2) BOA Actinic Flux (erg/s/nm/cm2) \n'
    with open('./LiteratureSpectra/rugheimer_earth_modern.dat', 'w') as f:
        f.write(header)
        np.savetxt(f, printtable, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    importeddata1=np.genfromtxt(filename, skip_header=78, skip_footer=323) #water, methane
    importeddata2=np.genfromtxt(filename, skip_header=182, skip_footer=222) #ozone, must derive from number density
    #Let's print out the results. We have established that the z values are the same, so can use a common block
    printtable=np.zeros([np.shape(importeddata1)[0],9])
    printtable[:,0]=importeddata1[:,0]*1.e5 #altitude in cm (converted from km)
    #N2 O2, and CO2: Well-mixed
    #H2O, CH4, O3: tracked through atmosphere
    #SO2: Not tracked. Assume 0.
    printtable[:,1]=printtable[:,1]+ 0.78#N2; level tuned to assure 1 bar of surface pressure. Earth mean value given here.
    printtable[:,2]=printtable[:,2]+355.e-6 #CO2; level directly quoted in paper
    printtable[:,3]=importeddata1[:,2] #H2O
    printtable[:,4]=importeddata1[:,4] #CH4
    #printtable[:,5]=printtable[:,5] #SO2; left as zeros since not included in the model
    printtable[:,6]=printtable[:,6]+0.21 #O2; level directly quoted in paper
    printtable[:,7]=importeddata2[:,4]/importeddata2[:,2]#O3
    #printtable[:,8]=printtable[:,8]# H2S; left as zeros since not included in the model
    header0='Extracted from Rugheimer output_couple_Sun_100.dat\n'
    header1='Z (cm) N2 CO2 H2O CH4 SO2 O2 O3 H2S\n'
    with open('./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, printtable, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    ####Third, form the T/P profiles
    N_A=6.022e23 #Avogadro's number
    bar2Ba=1.0e6 #1 bar in Ba
    atm2bar=1.01325 #1 atm in bar
    k=83.14472/N_A #Boltzman constant in bar*cm^3/K, converted from bar*cm^3/(K*mol) (from http://www.engineeringtoolbox.com/individual-universal-gas-constant-d_588.html)
    #Extract temperature and pressure profile from climate model output
    importeddata=np.genfromtxt(filename, skip_header=409, skip_footer=0)
    model_z=importeddata[::-1,1]*1.e5 #altitude in cm, converted from km
    model_t=importeddata[::-1,2] #temperature in K
    model_p=importeddata[::-1,0]*atm2bar #pressure, in bar, converted from atm.
    model_n=model_p/(model_t*k) #number density in cm**-3, computed from ideal gas law.
    #Let's print out the results
    printtable=np.zeros([len(model_z),4])
    printtable[:,0]=model_z
    printtable[:,1]=model_t
    printtable[:,2]=model_n
    printtable[:,3]=model_p
    header0='Extracted from Rugheimer output_couple_Sun_100.dat\n'
    header1='Z (cm) T (K) DEN (cm**-3) P (bar) \n'
    with open('./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, printtable, delimiter=' ', fmt='%1.7e', newline='\n')
#extract_profiles_modern_earth_rugheimer()
def form_profiles_wuttke():
    """
    Form the feedstock files to replicate the Wuttke+2006 Antarctic diffuse
    radiance measurements.

    Writes a spectral file (Claire modern-Sun TOA fluxes rebinned to the
    measurement bins, plus the interpolated measured diffuse flux), a mixing
    ratio file, and a T/P profile. The latter two are derived from the
    Rugheimer modern-Earth files with H2O scaled down 10x and O3 up 1.25x.

    Fix vs. original: output files are opened with context managers so the
    handles are closed even if a write raises.
    """
    import cookbook
    #First, form the spectral file.
    #Define spectral bins. 0.25 nm from 280-500 nm, 1 nm from 500-1000 nm. We just go to 900 since that's what our data is good to. Also we start at 292.75 because that's where our graphclicked data starts
    bin_left_edges=np.concatenate((np.arange(292.75,500.,0.25),np.arange(500., 900.,1.)))
    bin_right_edges=np.concatenate((np.arange(293.,500.25,0.25),np.arange(501., 901.,1.)))
    bin_centers=0.5*(bin_left_edges+bin_right_edges)
    #load BOA diffuse zenith flux from Wuttke+2006 (extracted via GraphClick)
    importeddata=np.genfromtxt('./Raw_Data/UV_Surface_Measurements/wuttke.csv', skip_header=0, skip_footer=0, delimiter=',')
    dif_wav=importeddata[:,0] #nm
    dif_flux=importeddata[:,1]*2.*np.pi #mW/m2/nm/sr=erg/s/cm2/nm/sr; multiply by 2pi to convert to hemisphere-integrated total surface diffuse radiances
    dif_func=interp.interp1d(dif_wav, dif_flux, kind='linear')
    dif_flux_interp=dif_func(bin_centers)
    #load solar spectrum from Claire et al (2012) models, normalized to 1 au
    importeddata=np.genfromtxt('./Raw_Data/Claire_Model/claire_modernsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata[:,0] #nm, 0.1 nm resolution, 100-900 nm.
    claire_fluxes=importeddata[:,1]#erg/s/cm2/nm
    #rebin claire spectrum
    claire_fluxes_rebinned=cookbook.rebin_uneven(np.arange(99.995,900.005,0.01), np.arange(100.005, 900.015,0.01),claire_fluxes,bin_left_edges, bin_right_edges)
    #Plot to make sure rebinning worked correctly
    fig, ax1=plt.subplots(1, figsize=(6,4))
    ax1.plot(claire_wav, claire_fluxes, marker='s', color='black', label='Claire Fluxes')
    ax1.plot(bin_centers, claire_fluxes_rebinned, marker='s', color='blue', label='Binned Claire Fluxes')
    ax1.set_yscale('log')
    ax1.set_ylim([1.e-2, 1.e4])
    ax1.set_xlim([280.,900.])
    ax1.set_xlabel('nm')
    ax1.set_ylabel('erg/s/cm2/nm')
    ax1.legend(loc=0)
    plt.show()
    #Let's print out the results
    spectable=np.zeros([len(bin_left_edges),5])
    spectable[:,0]=bin_left_edges
    spectable[:,1]=bin_right_edges
    spectable[:,2]=bin_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=dif_flux_interp
    header='Left Bin Edge (nm) Right Bin Edge (nm) Bin Center (nm) Top of Atm Flux (erg/s/nm/cm2) Zenith Diffuse Flux (erg/s/nm/cm2)\n'
    with open('./LiteratureSpectra/wuttke2006.dat', 'w') as f:
        f.write(header)
        np.savetxt(f, spectable, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    #####Form by replicating the Rugheimer modern Earth profile, then scaling down the H2O level and scaling up the O3 level.
    mixingratios=np.genfromtxt('./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat', skip_header=2, skip_footer=0)
    mixingratios[:,3]=mixingratios[:,3]*0.1 #scale down h2o by factor of 10
    mixingratios[:,7]=mixingratios[:,7]*1.25 #scale up ozone by factor of 1.25
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm) N2 CO2 H2O CH4 SO2 O2 O3 H2S\n'
    with open('./MixingRatios/wuttke2006_mixingratios_v2.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, mixingratios, delimiter=' ', fmt='%1.7e', newline='\n')
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Finally, form TP profile
    #####Form by duplicating Rugheimer+2013 modern Earth profile
    tpprofile=np.genfromtxt('./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat', skip_header=2, skip_footer=0)
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm) T (K) DEN (cm**-3) P (bar) \n'
    with open('./TPProfiles/wuttke2006_atmosphereprofile.dat', 'w') as f:
        f.write(header0)
        f.write(header1)
        np.savetxt(f, tpprofile, delimiter=' ', fmt='%1.7e', newline='\n')
#form_profiles_wuttke()
def form_profiles_woudc():
"""
Purpose of this code is to form the feedstock files to replicate the irradiance measurements from the WOUDC website for Toronto (June 21 2003, SZA=20.376, O3=354, Brewer no. 145)
"""
########First, form the spectral file.
#load measured irradiances
importeddata=np.genfromtxt('./Raw_Data/UV_Surface_Measurements/woudc_toronto_2003_145_cut.dat', skip_header=1, skip_footer=0, delimiter=' ')
woudc_wav=importeddata[:,0] #nm
woudc_flux=importeddata[:,1]*1.e3 #W/m2/nm=1000 erg/s/cm2/nm
#woudc_func=interp.interp1d(woudc_wav, woudc_flux, kind='linear')
#woudc_flux_interp=dif_func(bin_centers)
#Define spectral bins.
bin_centers=woudc_wav
bin_left_edges=woudc_wav-0.25
bin_right_edges=woudc_wav+0.25
#load | |
# Repository: sariths/stadicViewer
"""This module enables the GUI, buttons and control logic for Room-based plots."""
# coding=utf-8
from __future__ import print_function
import bisect
import numbers
import sys
import numpy as np
from PyQt4 import QtCore,QtGui
from dataStructures.timeSeries import TimeArray
from readStadicData.processVisData import VisData
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from dataStructures.dayIll import Dayill
from readStadicData.parseJson import StadicProject
from pyqtGui.gui import Ui_Form
from plotFunctions.gridPlots import gridPlot
#~~~~~DevNotes: 25Mar2016:~~~~~~~
# Fixed display limits to conform to illuminance units.
# Moved all the connect signals to the end so that they don't get triggered..
# in the beginning the readStadicData is being loaded into all the comboBoxes and textBoxes
# TODO: Add a Qmessagebox to show an error message in case the grid..
# TODO:..spacings aren't uniform.
# TODO: Fix the calendar year. (Almost done) !
# TODO: Mask missing points.
class NavigationToolbarStadic(NavigationToolbar):
    """Matplotlib Qt navigation toolbar that appends the hovered STADIC data
    value (with its unit) to the coordinate readout in the status bar.
    """

    # Optional label inserted before the data value in the status readout.
    dataDescr = None
    # Unit of the plotted data: 'lux', 'fc', or '%'; None suppresses the value.
    dataType = None

    def mouse_move(self, event):
        """Override of the base-class mouse-move handler.

        Mirrors the stock NavigationToolbar2.mouse_move flow (coordinate
        formatting, artist lookup via the axes' mouseover set), but formats
        the artist's cursor data according to ``dataType``: integer lux,
        3-decimal foot-candles, or a value scaled by 100 for percentages.
        NOTE(review): relies on matplotlib internals (``mouseover_set``,
        ``get_cursor_data``) -- behaviour is tied to the matplotlib version.
        """
        self._set_cursor(event)
        if event.inaxes and event.inaxes.get_navigate():
            try:
                s = event.inaxes.format_coord(event.xdata, event.ydata)
            except (ValueError, OverflowError):
                pass
            else:
                artists = [a for a in event.inaxes.mouseover_set
                           if a.contains(event)]
                if artists:
                    # Pick the topmost (highest zorder) artist under the cursor.
                    a = max(enumerate(artists), key=lambda x: x[1].zorder)[1]
                    if a is not event.inaxes.patch:
                        data = a.get_cursor_data(event)
                        if isinstance(data, numbers.Number):
                            if self.dataDescr:
                                s += " {} ".format(self.dataDescr)
                            if self.dataType:
                                if self.dataType == 'lux':
                                    dataVal = int(data)
                                elif self.dataType == 'fc':
                                    dataVal = round(data, 3)
                                else:
                                    # Percentage metrics are stored as fractions.
                                    dataVal = round(data * 100, 3)
                                s += '{}'.format(dataVal)
                                # '%' hugs the number; other units get a space.
                                if self.dataType != "%":
                                    s += ' {}'.format(self.dataType)
                                else:
                                    s += '{}'.format(self.dataType)
                        else:
                            s = ''
                        # Blank the readout for missing (NaN) or negative points.
                        # NOTE(review): `data is np.NaN` is an identity test and
                        # only matches the np.NaN singleton -- confirm intent.
                        if data is np.NaN or data < 0:
                            s = ''
                if len(self.mode):
                    self.set_message('%s, %s' % (self.mode, s))
                else:
                    self.set_message(s)
        else:
            self.set_message(self.mode)

    def pick_event(self, event):
        """Debug hook: print the indices of picked artists."""
        print(event.ind)
class Spatial(QtGui.QDialog, Ui_Form,VisData):
def setupGui(self):
    """One-time initialisation of the room ("Spatial") tab.

    Sets up the calendar/time widgets for the project year, embeds the
    matplotlib canvas, seeds colour/contour display defaults (scaled to the
    project's illuminance unit), loads the first illuminance file, fills all
    combo boxes, and finally wires the Qt signal connections.

    NOTE(review): signals are connected last on purpose -- connecting them
    earlier would fire plotting callbacks while the widgets are still being
    populated (see the DevNotes at the top of this module).
    """
    if self.dataSpaceNameSelected:
        self.tabWidget.setEnabled(True)
        #Set the calendar as per the starting year.
        self.calSpaceDateTimeIllum.setMinimumDateTime(
            QtCore.QDateTime(self.dataYear,1,1,0,0))
        self.calSpaceDateTimeIllum.setDateTime(
            QtCore.QDateTime(self.dataYear, 1, 1, 0, 0))
        self.calSpaceDateTimeIllum.setMaximumDateTime(
            QtCore.QDateTime(self.dataYear,12,31,23,59))
        self.grpContoursIlluminance.setEnabled(True)
        self.btnSpaceSettingsContour.setEnabled(True)
        #TODO: Change the visiblity settings to True later ;)
        self.btnSpaceSettingsContour.setVisible(True)
        #Setup matplotlib inside Qt.
        self.spFigure = Figure()
        self.spCanvas = FigureCanvas(self.spFigure)
        #Validator for setting values
        floatValidator = QtGui.QDoubleValidator(0.0,20000.0,3)
        #Settings for showing and hiding color and contours.
        self.grpColoursIlluminance.setVisible(False)
        self.grpContoursIlluminance.setVisible(False)
        #Initiate a dictionary for ill files.
        self.spAllFilesDict = {}
        #Code for manipulating navigation settings for illuminance.
        self.spTimeStepIlluminance = 1 #This attribute determines the time step for stepping between different illuminance plots.
        #Changing/clicking any of the below controls should trigger the illuminance plots.
        self.spIlluminanceActivated = False
        self.spCurrentIlluminanceHour = 9
        units = self.dataProject.unitsIlluminance
        # Scale lux-based defaults down by 10x when the project reports foot-candles.
        unitsMultiplier = {'lux':1,'fc':0.1}[str(units)]
        self.spIlluminanceMaxVal = 5000*unitsMultiplier
        self.spIlluminanceMinVal = 0
        self.spIlluminanceMaxValDefault = 5000*unitsMultiplier
        self.spIlluminanceMinValDefault = 0
        self.spIlluminanceUpperMaskValue = None
        self.spIlluminanceLowerMaskValue = None
        self.spIlluminanceUpperMaskColor = None
        self.spIlluminanceLowerMaskColor = None
        self.spElectricMaxVal = 400 * unitsMultiplier
        self.spElectricMinVal = 0
        self.spElectricMaxValDefault = 400 * unitsMultiplier
        self.spElectricMinValDefault = 0
        self.spElectricUpperMaskValue = None
        self.spElectricLowerMaskValue = None
        self.spElectricUpperMaskColor = None
        self.spElectricLowerMaskColor = None
        # Metrics (e.g. daylight autonomy) are fractions in [0, 1].
        self.spMetricsMaxVal = 1.0
        self.spMetricsMinVal = 0.0
        self.spMetricsMaxValDefault = 1.0
        self.spMetricsMinValDefault = 0
        self.spMetricsUpperMaskValue = None
        self.spMetricsLowerMaskValue = None
        self.spMetricsUpperMaskColor = None
        self.spMetricsLowerMaskColor = None
        self.spCurrentPlotIsIlluminance = True
        self.spCurrentPlotIsElectric = False
        self.txtSpaceColorsMax.setText(str(self.spIlluminanceMaxValDefault))
        self.txtSpaceColorsMin.setText(str(self.spIlluminanceMinValDefault))
        self.txtSpaceColorsMax.setValidator(floatValidator)
        self.txtSpaceColorsMin.setValidator(floatValidator)
        self.spPlotIlluminanceColors = True
        #Put all contourboxes inside a list for easy iteration.
        self.spContourBoxes = [self.txtSpaceCountourValue1, self.txtSpaceCountourValue2, self.txtSpaceCountourValue3, self.txtSpaceCountourValue4,
                               self.txtSpaceCountourValue5, self.txtSpaceCountourValue6, self.txtSpaceCountourValue7, self.txtSpaceCountourValue8]
        for contourBox in self.spContourBoxes:
            contourBox.setValidator(floatValidator)
        # NOTE(review): map() returns a list under Python 2; the results are
        # indexed below, so this module is Python-2 only as written.
        self.spContourValuesIlluminance = (50, 100, 500, 1000, 2000, 3000, 5000, 10000)
        self.spContourValuesIlluminance = map(lambda x:x*unitsMultiplier,self.spContourValuesIlluminance)
        self.spContourValuesElectric = (50, 100, 150, 200, 250, 300, 350, 400)
        self.spContourValuesElectric = map(lambda x:x*unitsMultiplier,self.spContourValuesElectric)
        self.spContourValuesIlluminanceDefault = (50, 100, 500, 1000, 2000, 3000, 5000, 10000)
        self.spContourValuesIlluminanceDefault = map(lambda x:x*unitsMultiplier,self.spContourValuesIlluminanceDefault)
        self.spContourValuesElectricDefault = (50, 100, 150, 200, 250, 300, 350, 400)
        self.spContourValuesElectricDefault = map(lambda x:x*unitsMultiplier,self.spContourValuesElectricDefault)
        for idx,contourBox in enumerate(self.spContourBoxes):
            contourBox.setText(str(self.spContourValuesIlluminance[idx]))
        self.spContourValuesMetrics = (0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9, 1.0)
        self.spContourValuesMetricsDefault = (0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9, 1.0)
        #Constructor stuff: (display name, matplotlib colormap name) pairs.
        self.spColorMapTuple = (('Uniform01', 'viridis'), ('Uniform02', 'inferno'), ('Uniform03', 'plasma'), ('Uniform04', 'magma'), ('Blues', 'Blues'),
                                ('BlueGreen','BuGn'), ('BluePurple','BuPu'), ('GreenBlue','GnBu'), ('Greens','Greens'), ('Greys','Greys'), ('Oranges','Oranges'),
                                ('OrangeRed','OrRd'), ('PurpleBlue','PuBu'), ('PurpleBlueGreen','PuBuGn'), ('PurpleRed','PuRd'), ('Purples','Purples'),
                                ('RedPurple','RdPu'), ('Reds','Reds'), ('YellowGreen','YlGn'), ('YellowGreenBlue','YlGnBu'), ('YellowOrangeBrown','YlOrBr'),
                                ('YellowOrangeRed','YlOrRd'), ('Hot01','afmhot'), ('Hot02','hot'), ('Hot03','gist_heat'), ('Autumn','autumn'), ('Bone','bone'), ('Cool','cool'),
                                ('Copper','copper'), ('Spring','spring'), ('Summer','summer'), ('Winter','winter'))
        colorNames = [name for name,plotName in self.spColorMapTuple]
        self.spColorDict =dict(self.spColorMapTuple)
        self.cmbSpaceColorScheme.addItems(colorNames)
        # Index 21 == 'YellowOrangeRed', matching the default scheme below.
        self.cmbSpaceColorScheme.setCurrentIndex(21)
        self.spCurrentColorScheme = 'YlOrRd'
        self.spCurrentSpaceChartOpacityValue = 1
        self.spCurrentColorSchemeMetrics = 'YlOrRd'
        self.spCurrentSpaceChartOpacityValueMetrics = 1
        self.spCurrentColorSchemeElectric = 'YlOrRd'
        self.spCurrentSpaceChartOpacityValueElectric = 1
        self.spInterpolateColorScheme= None
        self.txtSpaceStatusDisplay.setEnabled(False)
        illFileKeys,illFileNames = zip(*self.dataDayIllFilesList)
        self.ptsFile = self.dataPtsFile
        # Load the first illuminance file up-front; its time stamps drive the
        # hour combo boxes below.
        self.illData = Dayill(illFileNames[0],self.ptsFile)
        hourFormat = self.illData.timedata[0:24]
        hourFormat = [hourVal['tstamp'].strftime("%I:%M %p") for hourVal in hourFormat]
        #Set valid time stamps for all the drop boxes that show time.
        self.cmbSpaceTimeIllum.clear()
        self.cmbSpaceTimeIllum.addItems(map(str,hourFormat))
        self.cmbSpaceTimeIllum.setCurrentIndex(9)
        self.cmbSpaceTimeIntervalMax.clear()
        self.cmbSpaceTimeIntervalMax.addItems(map(str,hourFormat))
        self.cmbSpaceTimeIntervalMax.setCurrentIndex(23)
        self.cmbSpaceTimeIntervalMin.clear()
        self.cmbSpaceTimeIntervalMin.addItems(map(str,hourFormat))
        self.cmbSpaceTimeIntervalMin.setCurrentIndex(0)
        # Set valid time stamps for all the drop boxes that show time.
        self.cmbCombinedTimeIllum.clear()
        self.cmbCombinedTimeIllum.addItems(map(str, hourFormat))
        self.cmbCombinedTimeIllum.setCurrentIndex(9)
        self.cmbCombinedTimeIntervalMax.clear()
        self.cmbCombinedTimeIntervalMax.addItems(map(str, hourFormat))
        self.cmbCombinedTimeIntervalMax.setCurrentIndex(23)
        self.cmbCombinedTimeIntervalMin.clear()
        self.cmbCombinedTimeIntervalMin.addItems(map(str, hourFormat))
        self.cmbCombinedTimeIntervalMin.setCurrentIndex(0)
        self.spAllFilesDict = self.dataAllFiles
        # Added this test as sometimes metrics are not calculated. In those cases it's just the illuminance readStadicData.
        try:
            resultsFiles,resultsFilesNames = zip(*self.dataMetricsFilesList)
            if self.dataElectricIllFilesList:
                electricFiles,electricFilesNames = zip(*self.dataElectricIllFilesList)
            else:
                electricFiles=[]
                electricFilesNames=[]
            mainComboBoxContents = [illFileKeys[0]]+ \
                                   sorted(list(resultsFiles))+ \
                                   sorted(list(electricFiles))
        except ValueError:
            # zip(*[]) raises ValueError on unpacking; fall back to illuminance only.
            mainComboBoxContents = [illFileKeys[0]]
        self.cmbSpacePlotType.clear()
        self.cmbSpacePlotType.addItems(mainComboBoxContents)
        self.cmbSpaceSelectIlluminanceFile.clear()
        self.cmbSpaceSelectIlluminanceFile.addItems(illFileKeys)
        self.cmbCombinedSelectIlluminanceFile.clear()
        self.cmbCombinedSelectIlluminanceFile.addItems(illFileKeys)
        self.spacePlotTypeDict = self.dataAllFilesAvailable
        self.spShadeSchedule = self.dataProject.spaces[self.dataSpaceIndex].scheduleShades
        self.spWindowGroupNames = [windowGroup.name for windowGroup in self.dataProject.spaces[self.dataSpaceIndex].windowGroups]
        self.spShowWindowGroupInfo = True #Toggle this to False in case window Group info isn't to be shown.
        if self.spShadeSchedule and self.spShowWindowGroupInfo:
            shadeData = TimeArray(self.spShadeSchedule)
            shadeData = [map(int,timedata['readStadicData']) for timedata in shadeData.timedata]
            self.spShadeSchedule = shadeData
        # Signal connections are made only now, after all widgets are populated,
        # so that populating them above does not fire plotting callbacks.
        self.btnSpaceSettingsContour.clicked.connect(self.spToggleContourSettings)
        self.btnSpaceSettingsColours.clicked.connect(self.spToggleColorSettings)
        self.calSpaceDateTimeIllum.dateChanged.connect(self.spSetCurrentIlluminanceHourCalendar)
        self.cmbSpaceTimeIllum.currentIndexChanged.connect(self.spSetCurrentIlluminanceHourCalendar)
        self.btnSpacePrevHour.clicked.connect(lambda:self.spSetCurrentIlluminanceHourTimeStep(False))
        self.btnSpaceNextHour.clicked.connect(lambda:self.spSetCurrentIlluminanceHourTimeStep(True))
        #If the timestep settings are changed, change the time step but don't trigger the illuminance plot.
        self.cmbSpaceIlluminanceStepType.currentIndexChanged.connect(self.spUpdateIlluminanceTimeStep)
        self.cmbSpaceIluminanceStepValue.currentIndexChanged.connect(self.spUpdateIlluminanceTimeStep)
        #Settings for displaying the opacity value on a box.
        self.sliderSpaceOpacity.valueChanged.connect(self.spOpacitySliderChanged)
        #Settings for color values of the illuminance plot.
        self.btnSelectColorLowerMask.clicked.connect(lambda:self.spMaskSettingsActivated(False))
        self.btnSelectColorUpperMask.clicked.connect(lambda:self.spMaskSettingsActivated(True))
        self.btnSpaceResetColors.clicked.connect(self.spResetColorSettings)
        self.btnSpaceSetColors.clicked.connect(self.spSetColorSettings)
        #settings for contour values for the illuminance plot.
        self.cmbSpaceContourQuantity.currentIndexChanged.connect(self.spSetContourQuantity)
        self.chkSpaceColors.clicked.connect(self.spRefreshPlots)
        self.chkSpaceContours.clicked.connect(self.spRefreshPlots)
        self.btnSpaceResetContours.clicked.connect(self.spResetContourSettings)
        self.btnSpaceSetContours.clicked.connect(self.spSetContourSettings)
        self.btnSpaceSetColorScheme.clicked.connect(self.spAssignSpaceColorScheme)
        self.cmbSpacePlotType.currentIndexChanged.connect(self.spPlotTypeSelect)
        self.cmbSpaceSelectIlluminanceFile.currentIndexChanged.connect(self.spLoadDifferentIlluminanceFile)
        self.cmbSpaceTimeIllum.setCurrentIndex(10)
        # self.spCanvas.mpl_connect('motion_notify_event',self.spMouseClicked)
        # self.spCurrentDataSet = None
        self.txtSpaceMsgBox.setText(self.dataLog)
        # TODO: Delete the line below to enable the timeseries stuff.
        self.tabWidget.removeTab(2)
def spMouseClicked(self, event):
    """
    Kept for future reference on matplotlib event handling; currently has
    no observable effect (all results are discarded).

    :param event: matplotlib mouse event with xdata/ydata in axes coordinates.
    :return: None
    """
    xClicked, yClicked = event.xdata, event.ydata
    if not (xClicked and yClicked):
        return
    gridCoordsX = list(self.illData.roomgrid.uniCorX)
    gridCoordsY = list(self.illData.roomgrid.uniCorY)
    xCorLen, yCorLen = len(gridCoordsX), len(gridCoordsY)
    currentData = self.spCurrentDataSet
    xloc = bisect.bisect(gridCoordsX, xClicked)
    yloc = bisect.bisect(gridCoordsY, yClicked)
def spSetContourSettings(self):
    """Read the enabled, non-empty contour boxes and apply them to the
    contour levels of whichever plot type is currently displayed, then
    redraw that plot."""
    values = [float(str(box.text()))
              for box in self.spContourBoxes
              if box.isEnabled() and box.text()]
    if self.spCurrentPlotIsIlluminance:
        self.spContourValuesIlluminance = values
        self.spPlotIlluminance()
    elif self.spCurrentPlotIsElectric:
        self.spContourValuesElectric = values
        self.spPlotElectric()
    else:
        self.spContourValuesMetrics = values
        self.spPlotMetrics()
def spResetContourSettings(self):
    """Restore the contour-quantity combo and all contour boxes to the
    defaults of the currently displayed plot type."""
    self.cmbSpaceContourQuantity.setCurrentIndex(6)
    if self.spCurrentPlotIsIlluminance:
        defaults = self.spContourValuesIlluminanceDefault
    elif self.spCurrentPlotIsElectric:
        defaults = self.spContourValuesElectricDefault
    else:
        defaults = self.spContourValuesMetricsDefault
    for box, value in zip(self.spContourBoxes, defaults):
        box.setText(str(value))
def spRefreshPlots(self):
    """
    Redraw whichever plot type is currently displayed. This is required
    because certain events just need to re-trigger the current plot.

    :return: None
    """
    if self.spCurrentPlotIsIlluminance:
        redraw = self.spPlotIlluminance
    elif self.spCurrentPlotIsElectric:
        redraw = self.spPlotElectric
    else:
        redraw = self.spPlotMetrics
    redraw()
def spLoadDifferentIlluminanceFile(self):
    """Load the illuminance file currently chosen in the combo box, update
    the status readout, and redraw the illuminance plot."""
    selectedIllFileKey = str(self.cmbSpaceSelectIlluminanceFile.currentText())
    selectedIllFile = self.spAllFilesDict[selectedIllFileKey]
    self.illData = Dayill(selectedIllFile, self.ptsFile)
    statusText = "Current space: {} \tCurrent readStadicData set: {}.\t Source:{}".format(
        self.dataSpaceNameSelected, selectedIllFileKey, selectedIllFile)
    self.txtSpaceStatusDisplay.setText(statusText)
    self.spPlotIlluminance()
def spOpenJsonFileDirectly(self):
jsonFileName = QtGui.QFileDialog.getOpenFileName(self,"Select a json file to open","C:/","Json File (*.json)")
if jsonFileName:
self.jsonFile = str(jsonFileName)
self.txtJsonPath.setText(jsonFileName)
project = StadicProject(jsonFileName)
spaceTuple = [space.spaceName for space in project.spaces]
self.cmbSpaceName.clear()
self.cmbSpaceName.addItems(spaceTuple)
self.cmbSpaceName.setEnabled(True)
self.btnSelectSpaceName.setEnabled(True)
newWindowTitle = jsonFileName+" -- "+self.defaultWindowTitle
self.setWindowTitle(newWindowTitle)
del project
def spLoadVisualsFromOpenedJsonFile(self):
self.txtSpaceStatusDisplay.clear()
self.spLoadJson(self.jsonFile, self.cmbSpaceName.currentIndex())
self.tabWidget.setEnabled(True)
def spAssignSpaceColorScheme(self):
currentColor = self.spColorDict[str(self.cmbSpaceColorScheme.currentText())]
if self.chkSpaceColorSchemeInvert.checkState():
currentColor += "_r"
if self.chkSpaceColorSchemeInterpolate.checkState():
self.spInterpolateColorScheme = 'nearest'
else:
self.spInterpolateColorScheme = 'hanning'
if self.spCurrentPlotIsIlluminance:
self.spCurrentColorScheme = currentColor
self.spCurrentSpaceChartOpacityValue = self.sliderSpaceOpacity.value() / 100.0
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
self.spCurrentColorSchemeElectric = currentColor
self.spCurrentSpaceChartOpacityValueElectric = self.sliderSpaceOpacity.value() / 100.0
self.spPlotElectric()
else:
self.spCurrentColorSchemeMetrics = currentColor
self.spCurrentSpaceChartOpacityValueMetrics = self.sliderSpaceOpacity.value() / 100.0
self.spPlotMetrics()
# TODO:Change this to mean all plots later.
def spSetContourQuantity(self):
contourQuantity = int(self.cmbSpaceContourQuantity.currentText())
for idx,contourBoxes in enumerate(self.spContourBoxes):
if (idx+2-1)>contourQuantity:
contourBoxes.clear()
contourBoxes.setEnabled(False)
else:
contourBoxes.setEnabled(True)
def spMaskSettingsActivated(self, isUpperMask):
colorDialog = QtGui.QColorDialog
selectedColor = colorDialog.getColor()
if selectedColor.isValid():
selectedColor = selectedColor.getRgb()
if isUpperMask:
self.txtSpaceColorsUpperMask.setStyleSheet("background-color: rgb{}".format(selectedColor))
if self.spCurrentPlotIsIlluminance:
self.spIlluminanceUpperMaskColor = selectedColor
elif self.spCurrentPlotIsElectric:
self.spElectricUpperMaskColor = selectedColor
else:
self.spMetricsUpperMaskColor = selectedColor
else:
self.txtSpaceColorsLowerMask.setStyleSheet("background-color: rgb{}".format(selectedColor))
if self.spCurrentPlotIsIlluminance:
self.spIlluminanceLowerMaskColor = selectedColor
elif self.spCurrentPlotIsElectric:
self.spElectricLowerMaskColor = selectedColor
else:
self.spMetricsLowerMaskColor = selectedColor
def spSetCurrentIlluminanceHourCalendar(self):
"""
Plot illuminance based on a selection from the calendar
"""
dateVal = self.calSpaceDateTimeIllum.dateTime().date().dayOfYear()
self.spCurrentIlluminanceHour = (dateVal - 1) * 24 + self.cmbSpaceTimeIllum.currentIndex()
self.spPlotIlluminance()
def spSetCurrentIlluminanceHourTimeStep(self, stepForward):
currentHour = self.spCurrentIlluminanceHour
currentHourOriginal = currentHour
skipDarkHours = self.chkSpaceSkipDarkHours.checkState()
timeStep = self.spTimeStepIlluminance
lowerInterval = self.cmbSpaceTimeIntervalMin.currentIndex()
higherInterval = self.cmbSpaceTimeIntervalMax.currentIndex()
intervals = sorted(range(*sorted([lowerInterval,higherInterval])))
if intervals:
intervals.extend([max(intervals)+1])
else:
intervals.extend([lowerInterval])
if stepForward:
currentHour += timeStep
currentDay = (currentHour+1)//24
currentDayHour = currentHour%24
if currentDayHour not in intervals:
currentDay += 1
currentHour = currentDay*24+intervals[0]
if skipDarkHours:
while currentHour<8759 and max(self.illData.timedata[currentHour]['readStadicData'].illarr)==0:
currentHour += 1
else:
currentHour | |
# from x2paddle import torch2paddle
from copy import deepcopy
import argparse
import math
import os
import pickle
import json
import logging
import time
import paddle
import numpy as np
import random
from collections import Counter
from paddle.nn import functional as F
from model.meta import Meta
from utils.metrics import Metrics
from utils.metadataset import TrainGenerator
from utils.metadataset import evaluate_generator
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d %H:%M:%S', level=logging.INFO)
logger = logging.getLogger(__name__)
ARG = argparse.ArgumentParser()
parser = ARG
ARG = parser.parse_args()
root_path = './data/'
meta_path = root_path + 'dataset/final/'
def filter_statedict(module):
    """Return the module's state dict with non-trainable entries removed.

    keep_vars=True exposes the underlying tensors so requires_grad can be
    inspected; the returned dict is the plain state_dict minus those keys.
    """
    tracked = module.state_dict(keep_vars=True)
    frozen = [name for name, tensor in tracked.items()
              if not tensor.requires_grad]
    params = module.state_dict()
    for name in frozen:
        del params[name]
    return params
def get_cities(root_path, which='base'):
    """Read the city list for a split from <root_path>config/<which>_cities.txt.

    :param root_path: dataset root, expected to end with a path separator.
    :param which: split name, e.g. 'base'.
    :return: list of city names, one per file line.
    """
    cities_file = root_path + 'config/' + which + '_cities.txt'
    with open(cities_file, 'r') as f:
        return [line.strip() for line in f]
def get_curriculum(root_path):
    """
    TODO: One has to instantiate the curriculum according to the paper.
    That means to
    1. train the learner (DIN) model on the "transfer-learning training set" of each city independently;
    2. save the validation scores to the log;
    3. rank the cities from easiest to hardest according to the scores and save the ranked city indices
       as a numpy array to "base_task_hardness.pkl". E.g., [4, 2, 1, 0, 5, 3, 7, 6].
    """
    # Placeholder until "base_task_hardness.pkl" exists (see TODO above);
    # the real version would be:
    #   curriculum = pickle.load(open(root_path + "config/base_task_hardness.pkl", 'rb'))
    #   return list(curriculum)
    num_base_cities = len(get_cities(root_path, 'base'))
    return list(np.arange(num_base_cities))
def get_config(json_file, pkl_path=None):
    """Load the model config JSON and augment it with dataset statistics.

    Relies on the module-level POIID_DIM, WITH_CONT_FEAT and
    poi_type2idx_path set in the __main__ section.

    :param json_file: path to the config json.
    :param pkl_path: optional directory containing userid_to_id.pkl and
        poiid_to_id.pkl; when given, num_users/num_pois are added.
    :return: the config dict.
    """
    with open(json_file, 'r') as f:
        config = json.load(f)
    config['poiid_dim'] = POIID_DIM
    config['with_cont_feat'] = WITH_CONT_FEAT
    # Fix: the original used pickle.load(open(...)) and leaked the file
    # handles; the with-blocks close them deterministically.
    with open(poi_type2idx_path, 'rb') as f:
        poitype_to_id = pickle.load(f)
    config['num_poi_types'] = len(poitype_to_id)
    if pkl_path is not None:
        with open(pkl_path + 'userid_to_id.pkl', 'rb') as f:
            userid_to_id = pickle.load(f)
        with open(pkl_path + 'poiid_to_id.pkl', 'rb') as f:
            poiid_to_id = pickle.load(f)
        config['num_users'] = len(userid_to_id)
        config['num_pois'] = len(poiid_to_id)
    logger.info('Got config from {}'.format(json_file))
    return config
def get_model(meta_path, config, model_name='Mymodel', load_model_file=None,
              poiid_emb_file=None):
    """Create the meta-model and prepare its save/loss paths in *config*.

    :param meta_path: dataset root; model_save/ and loss_save/ are created
        beneath it.
    :param config: config dict, updated in place with 'save_path',
        'loss_save_path' and optionally 'poiid_emb_file'.
    :param model_name: only 'Meta' is supported.
    :param load_model_file: unused here; kept for interface compatibility.
    :param poiid_emb_file: pretrained POI-id embedding file, recorded in the
        config unless SCRATCH_ID_EMB is set.
    :return: (meta_model, model); model is always None for 'Meta'.
    :raises ValueError: for an unknown model_name (the original fell through
        and crashed with UnboundLocalError on the return).
    """
    # update some paths to config
    model_save_path = meta_path + 'model_save/'
    loss_save_path = meta_path + 'loss_save/'
    # Fix: makedirs(exist_ok=True) avoids the check-then-create race of the
    # original os.path.exists()/os.mkdir() pair and creates parents.
    os.makedirs(model_save_path, exist_ok=True)
    os.makedirs(loss_save_path, exist_ok=True)
    config['save_path'] = '{}.pdiparams'.format(model_save_path + model_name)
    config['loss_save_path'] = '{}loss_{}.txt'.format(loss_save_path,
                                                      model_name)
    if not SCRATCH_ID_EMB:
        config['poiid_emb_file'] = poiid_emb_file
    # create model
    if model_name != 'Meta':
        raise ValueError('Unsupported model_name: {}'.format(model_name))
    model = None
    meta_model = Meta(config)
    return meta_model, model
def get_optimizer(meta_model, config):
    """Build the Adam meta-optimizer and its LR scheduler.

    :param meta_model: Meta wrapper; only the trainable parameters of its
        inner network (``meta_model.net``) are optimized.
    :param config: dict providing 'meta_lr'.
    :return: (optimizer, scheduler) tuple.
    """
    # NOTE(review): this filters on paddle's ``trainable`` while main_meta()
    # filters on ``requires_grad`` -- confirm both attributes exist on the
    # parameter objects used here.
    init_parameters = list(filter(lambda p: p.trainable, meta_model.net
        .parameters()))
    parameters = init_parameters
    # Clip every gradient element to [-0.25, 0.25].
    clip = paddle.nn.ClipGradByValue(min=-0.25, max=0.25)
    optimizer = paddle.optimizer.Adam(parameters=parameters, learning_rate=\
        config['meta_lr'], grad_clip=clip)
    # Reduce the LR by 5x when the maximized validation metric plateaus;
    # PATIENCE is a module-level constant set in the __main__ section.
    scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=config[
        'meta_lr'], mode='max', factor=0.2, patience=PATIENCE, verbose=True,
        min_lr=1e-06)
    return optimizer, scheduler
def task_to_device(x_spt, y_spt, x_qry, y_qry, poiid_embs, device):
    """Move a task batch onto *device*, in place, and return it.

    Two layouts are supported: a batched layout where y_spt is a list (one
    entry per task, x_spt/x_qry are lists of feature lists), and a
    single-task layout where y_spt/y_qry/poiid_embs are single tensors.
    """
    if isinstance(y_spt, list):
        # Batched case: iterate tasks, then feature slots.
        for t in range(len(x_spt)):
            for f in range(len(x_spt[0])):
                x_spt[t][f] = x_spt[t][f].to(device)
                x_qry[t][f] = x_qry[t][f].to(device)
        for t in range(len(y_spt)):
            y_spt[t] = y_spt[t].to(device)
            y_qry[t] = y_qry[t].to(device)
            poiid_embs[t] = poiid_embs[t].to(device)
        return x_spt, y_spt, x_qry, y_qry, poiid_embs
    # Single-task case.
    for f in range(len(x_spt)):
        x_spt[f] = x_spt[f].to(device)
        x_qry[f] = x_qry[f].to(device)
    return (x_spt, y_spt.to(device), x_qry, y_qry.to(device),
            poiid_embs.to(device))
def evaluate(data_loader, meta_model, metric, device, model=None,
    init_compare=False, silence=False):
    """Evaluate the meta-model on every task (city) yielded by *data_loader*.

    For each task: adapt on the support set via finetuning_adapt, then score
    the query batches with the adapted fast weights. Metrics are computed
    with a fixed session length of 101 candidates per query user.

    :param data_loader: iterable of
        (x_spt, y_spt, task_iterator, poiid_emb, city_name, scaler) tuples.
    :param meta_model: Meta wrapper exposing finetuning_adapt/_predict.
    :param metric: Metrics instance with compute_metric().
    :param device: target device for all tensors.
    :param model: unused here; kept for interface compatibility.
    :param init_compare: also score the un-adapted initial weights.
    :param silence: suppress the per-city score printing.
    :return: (task_scores, task_score_weights) numpy arrays; the weights are
        the per-task query-user counts (len(truth)/101).
    """
    task_scores = []
    task_score_weights = [] # the weight of each task in the final evaluation scores (according to the amount of query users)
    if not silence:
        print('\t'.join(['Hits@5', 'Hits@10', 'NDCG@5', 'NDCG@10', 'city']))
    for data in data_loader: # evaluate on one meta-test city (task) each time
        x_spt, y_spt, task_iterator, poiid_emb, city_name, scaler = data # one meta-test task
        # Move the support set and embedding table to the device.
        for i in range(len(x_spt)):
            x_spt[i] = x_spt[i].to(device)
        y_spt = y_spt.to(device)
        poiid_emb = poiid_emb.to(device)
        # Snapshot the initial weights, with slot 0 replaced by this task's
        # poi-id embedding table (used only when init_compare is set).
        init_weights = list(meta_model.net.parameters())
        init_weights[0] = poiid_emb
        # Adapt on the support set to obtain task-specific fast weights.
        fast_weights = meta_model.finetuning_adapt(x_spt, y_spt, poiid_emb,
            scaler=scaler)
        all_y_pred_prob = []
        all_y_truth = []
        init_all_y_pred_prob = []
        for x_qry, y_qry in task_iterator:
            for i in range(len(x_qry)):
                x_qry[i] = x_qry[i].to(device)
            y_qry = y_qry.to(device)
            y_pred, y_pred_prob = meta_model.finetuning_predict(x_qry,
                y_qry, fast_weights, poiid_emb, scaler=scaler)
            all_y_pred_prob.extend(y_pred_prob)
            all_y_truth.extend(y_qry.data.detach().cpu().numpy().tolist())
            if init_compare:
                # Score the same queries with the un-adapted weights.
                y_pred, y_pred_prob = meta_model.finetuning_predict(x_qry,
                    y_qry, init_weights, poiid_emb, scaler=scaler)
                init_all_y_pred_prob.extend(y_pred_prob)
        scores = metric.compute_metric(all_y_pred_prob, all_y_truth,
            session_len=101)
        if not silence:
            if init_compare:
                init_scores = metric.compute_metric(init_all_y_pred_prob,
                    all_y_truth, session_len=101)
                print('(init) {:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}'.format(*
                    init_scores, city_name))
                print('(adapt) {:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}'.format(*
                    scores, city_name))
            else:
                print('{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}'.format(*scores,
                    city_name))
        task_scores.append(list(scores))
        # 101 = 1 positive + 100 negatives per query user.
        task_score_weights.append(len(all_y_truth) / 101)
    return np.array(task_scores), np.array(task_score_weights)
def update_hardness(task_idxs, task_sample_sub2user, results):
    """Rank tasks and, within each task, users by accuracy (hardest first).

    :param task_idxs: task indices of the last batch.
    :param task_sample_sub2user: per task, a mapping from sample position to
        user id.
    :param results: dict with 'task_level_acc' (one float per task) and
        'task_sample_level_corrects' (per task, per-sample 0/1 correctness).
    :return: {'task_idx2acc': task -> acc sorted ascending,
              'task_idx_to_user2acc': task -> (user -> acc sorted ascending)}
    """
    task_level_acc = results['task_level_acc']
    task_sample_level_corrects = results['task_sample_level_corrects']
    task_idx2acc = {}
    task_idx_to_user2acc = {}
    for pos, task_idx in enumerate(task_idxs):
        task_idx2acc[task_idx] = task_level_acc[pos]
        sample_corrects = task_sample_level_corrects[pos]
        sample_sub2user = task_sample_sub2user[pos]
        # Group per-sample correctness flags by user.
        per_user = {}
        for sample_idx, correct in enumerate(sample_corrects):
            per_user.setdefault(sample_sub2user[sample_idx], []).append(correct)
        # Mean correctness per user, sorted ascending (hardest users first).
        user2acc = {user: sum(flags) / len(flags)
                    for user, flags in per_user.items()}
        task_idx_to_user2acc[task_idx] = dict(
            sorted(user2acc.items(), key=lambda kv: kv[1]))
    # Hardest (lowest-accuracy) tasks first.
    task_idx2acc = dict(sorted(task_idx2acc.items(), key=lambda kv: kv[1]))
    return {'task_idx2acc': task_idx2acc,
            'task_idx_to_user2acc': task_idx_to_user2acc}
def one_meta_training_step(task_gen, meta_model, optimizer, device,
    parameters, task_idx2results, stage, curriculum, hard_task, batch_id):
    """Run one meta-training step: fetch a task batch, forward, backward,
    optimizer step, and recompute hardness rankings for the next step.

    :param task_gen: TrainGenerator providing fetch_task_batch().
    :param meta_model: Meta wrapper; calling it returns (accs, loss_q, results).
    :param optimizer: paddle optimizer for the meta parameters.
    :param device: target device for the batch tensors.
    :param parameters: unused here (clipping is done via the optimizer's
        grad_clip; see the commented-out line below).
    :param task_idx2results: hardness rankings from the previous step.
    :param stage: 'stage1' (hard tasks) or 'stage2' (hard users).
    :param curriculum / hard_task: CHAML sampling switches forwarded to the
        generator.
    :param batch_id: current step counter, forwarded for pacing.
    :return: (accs, loss value, updated task_idx2results).
    """
    data, task_idxs, task_sample_sub2user, cont_feat_scalers = (task_gen.
        fetch_task_batch(task_idx2results=task_idx2results, stage=stage,
        curriculum=curriculum, hard_task=hard_task, batch_id=batch_id))
    x_spt, y_spt, x_qry, y_qry, poiid_embs = task_to_device(*data, device)
    accs, loss_q, results = meta_model(x_spt, y_spt, x_qry, y_qry,
        poiid_embs=poiid_embs, cont_feat_scalers=cont_feat_scalers)
    optimizer.clear_grad()
    loss_q.backward(retain_graph=True)
    # torch2paddle.clip_grad_value_(parameters, 0.25)
    optimizer.step()
    # Re-rank tasks/users by this batch's accuracies for the next sampling.
    task_idx2results = update_hardness(task_idxs, task_sample_sub2user, results
        )
    # accs,loss_q.item() are for logging during training, task_idx2results supports the next meta training step
    return accs, loss_q.item(), task_idx2results
def main_meta(meta_path, root_path, id_emb_path, model_name='Meta'):
    """Full CHAML meta-training loop: build model/optimizer, load the task
    pickles, then alternate stage-1 (hard-task) and stage-2 (hard-user)
    steps, periodically logging training stats and evaluating on the valid
    and test task sets; the best model (by mean valid score) is saved.

    Relies on module-level settings defined in the __main__ section:
    CURRICULUM, HARD_TASK, HARD_USER, PACING_FUNCTION, STAGE_NUM,
    PER_TRAIN_LOG, PER_TEST_LOG, PATIENCE and INIT_COMPARE.

    :param meta_path: directory holding the task pickles and model_save/.
    :param root_path: dataset root (config files, pkls).
    :param id_emb_path: directory with pretrained id embeddings.
    :param model_name: only 'Meta' is configured.
    """
    # read config file
    config_path = 'config/config-'
    model2config = {'Meta': '{}{}.json'.format(config_path, 'chaml')}
    config = get_config(model2config[model_name])
    print(config)
    # get meta_model, optimizer, metrics
    meta_model, model = get_model(meta_path, config, model_name)
    optimizer, scheduler = get_optimizer(meta_model, config)
    device = 'cuda'
    device = device.replace('cuda', 'cpu') # TODO: you may delete this row to allow GPU
    device = paddle.set_device(device)
    meta_model = meta_model.to(device)
    if model is not None:
        model.to(device)
    parameters = list(filter(lambda p: p.requires_grad, meta_model.
        parameters()))
    # Count trainable tensors for logging only.
    tmp = filter(lambda x: x.requires_grad, meta_model.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(meta_model)
    logger.info('Total trainable tensors: {}'.format(num))
    metric = Metrics()
    # load the dataset
    # a list, each element of the list is the data of a meta-training task (samples, candidates, user2items)
    mtrain_tasks = pickle.load(open(meta_path + 'mtrain_tasks.pkl', 'rb'))
    # a list, each element of the list is the data of a meta-valid task(spt_samples, qry_samples, candidates, qry_user2items)
    mvalid_tasks = pickle.load(open(meta_path + 'mvalid_tasks.pkl', 'rb'))
    mtest_tasks = pickle.load(open(meta_path + 'mtest_tasks.pkl', 'rb'))
    logger.info('Loaded all the data pickles!')
    # set variables for statistics
    best_scores = 0
    running_loss = 0
    batch_id = 0
    # start training
    running_accs = np.zeros(config['update_step'] + 1)
    task_gen = TrainGenerator(root_path, meta_path, id_emb_path,
        mtrain_tasks, config['train_qry_batch_size'], config[
        'task_batch_size'], curriculum_task_idxs=get_curriculum(root_path),
        pacing_function=PACING_FUNCTION, few_num=config['few_num'],
        max_steps=config['max_train_steps'])
    task_idx2results = {'task_idx2acc': None, 'task_idx_to_user2acc': None}
    hard_task_counter = Counter()
    while True:
        # >>>>> [Stage1] sample the hardest tasks of last round, and then sample more tasks.
        accs, loss, task_idx2results = one_meta_training_step(task_gen,
            meta_model, optimizer, device, parameters, task_idx2results,
            'stage1', CURRICULUM, HARD_TASK, batch_id=batch_id)
        running_loss += loss
        running_accs += accs
        batch_id += 1
        hard_task_counter.update(list(task_idx2results['task_idx2acc'].keys()))
        if HARD_USER:
            # >>>>> Stage2: the same tasks as Stage 1, keep the hardest users, and sample new users in these tasks.
            accs, loss, task_idx2results = one_meta_training_step(task_gen,
                meta_model, optimizer, device, parameters, task_idx2results,
                'stage2', CURRICULUM, HARD_TASK, batch_id=batch_id)
            running_loss += loss
            running_accs += accs
            batch_id += 1
            hard_task_counter.update(list(task_idx2results['task_idx2acc'].
                keys()))
        if batch_id > config['max_train_steps']:
            break
        # NOTE(review): both log conditions below use true (float) division,
        # so they only fire when batch_id/STAGE_NUM is integral; '//' may
        # have been intended -- confirm before changing.
        if (batch_id / STAGE_NUM + 1) % PER_TRAIN_LOG == 0:
            training_loss = running_loss / PER_TRAIN_LOG / STAGE_NUM
            print('Task Batch[{}]: loss_q: {:.6f}, training accs: {}'.
                format(batch_id + 1, training_loss, running_accs /
                PER_TRAIN_LOG / STAGE_NUM))
            running_loss = 0
            running_accs = np.zeros(config['update_step'] + 1)
        if (batch_id / STAGE_NUM + 1) % PER_TEST_LOG == 0:
            meta_model.eval()
            print('=== Valid tasks ===')
            valid_scores, valid_score_weights = evaluate(evaluate_generator
                (root_path, id_emb_path, mvalid_tasks, few_num=None,
                neg_num=100), meta_model, metric, device)
            avg_valid_scores = np.average(valid_scores, axis=0, weights=\
                valid_score_weights)
            print('Average valid scores: {:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'.
                format(*avg_valid_scores))
            print('=== Test tasks ===')
            test_scores, test_score_weights = evaluate(evaluate_generator(
                root_path, id_emb_path, mtest_tasks, few_num=None, neg_num=\
                100, is_test=True), meta_model, metric, device,
                init_compare=INIT_COMPARE)
            avg_test_scores = np.average(test_scores, axis=0, weights=\
                test_score_weights)
            print('Average test scores: {:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'.
                format(*avg_test_scores))
            # Model selection uses the mean of the four valid metrics.
            valid_tot_score = np.mean(avg_valid_scores)
            if valid_tot_score > best_scores:
                best_scores = valid_tot_score
                dict_save_path = os.path.join(meta_path + 'model_save/',
                    str(batch_id + 1) + '.dict')
                # paddle.save(filter_statedict(meta_model), dict_save_path)
                paddle.save(meta_model.state_dict(), dict_save_path)
                logger.info('Best metrics: {}! Save model to {}'.format(
                    valid_tot_score, dict_save_path))
            scheduler.step(valid_tot_score)
            meta_model.train()
    print(hard_task_counter)
    # torch2paddle.invalid()
if __name__ == '__main__':
id_emb_path = root_path + 'id_embs/'
def poitype_pkl():
for root, dirs, files in os.walk(root_path + 'pkls/'):
for filename in files:
if 'poitype_to_id.pkl' in filename:
return str(os.path.join(root, filename))
print('Please check data/pkls/ ...')
exit(2)
poi_type2idx_path = poitype_pkl()
POIID_DIM = 50 # the dimension of poi id embedding, related to the preprocessed embedding files in id_emb_path
WITH_CONT_FEAT = True # always True
SCRATCH_ID_EMB = False # always False
# Settings for CHAML
CURRICULUM = True
HARD_TASK = True
HARD_USER = True
PACING_FUNCTION = 'ssp' # ssp: single | |
decimal point. if it's still not working error message and return
try:
tdia = float(self.ui.tools_table.item(row, 1).text().replace(',', '.'))
except ValueError:
self.app.inform.emit('[ERROR_NOTCL] %s' % _("Wrong value format entered, use a number."))
continue
sorted_tools.append(float('%.*f' % (self.decimals, tdia)))
if not sorted_tools:
self.app.inform.emit('[ERROR_NOTCL] %s' % _("There are no tools selected in the Tool Table."))
return 'fail'
order = self.ui.order_radio.get_value()
if order == 'fwd':
sorted_tools.sort(reverse=False)
elif order == 'rev':
sorted_tools.sort(reverse=True)
else:
pass
# decide to use "progressive" or "normal" plotting
prog_plot = self.app.defaults["tools_iso_plotting"]
for sorted_tool in sorted_tools:
for tool in tools_storage:
if float('%.*f' % (self.decimals, tools_storage[tool]['tooldia'])) == sorted_tool:
tool_dia = tools_storage[tool]['tooldia']
tool_type = tools_storage[tool]['tool_type']
tool_data = tools_storage[tool]['data']
passes = tool_data['tools_iso_passes']
overlap = tool_data['tools_iso_overlap']
overlap /= 100.0
milling_type = tool_data['tools_iso_milling_type']
# if milling type is climb then the move is counter-clockwise around features
mill_dir = True if milling_type == 'cl' else False
iso_t = {
'ext': 0,
'int': 1,
'full': 2
}[tool_data['tools_iso_isotype']]
forced_rest = self.ui.forced_rest_iso_cb.get_value()
iso_except = self.ui.except_cb.get_value()
outname = "%s_%.*f" % (iso_obj.options["name"], self.decimals, float(tool_dia))
internal_name = outname + "_iso"
if iso_t == 0:
internal_name = outname + "_ext_iso"
elif iso_t == 1:
internal_name = outname + "_int_iso"
tool_data.update({
"name": internal_name,
})
solid_geo, work_geo = self.generate_rest_geometry(geometry=work_geo, tooldia=tool_dia,
passes=passes, overlap=overlap, invert=mill_dir,
env_iso_type=iso_t, negative_dia=negative_dia,
forced_rest=forced_rest,
prog_plot=prog_plot,
prog_plot_handler=self.plot_temp_shapes)
# ############################################################
# ########## AREA SUBTRACTION ################################
# ############################################################
if iso_except:
self.app.proc_container.update_view_text(' %s' % _("Subtracting Geo"))
solid_geo = self.area_subtraction(solid_geo)
if lim_area:
self.app.proc_container.update_view_text(' %s' % _("Intersecting Geo"))
solid_geo = self.area_intersection(solid_geo, intersection_geo=lim_area)
# make sure that no empty geometry element is in the solid_geometry
new_solid_geo = [geo for geo in solid_geo if not geo.is_empty]
tools_storage.update({
tool: {
'tooldia': float(tool_dia),
'offset': 'Path',
'offset_value': 0.0,
'type': 'Rough',
'tool_type': tool_type,
'data': tool_data,
'solid_geometry': deepcopy(new_solid_geo)
}
})
total_solid_geometry += new_solid_geo
# if the geometry is all isolated
if not work_geo:
break
# clean the progressive plotted shapes if it was used
if self.app.defaults["tools_iso_plotting"] == 'progressive':
self.temp_shapes.clear(update=True)
# remove tools without geometry
for tool, tool_dict in list(tools_storage.items()):
if not tool_dict['solid_geometry']:
tools_storage.pop(tool, None)
def iso_init(geo_obj, app_obj):
geo_obj.options["cnctooldia"] = str(tool_dia)
geo_obj.tools = dict(tools_storage)
geo_obj.solid_geometry = total_solid_geometry
# even if combine is checked, one pass is still single-geo
# remove the tools that have no geometry
for geo_tool in list(geo_obj.tools.keys()):
if not geo_obj.tools[geo_tool]['solid_geometry']:
geo_obj.tools.pop(geo_tool, None)
if len(tools_storage) > 1:
geo_obj.multigeo = True
else:
for ky in tools_storage.keys():
passes_no = float(tools_storage[ky]['data']['tools_iso_passes'])
geo_obj.multigeo = True
break
# detect if solid_geometry is empty and this require list flattening which is "heavy"
# or just looking in the lists (they are one level depth) and if any is not empty
# proceed with object creation, if there are empty and the number of them is the length
# of the list then we have an empty solid_geometry which should raise a Custom Exception
empty_cnt = 0
if not isinstance(geo_obj.solid_geometry, list) and \
not isinstance(geo_obj.solid_geometry, MultiPolygon):
geo_obj.solid_geometry = [geo_obj.solid_geometry]
for g in geo_obj.solid_geometry:
if g:
break
else:
empty_cnt += 1
if empty_cnt == len(geo_obj.solid_geometry):
app_obj.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Empty Geometry in"), geo_obj.options["name"]))
return 'fail'
else:
app_obj.inform.emit('[success] %s: %s' % (_("Isolation geometry created"), geo_obj.options["name"]))
self.app.app_obj.new_object("geometry", iso_name, iso_init, plot=plot)
# the tools are finished but the isolation is not finished therefore it failed
if work_geo:
self.app.inform.emit("[WARNING] %s" % _("Partial failure. The geometry was processed with all tools.\n"
"But there are still not-isolated geometry elements. "
"Try to include a tool with smaller diameter."))
msg = _("The following are coordinates for the copper features that could not be isolated:")
self.app.inform_shell.emit(msg)
msg = ''
for geo in work_geo:
pt = geo.representative_point()
coords = '(%s, %s), ' % (str(pt.x), str(pt.y))
msg += coords
self.app.inform_shell.emit(msg=msg)
    def combined_normal(self, iso_obj, iso2geo, tools_storage, lim_area, negative_dia=None, plot=True):
        """
        Isolate a Gerber object with all selected tools combined into a
        single resulting Geometry object (one pass per tool per 'passes').

        :param iso_obj: the isolated Gerber object
        :type iso_obj: AppObjects.FlatCAMGerber.GerberObject
        :param iso2geo: specific geometry to isolate
        :type iso2geo: list of Shapely Polygon
        :param tools_storage: a dictionary that holds the tools and geometry
        :type tools_storage: dict
        :param lim_area: if not None restrict isolation to this area
        :type lim_area: Shapely Polygon or a list of them
        :param negative_dia: isolate the geometry with a negative value for the tool diameter
        :type negative_dia: bool
        :param plot: if to plot the resulting geometry object
        :type plot: bool
        :return: Isolated solid geometry
        :rtype:
        """
        log.debug("ToolIsolation.combined_normal()")
        total_solid_geometry = []
        iso_name = iso_obj.options["name"] + '_iso_combined'
        geometry = iso2geo
        prog_plot = self.app.defaults["tools_iso_plotting"]
        # Collect the tool ids of the rows selected in the tool table
        # (column 3 holds the tool id).
        sorted_tools = []
        table_items = self.ui.tools_table.selectedItems()
        sel_rows = {t.row() for t in table_items}
        for row in sel_rows:
            tid = int(self.ui.tools_table.item(row, 3).text())
            sorted_tools.append(tid)
        if not sorted_tools:
            self.app.inform.emit('[ERROR_NOTCL] %s' % _("There are no tools selected in the Tool Table."))
            return 'fail'
        for tool in sorted_tools:
            tool_dia = tools_storage[tool]['tooldia']
            tool_has_offset = tools_storage[tool]['offset']
            tool_offset_value = tools_storage[tool]['offset_value']
            tool_type = tools_storage[tool]['tool_type']
            tool_cut_type = tools_storage[tool]['type']
            tool_data = tools_storage[tool]['data']
            to_follow = tool_data['tools_iso_follow']
            # TODO what to do when the iso2geo param is not None but the Follow cb is checked
            # for the case when limited area is used .... the follow geo should be clipped too
            work_geo = geometry
            if work_geo is None:
                work_geo = iso_obj.follow_geometry if to_follow else iso_obj.solid_geometry
            # Map the isolation type string to the envelope selector int.
            iso_t = {
                'ext': 0,
                'int': 1,
                'full': 2
            }[tool_data['tools_iso_isotype']]
            passes = tool_data['tools_iso_passes']
            overlap = tool_data['tools_iso_overlap']
            overlap /= 100.0
            milling_type = tool_data['tools_iso_milling_type']
            iso_except = self.ui.except_cb.get_value()
            outname = "%s_%.*f" % (iso_obj.options["name"], self.decimals, float(tool_dia))
            internal_name = outname + "_iso"
            if iso_t == 0:
                internal_name = outname + "_ext_iso"
            elif iso_t == 1:
                internal_name = outname + "_int_iso"
            tool_data.update({
                "name": internal_name,
            })
            solid_geo = []
            for nr_pass in range(passes):
                # Offset for this pass: half a diameter for the first pass,
                # then stepped out by (1 - overlap) per additional pass.
                iso_offset = tool_dia * ((2 * nr_pass + 1) / 2.0000001) - (nr_pass * overlap * tool_dia)
                if negative_dia:
                    iso_offset = -iso_offset
                # if milling type is climb then the move is counter-clockwise around features
                mill_dir = 1 if milling_type == 'cl' else 0
                iso_geo = self.generate_envelope(iso_offset, mill_dir, geometry=work_geo, env_iso_type=iso_t,
                                                 follow=to_follow, nr_passes=nr_pass, prog_plot=prog_plot)
                if iso_geo == 'fail':
                    self.app.inform.emit('[ERROR_NOTCL] %s' % _("Isolation geometry could not be generated."))
                    continue
                try:
                    for geo in iso_geo:
                        solid_geo.append(geo)
                except TypeError:
                    # iso_geo is a single geometry, not an iterable.
                    solid_geo.append(iso_geo)
            # ############################################################
            # ########## AREA SUBTRACTION ################################
            # ############################################################
            if iso_except:
                self.app.proc_container.update_view_text(' %s' % _("Subtracting Geo"))
                solid_geo = self.area_subtraction(solid_geo)
            if lim_area:
                self.app.proc_container.update_view_text(' %s' % _("Intersecting Geo"))
                solid_geo = self.area_intersection(solid_geo, intersection_geo=lim_area)
            # make sure that no empty geometry element is in the solid_geometry
            new_solid_geo = [geo for geo in solid_geo if not geo.is_empty]
            tools_storage.update({
                tool: {
                    'tooldia': float(tool_dia),
                    'offset': tool_has_offset,
                    'offset_value': tool_offset_value,
                    'type': tool_cut_type,
                    'tool_type': tool_type,
                    'data': tool_data,
                    'solid_geometry': deepcopy(new_solid_geo)
                }
            })
            total_solid_geometry += new_solid_geo
        # clean the progressive plotted shapes if it was used
        if prog_plot == 'progressive':
            self.temp_shapes.clear(update=True)
        # remove tools without geometry
        for tool, tool_dict in list(tools_storage.items()):
            if not tool_dict['solid_geometry']:
                tools_storage.pop(tool, None)
        def iso_init(geo_obj, app_obj):
            # Initializer passed to new_object(); fills in the Geometry
            # object from the collected tool geometries.
            geo_obj.options["cnctooldia"] = str(tool_dia)
            geo_obj.tools = dict(tools_storage)
            geo_obj.solid_geometry = total_solid_geometry
            # even if combine is checked, one pass is still single-geo
            if len(tools_storage) > 1:
                geo_obj.multigeo = True
            else:
                if to_follow:
                    geo_obj.multigeo = False
                else:
                    # NOTE(review): passes_no is computed but never used, and
                    # multigeo ends up True on every path of this else branch
                    # (the loop sets it and the line after the loop sets it
                    # again) -- likely leftover logic; kept as-is.
                    passes_no = 1
                    for ky in tools_storage.keys():
                        passes_no = float(tools_storage[ky]['data']['tools_iso_passes'])
                        geo_obj.multigeo = True
                        break
                    geo_obj.multigeo = True
            # detect if solid_geometry is empty and this require list flattening which is "heavy"
            # or just looking in the lists (they are one level depth) and if any is not empty
            # proceed with object creation, if there are empty and the number of them is the length
            # of the list then we have an empty solid_geometry which should raise a Custom Exception
            empty_cnt = 0
            if not isinstance(geo_obj.solid_geometry, list) and \
                    not isinstance(geo_obj.solid_geometry, MultiPolygon):
                geo_obj.solid_geometry = [geo_obj.solid_geometry]
            for g in geo_obj.solid_geometry:
                if g:
                    break
                else:
                    empty_cnt += 1
            if empty_cnt == len(geo_obj.solid_geometry):
                app_obj.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Empty Geometry in"), geo_obj.options["name"]))
                return 'fail'
            else:
                app_obj.inform.emit('[success] %s: %s' % (_("Isolation geometry created"), geo_obj.options["name"]))
        self.app.app_obj.new_object("geometry", iso_name, iso_init, plot=plot)
def area_subtraction(self, geo, subtraction_geo=None):
"""
Subtracts the subtraction_geo (if present else self.solid_geometry) from the geo
:param geo: target geometry from which to subtract
:param subtraction_geo: geometry that acts as subtraction geo
:return:
"""
new_geometry = []
target_geo = geo
if subtraction_geo:
sub_union = unary_union(subtraction_geo)
else:
name = self.ui.exc_obj_combo.currentText()
subtractor_obj = self.app.collection.get_by_name(name)
sub_union = unary_union(subtractor_obj.solid_geometry)
try:
for geo_elem in target_geo:
if isinstance(geo_elem, Polygon):
for ring in self.poly2rings(geo_elem):
| |
# <gh_stars>0  (dataset-export artifact; not valid Python source)
# Copyright 2015 Fortinet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fortiosclient import exception
import netaddr
from oslo_log import log as logging
from oslo_serialization import jsonutils
from neutron.db.models import l3 as l3_db
from neutron.db.models import segment as segments_db
from neutron.db import models_v2
from bell_fortinet.common import constants as const
from bell_fortinet.common import resources as resources
from bell_fortinet.db import models as fortinet_db
from bell_fortinet.tasks import constants as t_consts
LOG = logging.getLogger(__name__)
def add_record(obj, context, cls, **kwargs):
    """Insert a DB record via cls.add_record and queue its rollback task.

    :return: the 'result' entry of the response (None if absent), or the
        falsy response itself when the insert produced nothing.
    """
    res = cls.add_record(context, **kwargs)
    if not res:
        return res
    rollback = res.get('rollback', {})
    if rollback:
        obj.task_manager.add(getid(context), **rollback)
    return res.get('result', None)
def op(obj, context, func, **data):
    """Invoke a driver API call and queue its rollback task, if any.

    :return: the 'result' entry of the response, or the whole response when
        no 'result' key is present.
    """
    res = func(obj._driver, data)
    rollback = res.get('rollback', {})
    if rollback:
        obj.task_manager.add(getid(context), **rollback)
    return res.get('result', res)
def check(obj, context, vdom, resource=resources.VlanInterface):
    """Verify that both vlink interfaces of *vdom* still exist on the device.

    A missing interface is only logged (with the calling function's name);
    no exception propagates.
    """
    vlink_vlan = fortinet_db.query_record(context,
        fortinet_db.Fortinet_Vlink_Vlan_Allocation, vdom=vdom)
    if not vlink_vlan:
        return
    try:
        op(obj, context, resource.get,
           vdom=vdom, name=vlink_vlan.inf_name_int_vdom)
        op(obj, context, resource.get,
           vdom=const.EXT_VDOM, name=vlink_vlan.inf_name_ext_vdom)
    except exception.ResourceNotFound as e:
        import inspect
        caller = inspect.stack()[1][3]
        LOG.debug("## Check vlink interface failed on the %(func)s.",
                  {'func': caller})
        resources.Exinfo(e)
def getid(context):
    """Return the context's request id, used as the task-manager key.

    Falls back to const.INIT_TASK_ID for contexts without a session;
    raises ValueError for a session-bearing context missing a request id.
    """
    request_id = getattr(context, 'request_id', None)
    if request_id:
        return request_id
    if getattr(context, 'session', None):
        raise ValueError("not get request_id")
    return const.INIT_TASK_ID
def port_range(range):
    """
    Convert an OpenStack port range to the FortiGate format.

    :param range: openstack port range format, e.g. '100: 200'
    :return: fortigate port range format, e.g. '100-200'
        (used like: tcp-portrange 100-200:300-400);
        an empty/None range maps to the full range '1-65535'.
    """
    if not range:
        return '1-65535'
    return '-'.join(part.strip() for part in range.split(':'))
def get_mac(obj, context, interface=None):
    """Return the MAC address of *interface* (default: the configured
    internal interface), or None when the device query is not HTTP 200."""
    if not interface:
        interface = obj._fortigate['int_interface']
    res = op(obj, context, resources.VlanInterface.get, name=interface)
    if res['http_status'] == 200:
        return res['results'][0]['macaddr']
    return None
def getip(ipsubnet, place):
    """Return '<address-at-place> <netmask>' for the given subnet."""
    return '{} {}'.format(get_ipaddr(ipsubnet, place), get_netmask(ipsubnet))
def get_ipaddr(ip_subnet, place=1):
    """Return the address at index *place* within the subnet as a string."""
    network = netaddr.IPNetwork(ip_subnet)
    return str(network[place])
def get_netmask(ip_subnet):
    """Return the dotted-quad netmask of the subnet as a string."""
    network = netaddr.IPNetwork(ip_subnet)
    return str(network.netmask)
def get_subnet(ip_subnet):
    """
    :param ip_subnet: input '192.168.138.0/24'
    :return: '192.168.138.0 255.255.255.0'
    """
    cidr = netaddr.IPNetwork(ip_subnet)
    return '{} {}'.format(cidr.network, cidr.netmask)
def get_segmentation_id(context, network_id):
    """Look up the segmentation (VLAN) id of a network, or None."""
    segment = fortinet_db.query_record(context,
                                       segments_db.NetworkSegment,
                                       network_id=network_id)
    return getattr(segment, 'segmentation_id', None)
def get_intf(context, network_id):
    """Return the FortiGate interface name for a network's VLAN id, or None
    when the network has no (truthy) segmentation id."""
    vlanid = get_segmentation_id(context, network_id=network_id)
    if not vlanid:
        return None
    return const.PREFIX['inf'] + str(vlanid)
def backup_fields(record, **kwargs):
    """Snapshot the current values of *record* for the given field names.

    For every keyword name that exists as an attribute on *record*, the
    record's CURRENT value (not the value passed in kwargs) is captured
    so the caller can restore it later.

    :param record: DB record object to snapshot.
    :param kwargs: field names to back up; the passed values are ignored.
    :return: dict mapping field name -> record's current value.
    """
    rbkwargs = {}
    for key in kwargs:
        if hasattr(record, key):
            # Bug fix: the original read ``record.key``, i.e. the literal
            # attribute named 'key'; use getattr() to read the field the
            # loop variable names.
            rbkwargs.setdefault(key, getattr(record, key))
    return rbkwargs
def update_status(obj, context, status):
    """Set the status of the task identified by this context's request id."""
    obj.task_manager.update_status(getid(context), status)
def _rollback_on_err(obj, context, err):
    """Mark the current task for rollback and log the triggering exception."""
    update_status(obj, context, t_consts.TaskStatus.ROLLBACK)
    resources.Exinfo(err)
def _prepare_params(record, resource, *keys, **kwargs):
if record:
params = {key: getattr(record, key, None) for key in keys
if getattr(record, key, None)}
#if 'id' in keys:
# params.setdefault('id', getattr(record, 'edit_id', None))
else:
LOG.debug("_prepare_params() called, record is None, "
"resource=%(res)s, kwargs=%(kwargs)s",
{'res': resource, 'kwargs': kwargs})
params = {key: kwargs.get(key, None) for key in keys if key in kwargs}
return params
def add_by_keys(obj, context, cls, resource, *keys, **kwargs):
    """Add a DB record of *cls* and the matching device resource, keyed by *keys*."""
    record = add_record(obj, context, cls, **kwargs)
    add_resource_with_keys(obj, context, record, resource, *keys, **kwargs)
    return record
def set_by_keys(obj, context, cls, resource, *keys, **kwargs):
    """Update (or create) a DB record and its device resource, keyed by *keys*.

    Looks the record up by the key subset of *kwargs*; updates both DB and
    device when found, otherwise falls back to a fresh add.
    """
    params = _prepare_params(None, resource, *keys, **kwargs)
    record = fortinet_db.query_record(context, cls, **params)
    if record:
        record = cls.update_record(context, record, **kwargs)
        set_resource_with_keys(obj, context, record, resource, *keys, **kwargs)
    else:
        record = add_by_keys(obj, context, cls, resource, *keys, **kwargs)
    return record
def delete_by_keys(obj, context, cls, resource, *keys, **kwargs):
    """Delete the device resource (best effort) and then the DB record."""
    record = fortinet_db.query_record(context, cls, **kwargs)
    delete_resource_with_keys(obj, context, record, resource, *keys, **kwargs)
    return fortinet_db.delete_record(context, cls, **kwargs)
def add_resource_with_keys(obj, context, record, resource, *keys, **kwargs):
    """Create *resource* on the device unless it already exists.

    :return: the device API response for the add, or None when the
        resource was already present.
    """
    params = _prepare_params(record, resource, *keys, **kwargs)
    try:
        # EAFP probe: a successful get means the resource already exists.
        op(obj, context, resource.get, **params)
    except exception.ResourceNotFound:
        return op(obj, context, resource.add, **kwargs)
    return None
def set_resource_with_keys(obj, context, record, resource, *keys, **kwargs):
    """Update *resource* on the device, creating it when it does not exist."""
    params = _prepare_params(record, resource, *keys, **kwargs)
    try:
        op(obj, context, resource.get, **params)
        # Carry the key fields over into the set call if the caller
        # did not supply them.  NOTE(review): assumes every key is
        # present in params here — verify for records missing a key.
        for key in keys:
            kwargs.setdefault(key, params[key])
        return op(obj, context, resource.set, **kwargs)
    except exception.ResourceNotFound:
        LOG.debug("The resource %(rs)s with fields %(kws)s "
                  "is not exist, create a new one instead",
                  {"rs": resource, 'kws': kwargs})
        return op(obj, context, resource.add, **kwargs)
def delete_resource_with_keys(obj, context, record, resource, *keys, **kwargs):
    """Delete *resource* on the device; a missing resource is logged and ignored."""
    params = _prepare_params(record, resource, *keys, **kwargs)
    try:
        op(obj, context, resource.get, **params)
        return op(obj, context, resource.delete, **params)
    except exception.ResourceNotFound as e:
        resources.Exinfo(e)
    return None
def add_resource_with_name(obj, context, record, resource, **kwargs):
    """Add a device resource keyed by ('vdom', 'name')."""
    return add_resource_with_keys(obj, context, record, resource,
                                  'vdom', 'name', **kwargs)
def set_resource_with_name(obj, context, record, resource, **kwargs):
    """Update a device resource keyed by ('vdom', 'name')."""
    return set_resource_with_keys(obj, context, record, resource,
                                  'vdom', 'name', **kwargs)
def delete_resource_with_name(obj, context, record, resource, **kwargs):
    """Delete a device resource keyed by ('vdom', 'name')."""
    return delete_resource_with_keys(obj, context, record, resource,
                                     'vdom', 'name', **kwargs)
def add_resource_with_id(obj, context, record, resource, **kwargs):
    """Add a device resource identified by the record's edit_id.

    When the record already has an edit_id and the resource exists on the
    device, the existing resource's get response is returned instead of
    re-adding.
    """
    if getattr(record, 'edit_id', None):
        try:
            res = op(obj, context, resource.get,
                     vdom=record.vdom, id=record.edit_id)
            return res
        except exception.ResourceNotFound:
            pass
    else:
        # TODO(samsu): may add search existing data in devices later
        pass
    return op(obj, context, resource.add, **kwargs)
def set_resource_with_id(obj, context, record, resource, **kwargs):
    """Update a device resource identified by the record's edit_id.

    NOTE(review): when *record* has no edit_id this function silently
    returns None without touching the device — confirm callers expect
    that.
    """
    # because the name 'edit_id' in record is different with id in
    # the api templates, the id related function can not reuse the
    # related xx_keys function
    if getattr(record, 'edit_id', None):
        try:
            op(obj, context, resource.get, vdom=record.vdom, id=record.edit_id)
            # Force the id/vdom used on the device to the record's values.
            if kwargs.get('id', None):
                del kwargs['id']
            kwargs.setdefault('id', str(record.edit_id))
            kwargs.setdefault('vdom', record.vdom)
            return op(obj, context, resource.set, **kwargs)
        except Exception as e:
            resources.Exinfo(e)
            raise e
def delete_resource_with_id(obj, context, record, resource):
    """Delete a device resource identified by the record's edit_id.

    Any failure (including a missing resource) is logged and swallowed.
    """
    if getattr(record, 'edit_id', None):
        try:
            op(obj, context, resource.get, vdom=record.vdom, id=record.edit_id)
            return op(obj, context, resource.delete,
                      vdom=record.vdom, id=record.edit_id)
        except Exception as e:
            resources.Exinfo(e)
    return None
def add_by_name(obj, context, cls, resource, **kwargs):
    """Add a DB record and device resource keyed by ('vdom', 'name')."""
    return add_by_keys(obj, context, cls, resource, 'vdom', 'name', **kwargs)
def set_by_name(obj, context, cls, resource, **kwargs):
    """Update a DB record and device resource keyed by ('vdom', 'name')."""
    return set_by_keys(obj, context, cls, resource, 'vdom', 'name', **kwargs)
def delete_by_name(obj, context, cls, resource, **kwargs):
    """Delete a DB record and device resource keyed by ('vdom', 'name')."""
    return delete_by_keys(obj, context, cls, resource,
                          'vdom', 'name', **kwargs)
def add_by_id(obj, context, cls, resource, **kwargs):
    """Add a DB record and an id-keyed device resource.

    When the record had no edit_id, the id assigned by the device
    ('mkey' in the response) is written back into the record.
    """
    record = add_record(obj, context, cls, **kwargs)
    res = add_resource_with_id(obj, context, record, resource, **kwargs)
    if not getattr(record, 'edit_id'):
        # The mkey lives under 'results' for add responses, at the top
        # level for get responses.
        if res.get('results'):
            edit_id = res['results']['mkey']
        else:
            edit_id = res['mkey']
        cls.update_record(context, record, edit_id=edit_id)
    return record
def set_by_id(obj, context, cls, resource, **kwargs):
    """Update a DB record and its id-keyed device resource.

    :return: the updated record, or None when no record matched.
    """
    params = _prepare_params(None, resource, 'vdom', 'id', **kwargs)
    record = fortinet_db.query_record(context, cls, **params)
    if record:
        cls.update_record(context, record, **kwargs)
        set_resource_with_id(obj, context, record, resource, **kwargs)
        return record
    else:
        return None
def delete_by_id(obj, context, cls, resource, **kwargs):
    """Delete the id-keyed device resource (best effort) and the DB record."""
    record = fortinet_db.query_record(context, cls, **kwargs)
    delete_resource_with_id(obj, context, record, resource)
    return fortinet_db.delete_record(context, cls, **kwargs)
def add_vdom(obj, context, **kwargs):
    """Create the namespace DB record and, if absent, the vdom on the device."""
    namespace = add_record(obj, context, fortinet_db.Fortinet_ML2_Namespace,
                           **kwargs)
    try:
        op(obj, context, resources.Vdom.get, name=namespace.vdom)
    except exception.ResourceNotFound:
        op(obj, context, resources.Vdom.add, name=namespace.vdom)
    return namespace
def delete_vdom(obj, context, **kwargs):
    """Delete a tenant's vdom when no routers, networks or FIPs remain.

    The vdom and its namespace record are removed only when the tenant
    owns no router, network, or floating IP; otherwise the vdom is kept
    and the remaining owners are logged.
    """
    cls = fortinet_db.Fortinet_ML2_Namespace
    namespace = fortinet_db.query_record(context, cls, **kwargs)
    if namespace:
        tenant_id = namespace.tenant_id
        if not fortinet_db.query_count(context, l3_db.Router,
                                       tenant_id=tenant_id) and \
            not fortinet_db.query_count(context, models_v2.Network,
                                        tenant_id=tenant_id) and \
            not fortinet_db.query_count(context, l3_db.FloatingIP,
                                        tenant_id=tenant_id):
            try:
                op(obj, context, resources.Vdom.get, name=namespace.vdom)
                op(obj, context, resources.Vdom.delete, name=namespace.vdom)
            except Exception as e:
                # Best effort: a vdom already gone from the device is fine.
                resources.Exinfo(e)
            fortinet_db.delete_record(context, cls, **kwargs)
        else:
            db_routers = fortinet_db.query_records(context, l3_db.Router,
                                                   tenant_id=tenant_id)
            db_networks = fortinet_db.query_records(context, models_v2.Network,
                                                    tenant_id=tenant_id)
            db_fips = fortinet_db.query_records(context, l3_db.FloatingIP,
                                                tenant_id=tenant_id)
            LOG.debug("Keeping vdom, because existing db_routers: %(routers)s,"
                      "db_networks: %(networks)s, db_fips: %(fips)s",
                      {'routers': db_routers, 'networks': db_networks,
                       'fips': db_fips})
    return namespace
def add_vdomlink(obj, context, **kwargs):
    """Create a vdom-link DB record and the matching device VdomLink."""
    cls = fortinet_db.Fortinet_Vdom_Vlink
    record = add_record(obj, context, cls, **kwargs)
    add_resource_with_keys(obj, context, record, resources.VdomLink,
                           'name', name=getattr(record, 'name', None))
    return record
def delete_vdomlink(obj, context, **kwargs):
    """Delete the vdom-link device resource and its DB record, keyed by name."""
    return delete_by_keys(obj, context, fortinet_db.Fortinet_Vdom_Vlink,
                          resources.VdomLink, 'name', **kwargs)
def add_vlanintf(obj, context, **kwargs):
    """Create a vlan interface DB record and device interface.

    The FortiGate alias field is limited to 32 characters, so any
    supplied alias is truncated before the call.
    """
    alias = kwargs.get('alias')
    if alias:
        kwargs['alias'] = alias[:32]
    return add_by_name(obj, context,
                       fortinet_db.Fortinet_Interface,
                       resources.VlanInterface,
                       **kwargs)
def set_vlanintf(obj, context, **kwargs):
    """Update a vlan interface, adding the DB record first when missing."""
    cls = fortinet_db.Fortinet_Interface
    record = cls.query_one(context, **kwargs)
    #backup_fields(record, **kwargs)
    if not record:
        cls.add_record(context, **kwargs)
    op(obj, context, resources.VlanInterface.set, **kwargs)
def delete_vlanintf(obj, context, **kwargs):
    """Delete a vlan interface on the device and its DB record."""
    return delete_by_name(obj, context,
                          fortinet_db.Fortinet_Interface,
                          resources.VlanInterface,
                          **kwargs)
def add_dhcpserver(obj, context, **kwargs):
    """Create the subnet DB record and the device-side DHCP server.

    'subnet_id' is a DB-only field and is stripped before the device call;
    for a fresh record the device-assigned id ('mkey') is written back as
    edit_id.
    """
    cls = fortinet_db.Fortinet_ML2_Subnet
    record = add_record(obj, context, cls, **kwargs)
    kwargs.pop('subnet_id', None)
    res = add_resource_with_id(obj, context, record,
                               resources.DhcpServer, **kwargs)
    if not record.edit_id:
        # The mkey lives under 'results' for add responses, at the top
        # level for get responses.
        if res.get('results'):
            edit_id = res['results']['mkey']
        else:
            edit_id = res['mkey']
        cls.update_record(context, record, edit_id=edit_id)
def set_dhcpserver(obj, context, **kwargs):
    """Update the DHCP server of a subnet, in the DB and on the device.

    :param kwargs: must include ``subnet_id``; the remaining keys are
        DHCP server fields to update.  A no-op when the subnet is
        unknown or has no device-side id yet.
    """
    cls = fortinet_db.Fortinet_ML2_Subnet
    if 'subnet_id' in kwargs:
        record = cls.query_one(context, subnet_id=kwargs['subnet_id'])
        # Guard against an unknown subnet: query_one() returns None and
        # the original code crashed with AttributeError on record.edit_id.
        if record and record.edit_id:
            cls.update_record(context, record, **kwargs)
            # subnet_id is DB-only; the device API keys on 'id'.
            kwargs.pop('subnet_id', None)
            kwargs.setdefault('id', record.edit_id)
            op(obj, context, resources.DhcpServer.set, **kwargs)
def delete_dhcpserver(obj, context, **kwargs):
    """Delete a subnet's DHCP server on the device and its DB record."""
    return delete_by_id(obj, context, fortinet_db.Fortinet_ML2_Subnet,
                        resources.DhcpServer, **kwargs)
def add_reservedip(obj, context, **kwargs):
    """Add a DHCP reserved IP and push the subnet's full reservation list.

    The device API takes the complete reserved-address list per DHCP
    server, so all reservations for the subnet are re-sent on each add.
    """
    cls = fortinet_db.Fortinet_ML2_ReservedIP
    add_record(obj, context, cls, **kwargs)
    db_reservedips = fortinet_db.query_records(context, cls,
                                               subnet_id=kwargs.get('subnet_id'))
    db_subnet = fortinet_db.query_record(context,
                                         fortinet_db.Fortinet_ML2_Subnet,
                                         subnet_id=kwargs.get('subnet_id'))
    if db_subnet:
        reserved_addresses = []
        for rsrvdip in db_reservedips:
            reserved_addresses.append({'id': rsrvdip.edit_id,
                                       'ip': rsrvdip.ip,
                                       'mac': rsrvdip.mac})
        op(obj, context, resources.DhcpServerRsvAddr.set,
           id=db_subnet.edit_id,
           vdom=kwargs.get('vdom'),
           reserved_address=jsonutils.dumps(reserved_addresses))
    # TODO(samsu): add rollback of dhcpserver set
def delete_reservedip(obj, context, **kwargs):
    """Remove a DHCP reserved IP and push the remaining reservation list.

    The remaining reservations for the subnet are re-sent as a whole
    before the DB record is deleted.
    """
    cls = fortinet_db.Fortinet_ML2_ReservedIP
    reserved_ip = fortinet_db.query_record(context, cls, **kwargs)
    if reserved_ip:
        db_reservedips = fortinet_db.query_records(context, cls,
                                                   subnet_id=reserved_ip.subnet_id)
        # NOTE(review): assumes query_records() returns a mutable list
        # containing this exact record object — verify.
        db_reservedips.remove(reserved_ip)
        reserved_addresses = []
        for rsrvdip in db_reservedips:
            reserved_addresses.append({'id': rsrvdip.edit_id,
                                       'ip': rsrvdip.ip,
                                       'mac': rsrvdip.mac})
        db_subnet = fortinet_db.query_record(context,
                                             fortinet_db.Fortinet_ML2_Subnet,
                                             subnet_id=reserved_ip.subnet_id)
        if db_subnet:
            op(obj, context, resources.DhcpServerRsvAddr.set,
               id=db_subnet.edit_id,
               vdom=reserved_ip.vdom,
               reserved_address=jsonutils.dumps(reserved_addresses))
        fortinet_db.delete_record(context, cls, **kwargs)
def add_fwaddress(obj, context, **kwargs):
    """Create a firewall address DB record and device object, keyed by name."""
    return add_by_name(obj, context,
                       fortinet_db.Fortinet_Firewall_Address,
                       resources.FirewallAddress,
                       **kwargs)
def set_fwaddress(obj, context, | |
found, a 0 is stored in the
corresponding output value.
See also `tf.batch_gather` and `tf.gather_nd`.
Args:
params: A `Tensor`.
The tensor from which to gather values. Must be at least rank
`axis + 1`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor. Must be in range `[0, params.shape[axis])`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The axis in `params` to gather `indices` from. Defaults to the first
dimension. Supports negative indexes.
batch_dims: An optional `int`. Defaults to `0`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "GatherV2", name,
tld.op_callbacks, params, indices, axis, "batch_dims", batch_dims)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return gather_v2_eager_fallback(
params, indices, axis, batch_dims=batch_dims, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if batch_dims is None:
batch_dims = 0
batch_dims = _execute.make_int(batch_dims, "batch_dims")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"GatherV2", params=params, indices=indices, axis=axis,
batch_dims=batch_dims, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("batch_dims", _op._get_attr_int("batch_dims"), "Tparams",
_op._get_attr_type("Tparams"), "Tindices",
_op._get_attr_type("Tindices"), "Taxis",
_op._get_attr_type("Taxis"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"GatherV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
# Raw-op alias exposed as tf.raw_ops.GatherV2 (machine-generated).
GatherV2 = tf_export("raw_ops.GatherV2")(_ops.to_raw_op(gather_v2))
def gather_v2_eager_fallback(params, indices, axis, batch_dims, name, ctx):
  """Slow-path eager execution of GatherV2, used when the fast C++ path
  raises _FallbackException.  (Machine-generated wrapper.)
  """
  if batch_dims is None:
    batch_dims = 0
  batch_dims = _execute.make_int(batch_dims, "batch_dims")
  # Resolve the dtype attrs from the concrete eager tensors.
  _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], ctx)
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx)
  _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], ctx)
  _inputs_flat = [params, indices, axis]
  _attrs = ("batch_dims", batch_dims, "Tparams", _attr_Tparams, "Tindices",
  _attr_Tindices, "Taxis", _attr_Taxis)
  _result = _execute.execute(b"GatherV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "GatherV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('guarantee_const')
def guarantee_const(input, name=None):
  r"""Gives a guarantee to the TF runtime that the input tensor is a constant.
  The runtime is then free to make optimizations based on this.
  Only accepts value typed tensors as inputs and rejects resource variable handles
  as input.
  Returns the input tensor without modification.
  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  # Fast path: eager execution through the C++ layer when possible.
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "GuaranteeConst", name,
        tld.op_callbacks, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return guarantee_const_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Let registered dispatchers (e.g. extension types) handle the input.
      result = _dispatch.dispatch(
            guarantee_const, (), dict(input=input, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "GuaranteeConst", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          guarantee_const, (), dict(input=input, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "GuaranteeConst", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op alias exposed as tf.raw_ops.GuaranteeConst (machine-generated).
GuaranteeConst = tf_export("raw_ops.GuaranteeConst")(_ops.to_raw_op(guarantee_const))
def guarantee_const_eager_fallback(input, name, ctx):
  """Slow-path eager execution of GuaranteeConst.  (Machine-generated.)"""
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"GuaranteeConst", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "GuaranteeConst", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def identity(input, name=None):
  r"""Return a tensor with the same shape and contents as the input tensor or value.
  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  # Fast path: eager execution through the C++ layer when possible.
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Identity", name,
        tld.op_callbacks, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return identity_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Identity", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Identity", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op alias exposed as tf.raw_ops.Identity (machine-generated).
Identity = tf_export("raw_ops.Identity")(_ops.to_raw_op(identity))
def identity_eager_fallback(input, name, ctx):
  """Slow-path eager execution of Identity.  (Machine-generated.)"""
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Identity", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Identity", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('identity_n')
def identity_n(input, name=None):
  r"""Returns a list of tensors with the same shapes and contents as the input
  tensors.
  This op can be used to override the gradient for complicated functions. For
  example, suppose y = f(x) and we wish to apply a custom function g for backprop
  such that dx = g(dy). In Python,
  ```python
  with tf.get_default_graph().gradient_override_map(
      {'IdentityN': 'OverrideGradientWithG'}):
    y, _ = identity_n([f(x), x])
  @tf.RegisterGradient('OverrideGradientWithG')
  def ApplyG(op, dy, _):
    return [None, g(dy)]  # Do not backprop to f(x).
  ```
  Args:
    input: A list of `Tensor` objects.
    name: A name for the operation (optional).
  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  # Fast path: eager execution through the C++ layer when possible.
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "IdentityN", name,
        tld.op_callbacks, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return identity_n_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Let registered dispatchers (e.g. extension types) handle the input.
      result = _dispatch.dispatch(
            identity_n, (), dict(input=input, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "IdentityN", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          identity_n, (), dict(input=input, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op.get_attr("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "IdentityN", _inputs_flat, _attrs, _result)
  # List-output op: the result list is returned as-is (no unwrapping).
  return _result
# Raw-op alias exposed as tf.raw_ops.IdentityN (machine-generated).
IdentityN = tf_export("raw_ops.IdentityN")(_ops.to_raw_op(identity_n))
def identity_n_eager_fallback(input, name, ctx):
  """Slow-path eager execution of IdentityN.  (Machine-generated.)"""
  # Inputs may have heterogeneous dtypes; convert each independently.
  _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
  _inputs_flat = list(input)
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"IdentityN", len(input), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "IdentityN", _inputs_flat, _attrs, _result)
  return _result
def immutable_const(dtype, shape, memory_region_name, name=None):
  r"""Returns immutable tensor from memory region.
  The current implementation memmaps the tensor from a file.
  Args:
    dtype: A `tf.DType`. Type of the returned tensor.
    shape: A `tf.TensorShape` or list of `ints`. Shape of the returned tensor.
    memory_region_name: A `string`.
      Name of readonly memory region used by the tensor, see
      NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  # Fast path: eager execution through the C++ layer when possible.
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "ImmutableConst", name,
        tld.op_callbacks, "dtype", dtype, "shape", shape,
        "memory_region_name", memory_region_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return immutable_const_eager_fallback(
          dtype=dtype, shape=shape, memory_region_name=memory_region_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the attrs into their canonical proto-compatible forms.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  memory_region_name = _execute.make_str(memory_region_name, "memory_region_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ImmutableConst", dtype=dtype, shape=shape,
                          memory_region_name=memory_region_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
              _op.get_attr("shape"), "memory_region_name",
              _op.get_attr("memory_region_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ImmutableConst", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op alias exposed as tf.raw_ops.ImmutableConst (machine-generated).
ImmutableConst = tf_export("raw_ops.ImmutableConst")(_ops.to_raw_op(immutable_const))
def immutable_const_eager_fallback(dtype, shape, memory_region_name, name, ctx):
  """Slow-path eager execution of ImmutableConst.  (Machine-generated.)"""
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  memory_region_name = _execute.make_str(memory_region_name, "memory_region_name")
  # This op has no tensor inputs; everything is carried in attrs.
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape, "memory_region_name",
  memory_region_name)
  _result = _execute.execute(b"ImmutableConst", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ImmutableConst", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
def inplace_add(x, i, v, name=None):
r""" Adds v into specified rows of x.
Computes y = x; y[i, :] += v; return y.
Args:
x: A `Tensor`. A `Tensor` of type T.
i: A `Tensor` of type `int32`.
A vector. Indices into the left-most dimension of `x`.
v: A `Tensor`. Must have the same type as `x`.
A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "InplaceAdd", name,
tld.op_callbacks, | |
<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
########################################################################################################
### LICENSE
########################################################################################################
#
# findmyhash.py - v 1.1.2
#
# This script is under GPL v3 License (http://www.gnu.org/licenses/gpl-3.0.html).
#
# Only this source code is under GPL v3 License. Web services used in this script are under
# different licenses.
#
# If you know some clause in one of these web services which forbids to use it inside this script,
# please contact me to remove the web service as soon as possible.
#
# Developed by JulGor ( http://laxmarcaellugar.blogspot.com/ )
# Mail: bloglaxmarcaellugar AT gmail DOT com
# twitter: @laXmarcaellugar
#
########################################################################################################
### IMPORTS
########################################################################################################
try:
import sys
import hashlib
import urllib2
import getopt
from os import path
from urllib import urlencode
from re import search, findall
from random import seed, randint
from base64 import decodestring, encodestring
from cookielib import LWPCookieJar
except:
print """
Execution error:
You required some basic Python libraries.
This application use: sys, hashlib, urllib, urllib2, os, re, random, getopt, base64 and cookielib.
Please, check if you have all of them installed in your system.
"""
sys.exit(1)
try:
from httplib2 import Http
except:
print """
Execution error:
The Python library httplib2 is not installed in your system.
Please, install it before use this application.
"""
sys.exit(1)
try:
from libxml2 import parseDoc
except:
print """
Execution error:
The Python library libxml2 is not installed in your system.
Because of that, some plugins aren't going to work correctly.
Please, install it before use this application.
"""
########################################################################################################
### CONSTANTS
########################################################################################################
MD4 = "md4"
MD5 = "md5"
SHA1 = "sha1"
SHA224 = "sha224"
SHA256 = "sha256"
SHA384 = "sha384"
SHA512 = "sha512"
RIPEMD = "rmd160"
LM = "lm"
NTLM = "ntlm"
MYSQL = "mysql"
CISCO7 = "cisco7"
JUNIPER = "juniper"
GOST = "gost"
WHIRLPOOL = "whirlpool"
LDAP_MD5 = "ldap_md5"
LDAP_SHA1 = "ldap_sha1"
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Crazy Browser 1.0.5)",
"curl/7.7.2 (powerpc-apple-darwin6.0) libcurl 7.7.2 (OpenSSL 0.9.6b)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b8pre) Gecko/20101213 Firefox/4.0b8pre",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)",
"Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01",
"Opera/9.80 (Windows NT 6.1; U; pl) Presto/2.7.62 Version/11.00",
"Opera/9.80 (X11; Linux i686; U; pl) Presto/2.6.30 Version/10.61",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.861.0 Safari/535.2",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.872.0 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.812.0 Safari/535.1",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
]
########################################################################################################
### CRACKERS DEFINITION
########################################################################################################
class SCHWETT:
    """Online cracker plugin for schwett.com (MD5 only)."""

    name = "schwett"
    url = "http://schwett.com"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can handle *alg*, False otherwise."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.

        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Bail out early for algorithms this service cannot crack.
        if not self.isSupported(alg):
            return None
        # Query the service.
        query = "http://schwett.com/md5/index.php?md5value=%s&md5c=Hash+Match" % (hashvalue)
        response = do_HTTP_request(query)
        if not response:
            return None
        html = response.read()
        # The site only reports match/no-match, never the plaintext.
        if search(r"<h3><font color='red'>No Match Found</font></h3><br />", html):
            return None
        return "The hash is broken, please contact with La X marca el lugar and send it the hash value to add the correct regexp."
class NETMD5CRACK:
    """Online cracker plugin for netmd5crack.com (MD5 only)."""

    name = "netmd5crack"
    url = "http://www.netmd5crack.com"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can handle *alg*, False otherwise."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.

        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Bail out early for algorithms this service cannot crack.
        if not self.isSupported(alg):
            return None
        # Query the service.
        query = "http://www.netmd5crack.com/cgi-bin/Crack.py?InputHash=%s" % (hashvalue)
        response = do_HTTP_request(query)
        if not response:
            return None
        html = response.read()
        # Locate the result row for this hash in the HTML table.
        regexp = r'<tr><td class="border">%s</td><td class="border">[^<]*</td></tr></table>' % (hashvalue)
        row = search(regexp, html)
        if not row:
            return None
        if search("Sorry, we don't have that hash in our database", row.group()):
            return None
        # Extract the plaintext from the second table cell.
        return row.group().split('border')[2].split('<')[0][2:]
class MD5_CRACKER:
    # Online cracker plugin for md5-cracker.tk (MD5 only).
    # Requires libxml2 to parse the XML response.
    name = "md5-cracker"
    url = "http://www.md5-cracker.tk"
    supported_algorithm = [MD5]
    def isSupported (self, alg):
        """Return True if HASHCRACK can crack this type of algorithm and
        False if it cannot."""
        if alg in self.supported_algorithm:
            return True
        else:
            return False
    def crack (self, hashvalue, alg):
        """Try to crack the hash.
        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Check if the cracker can crack this kind of algorithm
        if not self.isSupported (alg):
            return None
        # Build the URL
        url = "http://www.md5-cracker.tk/xml.php?md5=%s" % (hashvalue)
        # Make the request
        response = do_HTTP_request ( url )
        # Analyze the response
        if response:
            try:
                # parseDoc comes from libxml2, which is optional at import time.
                doc = parseDoc ( response.read() )
            except:
                print "INFO: You need libxml2 to use this plugin."
                return None
        else:
            return None
        # The plaintext, if any, is in the <data> element of the XML reply.
        result = doc.xpathEval("//data")
        if len(result):
            return result[0].content
        else:
            return None
class BENRAMSEY:
    """Online cracker plugin for tools.benramsey.com (MD5 only)."""

    name = "benramsey"
    url = "http://tools.benramsey.com"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can handle *alg*, False otherwise."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.

        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Bail out early for algorithms this service cannot crack.
        if not self.isSupported(alg):
            return None
        # Query the service.
        query = "http://tools.benramsey.com/md5/md5.php?hash=%s" % (hashvalue)
        response = do_HTTP_request(query)
        if not response:
            return None
        html = response.read()
        # The plaintext is wrapped in a CDATA section of the XML reply.
        cdata = search(r'<string><!\[CDATA\[[^\]]*\]\]></string>', html)
        if cdata:
            return cdata.group().split(']')[0][17:]
        return None
class GROMWEB:
    """Online cracker plugin for md5.gromweb.com (MD5 only)."""

    name = "gromweb"
    url = "http://md5.gromweb.com"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can handle *alg*, False otherwise."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.

        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Bail out early for algorithms this service cannot crack.
        if not self.isSupported(alg):
            return None
        # Query the service; the reply body is the plaintext itself.
        query = "http://md5.gromweb.com/query/%s" % (hashvalue)
        response = do_HTTP_request(query)
        return response.read() if response else response
class HASHCRACKING:
    """Online cracker plugin for md5.hashcracking.com (MD5 only)."""

    name = "hashcracking"
    url = "http://md5.hashcracking.com"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can handle *alg*, False otherwise."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.

        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Bail out early for algorithms this service cannot crack.
        if not self.isSupported(alg):
            return None
        # Query the service.
        query = "http://md5.hashcracking.com/search.php?md5=%s" % (hashvalue)
        response = do_HTTP_request(query)
        if not response:
            return None
        html = response.read()
        # The reply reads '... is <plaintext>'; grab what follows ' is '.
        hit = search(r'\sis.*', html)
        if hit:
            return hit.group()[4:]
        return None
class VICTOROV:
    """Cracker backend for the victorov.su MD5 lookup service."""

    # BUG FIX: this was "hashcracking", duplicating the HASHCRACKING
    # cracker's name; every other backend uses its own service name.
    name = "victorov"
    url = "http://victorov.su"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can crack the given algorithm."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.
        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        # Check if the cracker can crack this kind of algorithm
        if not self.isSupported(alg):
            return None
        # Build the URL and make the request.
        response = do_HTTP_request(
            "http://victorov.su/md5/?md5e=&md5d=%s" % hashvalue)
        if not response:
            return None
        html = response.read()
        # On a hit the plaintext is shown in bold right before the form.
        match = search(r': <b>[^<]*</b><br><form action="">', html)
        if match:
            return match.group().split('b>')[1][:-2]
        return None
class THEKAINE:
    """Cracker backend for the md5.thekaine.de lookup service."""

    name = "thekaine"
    url = "http://md5.thekaine.de"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can crack the given algorithm."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.
        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        if not self.isSupported(alg):
            return None
        response = do_HTTP_request("http://md5.thekaine.de/?hash=%s" % hashvalue)
        if not response:
            return None
        html = response.read()
        match = search(r'<td colspan="2"><br><br><b>[^<]*</b></td><td></td>', html)
        if match:
            # The result cell may explicitly say "not found".
            if search(r'not found', match.group()):
                return None
            return match.group().split('b>')[1][:-2]
        # No result cell at all: fall through returning None implicitly.
class TMTO:
    """Cracker backend for the www.tmto.org rainbow-table API."""

    name = "tmto"
    url = "http://www.tmto.org"
    supported_algorithm = [MD5]

    def isSupported(self, alg):
        """Return True if this cracker can crack the given algorithm."""
        return alg in self.supported_algorithm

    def crack(self, hashvalue, alg):
        """Try to crack the hash.
        @param hashvalue Hash to crack.
        @param alg Algorithm to crack."""
        if not self.isSupported(alg):
            return None
        response = do_HTTP_request(
            "http://www.tmto.org/api/latest/?hash=%s&auth=true" % hashvalue)
        if not response:
            return None
        html = response.read()
        # The plaintext is returned base64 encoded in a text="..." attribute.
        match = search(r'text="[^"]+"', html)
        if match:
            return decodestring(match.group().split('"')[1])
        return None
class MD5_DB:
name = "md5-db"
url = "http://md5-db.de"
supported_algorithm = [MD5]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL
url = | |
"""
MIT License
Copyright (c) 2019-present <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, ClassVar, Optional, TypeVar, Union, cast, overload
from ._column import Column, SQLType
from ._selectable import Selectable
from .utils import optional_pool, query_builder, resolve_annotation
if TYPE_CHECKING:
from asyncpg import Connection, Record # type: ignore
__all__ = ("Insertable",)
T = TypeVar("T")
class Insertable(Selectable):
if TYPE_CHECKING:
_primary_keys: ClassVar[list[Column]]
    @classmethod
    def _setup_column(
        cls,
        name: str,
        type: Any,
        globals: dict[str, Any],
        locals: dict[str, Any],
        cache: dict[str, Any],
    ) -> None:
        """Resolve one annotated column and register it on this table class.

        Raises TypeError when the annotation is not ``Column[...]`` (or its
        generic argument is not a valid SQL type) and ValueError when an
        existing class attribute for ``name`` is not a Column instance.
        """
        # Evaluate string / forward-reference annotations first.
        type = resolve_annotation(type, globals, locals, cache)
        # The outer annotation must literally be Column[...].
        if getattr(type, "__origin__", None) is not Column:
            raise TypeError("Column typings must be of type Column.")
        # Unwrap Column[T] -> T.
        type = type.__args__[0]
        is_array = False
        # Unwrap list[T] -> T, remembering to build an array SQL type below.
        if getattr(type, "__origin__", None) is list:
            is_array = True
            type = type.__args__[0]
        try:
            if not issubclass(type, SQLType):
                # Plain python type (e.g. int): map it to its SQL equivalent.
                type = SQLType._from_type(list[type] if is_array else type)  # type: ignore
            elif is_array:
                # Already an SQLType subclass: wrap its python type for arrays.
                type = SQLType._from_type(list[type.py_type])  # type: ignore
        except TypeError:
            # issubclass() rejects parameterised generics; allow SQLType[...] here.
            if getattr(type, "__origin__", None) is not SQLType:
                raise TypeError("Column typing generics must be a valid SQLType.")
            type = type.__args__[0]  # type: ignore
            type = SQLType._from_type(list[type] if is_array else type)  # type: ignore
        if not hasattr(cls, name):
            # No explicit Column assigned on the class; create a default one.
            column = Column._with_type(type)  # type: ignore
            setattr(cls, name, column)
        else:
            column = getattr(cls, name)
            if not isinstance(column, Column):
                raise ValueError("Column values must be an instance of Column.")
            # Late-bind the resolved SQL type onto the user-provided Column.
            column._sql_type = type
        column.name = name
        column.table = cls
        cls._columns_dict[name] = column
        # Track primary keys so per-record WHERE clauses can be generated.
        if column.primary_key:
            cls._primary_keys.append(column)
    def __init_subclass__(cls, **kwargs: Any) -> None:
        # Give every subclass its own primary-key list (a shared class-level
        # list would leak keys between tables). Reset BEFORE super() in case
        # the parent hook populates columns — presumably via _setup_column,
        # which appends to this list.
        cls._primary_keys = []
        super().__init_subclass__(**kwargs)
# region: query generation
@classmethod
def _get_primary_keys(
cls,
record: Record,
) -> dict[str, Any]:
return {column.name: record[column.name] for column in cls._primary_keys}
@classmethod
@query_builder
def _build_query_insert(
cls,
columns: Union[Iterable[Union[Column, str]], str],
ignore_on_conflict: bool,
update_on_conflict: Union[Iterable[Union[Column, str]], str],
returning: Union[Iterable[Union[Column, str]], str],
) -> list[str]:
builder = [f"INSERT INTO", cls._name, "("]
if isinstance(columns, str):
builder.append(columns)
else:
for column in columns:
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(",")
builder.pop(-1)
builder.append(") VALUES (")
for i, _ in enumerate(columns, 1):
builder.append(f"${i}")
builder.append(",")
builder.pop(-1)
builder.append(")")
if ignore_on_conflict and update_on_conflict:
raise ValueError("")
elif ignore_on_conflict:
builder.append("ON CONFLICT DO NOTHING")
elif update_on_conflict:
builder.append("ON CONFLICT (")
for column in cls._primary_keys:
builder.append(column.name)
builder.append(",")
builder.pop(-1)
builder.append(") DO UPDATE SET")
if isinstance(update_on_conflict, str):
builder.append(update_on_conflict)
else:
for column in update_on_conflict:
if isinstance(column, Column):
column = column.name
builder.append(f"{column} = EXCLUDED.{column}")
builder.append(",")
builder.pop(-1)
if returning:
builder.append("RETURNING")
if isinstance(returning, str):
builder.append(returning)
else:
for column in returning:
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(",")
builder.pop(-1)
return builder
@classmethod
@query_builder
def _build_query_update(
cls,
where: str,
offset: int,
columns: Union[Iterable[Union[Column, str]], str],
) -> list[str]:
builder = [f"UPDATE", cls._name, "SET"]
if isinstance(columns, str):
columns = [column.strip() for column in columns.split(",")]
for i, column in enumerate(columns, offset):
if isinstance(column, Column):
column = column.name
builder.append(column)
builder.append(f"= ${i}")
builder.append(",")
builder.pop(-1)
builder.append("WHERE")
builder.append(where)
return builder
@classmethod
@query_builder
def _build_query_delete(
cls,
where: str,
) -> list[str]:
builder = ["DELETE FROM", cls._name]
if where:
builder.append("WHERE")
builder.append(where)
return builder
# endregion
# region: public methods
@overload
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
/,
*,
ignore_on_conflict: bool = ...,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = ...,
returning: Union[Iterable[Union[Column, str]], str] = ...,
**values: Any,
) -> Record:
...
@overload
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
*,
ignore_on_conflict: bool = ...,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = ...,
returning: None = ...,
**values: Any,
) -> None:
...
@classmethod
@optional_pool
async def insert(
cls,
connection: Connection,
/,
*,
ignore_on_conflict: bool = False,
update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
returning: Optional[Union[Iterable[Union[Column, str]], str]] = None,
**values: Any,
) -> Optional[Record]:
r"""|coro|
Inserts a new record into the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
ignore_on_conflict: :class:`bool`
Sets whether to ignore errors when inserting, defaults to ``False``.
update_on_conflict: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
An Optional list of or string representing columns to update with new data if a conflict occurs.
returning: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
An optional list of or string representing columns to return from the inserted record.
\*\*values: Any
The column to value mapping for the record to insert.
Returns
-------
Optional[:class:`asyncpg.Record`]
A record containing information from the inserted record.
"""
columns = cls._get_columns(values)
query = cls._build_query_insert(columns, ignore_on_conflict, update_on_conflict or [], returning or [])
if returning is not None:
return await connection.fetchrow(query, *values.values())
await connection.execute(query, *values.values())
    @overload
    @classmethod
    @optional_pool
    async def insert_many(
        cls,
        connection: Connection,
        /,
        columns: Union[Iterable[Union[Column, str]], str],
        *values: Iterable[Any],
        ignore_on_conflict: bool = False,
        update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
    ) -> None:
        ...
    @overload
    @classmethod
    @optional_pool
    async def insert_many(
        cls,
        connection: Connection,
        /,
        columns: None,
        *values: dict[str, Any],
        ignore_on_conflict: bool = False,
        update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
    ) -> None:
        ...
    @classmethod
    @optional_pool
    async def insert_many(
        cls,
        connection: Connection,
        /,
        columns: Optional[Union[Iterable[Union[Column, str]], str]],
        *values: Union[Iterable[Any], dict[str, Any]],
        ignore_on_conflict: bool = False,
        update_on_conflict: Optional[Union[Iterable[Union[Column, str]], str]] = None,
    ) -> None:
        r"""|coro|
        Inserts a set of new records into the database.
        Parameters
        ----------
        connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
            The database connection to use for transactions.
        columns: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
            The columns each value set maps onto; when ``None`` the column
            names are derived from the keys of the first value mapping.
        \*values: Dict[:class:`str`, Any]
            The column to value mappings for each record to insert.
        ignore_on_conflict: :class:`bool`
            Sets whether to ignore errors when inserting, defaults to ``False``.
        update_on_conflict: Optional[Union[Iterable[Union[:class:`~Column`, :class:`str`]], :class:`str`]]
            An Optional list of or string representing columns to update with new data if a conflict occurs.
        """
        if columns is None:
            values = cast(tuple[dict[str, Any], ...], values)
            # Derive the column list from the first mapping; all mappings are
            # presumably in the same key order — confirm with callers.
            columns = cls._get_columns(values[0])
            # Re-shape mappings into positional value lists (lazy generator).
            values = cast(tuple[list[Any]], (list(value.values()) for value in values))
        query = cls._build_query_insert(columns, ignore_on_conflict, update_on_conflict or [], [])
        await connection.executemany(query, values)
    @classmethod
    @optional_pool
    async def update_where(
        cls,
        connection: Connection,
        /,
        where: str,
        *values: Any,
        **_values: Any,
    ) -> None:
        r"""|coro|
        Updates records in the database which match a given WHERE clause.
        Parameters
        ----------
        connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
            The database connection to use for transactions.
        where: :class:`str`
            An SQL WHERE clause.
        \*values: Any
            Values to be substituted into the WHERE clause.
        \*\*_values: Any
            The column to value mapping to assign to updated records.
        """
        columns = cls._get_columns(_values)
        # Placeholders $1..$N are taken by the WHERE values, so the SET
        # assignments start numbering at len(values) + 1.
        query = cls._build_query_update(where, len(values) + 1, columns)
        await connection.execute(query, *values, *_values.values())
@classmethod
@optional_pool
async def update_record(
cls,
connection: Connection,
/,
record: Record,
**values: Any,
) -> None:
r"""|coro|
Updates a record in the database.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
record: :class:`asyncpg.Record`
The record to update.
\*\*values: Any
The column to value mapping to assign to updated record.
"""
primary_keys = cls._get_primary_keys(record)
where = cls._build_where_clause(primary_keys)
return await cls.update_where(connection, where, *primary_keys.values(), **values)
@classmethod
@optional_pool
async def delete_where(
cls,
connection: Connection,
/,
where: str,
*values: Any,
) -> None:
"""|coro|
Deletes records in the database which match the given WHERE clause.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
where: :class:`str`
An SQL WHERE clause.
*values: Any
Values to be substituted into the WHERE clause.
"""
query = cls._build_query_delete(where)
await connection.execute(query, *values)
@classmethod
@optional_pool
async def delete(
cls,
connection: Connection,
/,
**values: Any,
) -> None:
r"""|coro|
Deletes records in the database which contain the given values.
Parameters
----------
connection: :class:`asyncpg.Connection <asyncpg.connection.Connection>`
The database connection to use for transactions.
\*\*values: Any
The column to value mapping to filter records with.
"""
where = cls._build_where_clause(values)
return await cls.delete_where(connection, where, *filter(lambda v: v is not None, | |
import torch
import torch.nn as nn
from torchvision.utils import save_image
import math
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def elbo_loss(CV_loss, mu_latent, logvar_latent):
    """Combine a reconstruction loss with the latent KL divergence.

    KL-Divergence = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)

    :param CV_loss: reconstruction loss value produced by the criterion
    :param mu_latent: the mean from the latent vector
    :param logvar_latent: log variance from the latent vector
    :return: tuple (KLD, reconstruction loss, total loss)
    """
    kld = -0.5 * torch.sum(1 + logvar_latent - mu_latent ** 2 - torch.exp(logvar_latent))
    return kld, CV_loss, CV_loss + kld
def elbo_loss_alternative(mu_rec, logvar_rec, mu_latent, logvar_latent, orig_x, kl_weight=1):
    """Hand-written Gaussian ELBO: negative log-likelihood + weighted KLD.

    Reconstruction loss is the negative log-likelihood of ``orig_x`` under
    N(mu_rec, exp(logvar_rec)):
        sum(0.5 * (log(var) + (x - mu)^2 / var + log(2*pi)))
    KL-Divergence = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)

    :param mu_rec: decoder output, mean of the reconstruction
    :param logvar_rec: globally trainable scalar, log-variance of reconstruction
    :param mu_latent: the mean from the latent vector
    :param logvar_latent: log variance from the latent vector
    :param orig_x: input data
    :param kl_weight: optional weight applied to the KLD term
    :return: tuple (KLD, reconstruction loss, total loss)
    """
    # Broadcast the scalar log-variance to the shape of the reconstruction.
    logvar_full = logvar_rec * torch.ones_like(mu_rec)
    sq_err = (orig_x - mu_rec) ** 2
    rec_loss = torch.sum(0.5 * (logvar_full + sq_err / (torch.exp(logvar_full)) + math.log(2 * np.pi)))
    kld = kl_weight * (-0.5 * torch.sum(1 + logvar_latent - mu_latent ** 2 - torch.exp(logvar_latent)))
    return kld, rec_loss, rec_loss + kld
def fit(model, dataloader, optimizer, train_data, labelled=True, use_BCE=True):
    """
    Run one training epoch using a criterion-based reconstruction loss.
    :param model: VAE model to train
    :param dataloader: input dataloader to fetch batches
    :param train_data: useful for plotting the completion bar
    :param optimizer: which optimizer to utilize
    :param labelled: to know if the data is composed of (data, target) or only data
    :param use_BCE: if True then use BCELoss, otherwise use MSELoss
    :return: list [KLD loss, reconstruction loss, total loss], each averaged
        over the dataset
    """
    model.train()
    criterion = nn.BCELoss(reduction='sum') if use_BCE else nn.MSELoss(reduction='sum')
    kld_sum = rec_sum = total_sum = 0.0
    n_batches = int(len(train_data) / dataloader.batch_size)
    for i, data in tqdm(enumerate(dataloader), total=n_batches):
        batch = data[0] if labelled else data
        # Flatten each sample to a vector and ensure float dtype.
        batch = batch.view(batch.size(0), -1).float()
        optimizer.zero_grad()
        mu_rec, mu_latent, logvar_latent = model(batch)
        kld, rec, total = elbo_loss(criterion(mu_rec, batch), mu_latent, logvar_latent)
        kld_sum += kld.item()
        rec_sum += rec.item()
        total_sum += total.item()
        total.backward()
        optimizer.step()
    n = len(dataloader.dataset)
    return [kld_sum / n, rec_sum / n, total_sum / n]
def test(model, dataloader, test_data, labelled=None, epoch=None, save=False, plot=True, use_BCE=True):
    """
    test method, evaluated over the whole dataset, retrieving relevant info if requested
    :param model: VAE model to test
    :param dataloader: input dataloader to fatch batches
    :param test_data: useful for plotting completion bar
    :param labelled: to know if the data is composed of (data, target) or only data
    :param epoch: for save file name
    :param save: to know if needed to save the plot or not
    :param plot: to know if needed to plot or not
    :param use_BCE: if True then use BCELoss, otherwise use MSELoss
        (new, defaults to the previous hard-coded BCE for consistency with fit)
    :return: test loss
    """
    model.eval()  # set in eval mode
    running_loss, running_kld_loss, running_rec_loss = 0.0, 0.0, 0.0  # set up losses to accumulate over
    # CONSISTENCY FIX: criterion was hard-coded to BCE while fit() exposes
    # use_BCE; default preserves the old behavior.
    criterion = nn.BCELoss(reduction='sum') if use_BCE else nn.MSELoss(reduction='sum')
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader), total=int(len(test_data) / dataloader.batch_size)):
            # fetch batch
            if labelled is not None:
                data, label = data
            data = data.view(data.size(0), -1)
            data = data.float()
            mu_rec, mu_latent, logvar_latent = model(data)  # feedforward
            CV_loss = criterion(mu_rec, data)  # get rec loss
            loss = elbo_loss(CV_loss, mu_latent, logvar_latent)  # get total loss
            # update losses
            running_kld_loss += loss[0].item()
            running_rec_loss += loss[1].item()
            running_loss += loss[2].item()
            # reconstruct digits and construct latent space (only if the task is the correct one)
            if labelled is not None:
                if i == int(len(test_data) / dataloader.batch_size) - 1:
                    save_reconstructed(data, mu_rec, epoch, batch_size=dataloader.batch_size)
                if mu_latent.shape[1] > 2:
                    continue
                plt.scatter(mu_latent[:, 0], mu_latent[:, 1], c=label, cmap='tab10', s=5)
    # if the task allows for it, if the user wants to plot then plot latent space
    if labelled is not None:
        if plot:
            if mu_latent.shape[1] <= 2:
                plt.colorbar()
                plt.grid(False)
                # save instead of plotting if requested
                if save:
                    plt.savefig(f"../outputs/latent_space/latent_space{epoch}.png")
                    plt.clf()
                else:
                    plt.show()
    # set up return variable for all three losses
    test_loss = [running_kld_loss / len(dataloader.dataset),
                 running_rec_loss / len(dataloader.dataset),
                 running_loss / len(dataloader.dataset)]
    return test_loss
def fit_alternative(model, dataloader, optimizer, train_data, labelled=True):
    """
    Run one training epoch using the hand-written Gaussian ELBO loss.
    :param model: VAE model to train
    :param dataloader: input dataloader to fetch batches
    :param optimizer: which optimizer to utilize
    :param train_data: useful for plotting the completion bar
    :param labelled: to know if the data is composed of (data, target) or only data
    :return: list [KLD loss, reconstruction loss, total loss], each averaged
        over the dataset
    """
    model.train()
    kld_sum = rec_sum = total_sum = 0.0
    n_batches = int(len(train_data) / dataloader.batch_size)
    for i, data in tqdm(enumerate(dataloader), total=n_batches):
        batch = data[0] if labelled else data
        batch = batch.view(batch.size(0), -1)  # flatten each sample
        optimizer.zero_grad()
        mu_rec, mu_latent, logvar_latent = model(batch)
        # model.log_var_rec is the globally trainable reconstruction variance.
        kld, rec, total = elbo_loss_alternative(mu_rec, model.log_var_rec, mu_latent, logvar_latent, batch)
        kld_sum += kld.item()
        rec_sum += rec.item()
        total_sum += total.item()
        total.backward()
        optimizer.step()
    n = len(dataloader.dataset)
    return [kld_sum / n, rec_sum / n, total_sum / n]
def test_alternative(model, dataloader, test_data, labelled=None, epoch=None, save=False, plot=True):
    """
    test method using the hand-written Gaussian ELBO loss, over all dataset,
    retrieving relevant info if requested
    :param model: VAE model to test
    :param dataloader: input dataloader to fatch batches
    :param test_data: useful for plotting completion bar
    :param labelled: to know if the data is composed of (data, target) or only data
    :param epoch: for save file name
    :param save: to know if needed to save the plot or not
    :param plot: to know if needed to plot or not
    :return: test loss
    """
    model.eval()  # set in eval mode
    running_loss, running_kld_loss, running_rec_loss = 0.0, 0.0, 0.0  # set up losses to accumulate over
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader), total=int(len(test_data) / dataloader.batch_size)):
            # fetch batch
            if labelled is not None:
                data, label = data
            data = data.view(data.size(0), -1)
            mu_rec, mu_latent, logvar_latent = model(data)  # feedforward
            loss = elbo_loss_alternative(mu_rec, model.log_var_rec, mu_latent, logvar_latent, data)  # get total loss
            # update losses
            running_kld_loss += loss[0].item()
            running_rec_loss += loss[1].item()
            running_loss += loss[2].item()
            # reconstruct digits and construct latent space (only if the task is the correct one)
            if labelled is not None:
                # On the last full batch, dump reconstructions to disk.
                if i == int(len(test_data) / dataloader.batch_size) - 1:
                    save_reconstructed(data, mu_rec, epoch, batch_size=dataloader.batch_size)
                # Latent scatter plots are only meaningful in 2D or less.
                if mu_latent.shape[1] > 2:
                    continue
                plt.scatter(mu_latent[:, 0], mu_latent[:, 1], c=label, cmap='tab10', s=5)
    # if the task allows for it, if the user wants to plot then plot latent space
    if labelled is not None:
        if plot:
            if mu_latent.shape[1] <= 2:
                plt.colorbar()
                plt.grid(False)
                # save instead of plotting if requested
                if save:
                    plt.savefig(f"../outputs/latent_space/latent_space{epoch}.png")
                    plt.clf()
                else:
                    plt.show()
    # set up return variable for all three losses
    test_loss = [running_kld_loss / len(dataloader.dataset),
                 running_rec_loss / len(dataloader.dataset),
                 running_loss / len(dataloader.dataset)]
    return test_loss
def plot_reconstructed_digits(model, epoch=None, r0=(-8, 8), r1=(-8, 8), n=30, save=False):
"""
plot figure showing digits distribution over the latent space
:param model: the VAE model to use
:param epoch: epoch num, needed for save file name
:param r0: defines the ensemble of values to try on first axes
:param r1: defines the | |
case of gRPC transcoding
@property
def field_headers(self) -> Sequence[str]:
"""Return the field headers defined for this method."""
http = self.options.Extensions[annotations_pb2.http]
pattern = re.compile(r'\{([a-z][\w\d_.]+)=')
potential_verbs = [
http.get,
http.put,
http.post,
http.delete,
http.patch,
http.custom.path,
]
return next((tuple(pattern.findall(verb)) for verb in potential_verbs if verb), ())
@property
def http_opt(self) -> Optional[Dict[str, str]]:
"""Return the http option for this method.
e.g. {'verb': 'post'
'url': '/some/path'
'body': '*'}
"""
http: List[Tuple[descriptor_pb2.FieldDescriptorProto, str]]
http = self.options.Extensions[annotations_pb2.http].ListFields()
if len(http) < 1:
return None
http_method = http[0]
answer: Dict[str, str] = {
'verb': http_method[0].name,
'url': http_method[1],
}
if len(http) > 1:
body_spec = http[1]
answer[body_spec[0].name] = body_spec[1]
# TODO(yon-mg): handle nested fields & fields past body i.e. 'additional bindings'
# TODO(yon-mg): enums for http verbs?
return answer
@property
def path_params(self) -> Sequence[str]:
"""Return the path parameters found in the http annotation path template"""
# TODO(yon-mg): fully implement grpc transcoding (currently only handles basic case)
if self.http_opt is None:
return []
pattern = r'\{(\w+)\}'
return re.findall(pattern, self.http_opt['url'])
@property
def query_params(self) -> Set[str]:
"""Return query parameters for API call as determined by http annotation and grpc transcoding"""
# TODO(yon-mg): fully implement grpc transcoding (currently only handles basic case)
# TODO(yon-mg): remove this method and move logic to generated client
if self.http_opt is None:
return set()
params = set(self.path_params)
body = self.http_opt.get('body')
if body:
params.add(body)
return set(self.input.fields) - params
    # TODO(yon-mg): refactor as there may be more than one method signature
    @utils.cached_property
    def flattened_fields(self) -> Mapping[str, Field]:
        """Return the flattened-argument fields from the method signature(s).

        Keys are the (possibly underscore-suffixed) argument names; values
        are the corresponding request Field objects.
        """
        # Cross-package requests cannot safely flatten non-primitive fields.
        cross_pkg_request = self.input.ident.package != self.ident.package
        def filter_fields(sig: str) -> Iterable[Tuple[str, Field]]:
            # A signature is a comma-separated list of (dotted) field paths.
            for f in sig.split(','):
                if not f:
                    # Special case for an empty signature
                    continue
                name = f.strip()
                field = self.input.get_field(*name.split('.'))
                # Suffix names that would collide with reserved identifiers.
                name += '_' if field.field_pb.name in utils.RESERVED_NAMES else ''
                if cross_pkg_request and not field.is_primitive:
                    # This is not a proto-plus wrapped message type,
                    # and setting a non-primitive field directly is verboten.
                    continue
                yield name, field
        signatures = self.options.Extensions[client_pb2.method_signature]
        # Preserve signature order; later duplicates overwrite earlier ones.
        answer: Dict[str, Field] = collections.OrderedDict(
            name_and_field
            for sig in signatures
            for name_and_field in filter_fields(sig)
        )
        return answer
@utils.cached_property
def flattened_field_to_key(self):
return {field.name: key for key, field in self.flattened_fields.items()}
@utils.cached_property
def legacy_flattened_fields(self) -> Mapping[str, Field]:
"""Return the legacy flattening interface: top level fields only,
required fields first"""
required, optional = utils.partition(lambda f: f.required,
self.input.fields.values())
return collections.OrderedDict((f.name, f)
for f in chain(required, optional))
@property
def grpc_stub_type(self) -> str:
"""Return the type of gRPC stub to use."""
return '{client}_{server}'.format(
client='stream' if self.client_streaming else 'unary',
server='stream' if self.server_streaming else 'unary',
)
# TODO(yon-mg): figure out why idempotent is reliant on http annotation
@utils.cached_property
def idempotent(self) -> bool:
"""Return True if we know this method is idempotent, False otherwise.
Note: We are intentionally conservative here. It is far less bad
to falsely believe an idempotent method is non-idempotent than
the converse.
"""
return bool(self.options.Extensions[annotations_pb2.http].get)
    @property
    def ident(self) -> metadata.Address:
        """Return the identifier data to be used in templates."""
        # Convenience alias over the precomputed metadata address.
        return self.meta.address
    @utils.cached_property
    def paged_result_field(self) -> Optional[Field]:
        """Return the response pagination field if the method is paginated.

        Returns ``None`` unless the request/response pair satisfies the
        AIP-style pagination contract checked below.
        """
        # If the request field lacks any of the expected pagination fields,
        # then the method is not paginated.
        # The request must have page_token and next_page_token as they keep track of pages
        for source, source_type, name in ((self.input, str, 'page_token'),
                                          (self.output, str, 'next_page_token')):
            field = source.fields.get(name, None)
            if not field or field.type != source_type:
                return None
        # The request must have max_results or page_size
        page_fields = (self.input.fields.get('max_results', None),
                       self.input.fields.get('page_size', None))
        # Take whichever of the two page-size fields is present (if any).
        page_field_size = next(
            (field for field in page_fields if field), None)
        if not page_field_size or page_field_size.type != int:
            return None
        # Return the first repeated field.
        for field in self.output.fields.values():
            if field.repeated:
                return field
        # We found no repeated fields. Return None.
        return None
    @utils.cached_property
    def ref_types(self) -> Sequence[Union[MessageType, EnumType]]:
        """Return all types referenced by this method (recursively)."""
        return self._ref_types(True)
    @utils.cached_property
    def flat_ref_types(self) -> Sequence[Union[MessageType, EnumType]]:
        """Return only types referenced via the flattened signature fields."""
        return self._ref_types(False)
    def _ref_types(self, recursive: bool) -> Sequence[Union[MessageType, EnumType]]:
        """Return types referenced by this method.

        :param recursive: When True, walk the full request field tree;
            otherwise only the flattened signature fields are considered.
        """
        # Begin with the input (request) and output (response) messages.
        answer: List[Union[MessageType, EnumType]] = [self.input]
        types: Iterable[Union[MessageType, EnumType]] = (
            self.input.recursive_field_types if recursive
            else (
                f.type
                for f in self.flattened_fields.values()
                if f.message or f.enum
            )
        )
        answer.extend(types)
        # Non-void methods also reference their (sync and async) outputs.
        if not self.void:
            answer.append(self.client_output)
            answer.extend(self.client_output.field_types)
            answer.append(self.client_output_async)
            answer.extend(self.client_output_async.field_types)
        # If this method has LRO, it is possible (albeit unlikely) that
        # the LRO messages reside in a different module.
        if self.lro:
            answer.append(self.lro.response_type)
            answer.append(self.lro.metadata_type)
        # If this message paginates its responses, it is possible
        # that the individual result messages reside in a different module.
        if self.paged_result_field and self.paged_result_field.message:
            answer.append(self.paged_result_field.message)
        # Done; return the answer.
        return tuple(answer)
    @property
    def void(self) -> bool:
        """Return True if this method has no return value, False otherwise."""
        # Methods returning google.protobuf.Empty are treated as void.
        return self.output.ident.proto == 'google.protobuf.Empty'
def with_context(self, *, collisions: FrozenSet[str]) -> 'Method':
"""Return a derivative of this method with the provided context.
This method is used to address naming collisions. The returned
``Method`` object aliases module names to avoid naming collisions
in the file being written.
"""
maybe_lro = None
if self.lro:
maybe_lro = self.lro.with_context(
collisions=collisions
) if collisions else self.lro
return dataclasses.replace(
self,
lro=maybe_lro,
input=self.input.with_context(collisions=collisions),
output=self.output.with_context(collisions=collisions),
meta=self.meta.with_context(collisions=collisions),
)
@dataclasses.dataclass(frozen=True)
class CommonResource:
    """A (type name, path pattern) pair describing a shared resource."""
    type_name: str
    pattern: str
    @classmethod
    def build(cls, resource: resource_pb2.ResourceDescriptor):
        """Build a CommonResource from a descriptor; the first pattern wins."""
        return cls(
            type_name=resource.type,
            pattern=next(iter(resource.pattern)),
        )
    @utils.cached_property
    def message_type(self):
        """Return a synthetic MessageType carrying this resource annotation."""
        message_pb = descriptor_pb2.DescriptorProto()
        resource_opts = message_pb.options.Extensions[resource_pb2.resource]
        resource_opts.type = self.type_name
        resource_opts.pattern.append(self.pattern)
        return MessageType(
            message_pb=message_pb,
            fields={},
            nested_enums={},
            nested_messages={},
        )
@dataclasses.dataclass(frozen=True)
class Service:
    """Description of a service (defined with the ``service`` keyword)."""
    # Raw protobuf descriptor; unknown attribute lookups are proxied to it.
    service_pb: descriptor_pb2.ServiceDescriptorProto
    # RPC methods of this service, keyed by method name.
    methods: Mapping[str, Method]
    # N.B.: visible_resources is intended to be a read-only view
    # whose backing store is owned by the API.
    # This is represented by a types.MappingProxyType instance.
    visible_resources: Mapping[str, MessageType]
    meta: metadata.Metadata = dataclasses.field(
        default_factory=metadata.Metadata,
    )
    # Well-known resource types that any API may reference.
    # NOTE(review): since this is annotated ClassVar, @dataclass ignores it,
    # so at runtime the attribute appears to be the ``dataclasses.field``
    # object itself rather than the dict — confirm how callers consume it.
    common_resources: ClassVar[Mapping[str, CommonResource]] = dataclasses.field(
        default={
            "cloudresourcemanager.googleapis.com/Project": CommonResource(
                "cloudresourcemanager.googleapis.com/Project",
                "projects/{project}",
            ),
            "cloudresourcemanager.googleapis.com/Organization": CommonResource(
                "cloudresourcemanager.googleapis.com/Organization",
                "organizations/{organization}",
            ),
            "cloudresourcemanager.googleapis.com/Folder": CommonResource(
                "cloudresourcemanager.googleapis.com/Folder",
                "folders/{folder}",
            ),
            "cloudbilling.googleapis.com/BillingAccount": CommonResource(
                "cloudbilling.googleapis.com/BillingAccount",
                "billingAccounts/{billing_account}",
            ),
            "locations.googleapis.com/Location": CommonResource(
                "locations.googleapis.com/Location",
                "projects/{project}/locations/{location}",
            ),
        },
        init=False,
        compare=False,
    )
def __getattr__(self, name):
    # Fall back to the wrapped ServiceDescriptorProto for any attribute not
    # defined on this dataclass (e.g. ``name``, ``options``).
    return getattr(self.service_pb, name)
@property
def client_name(self) -> str:
    """Returns the name of the generated client class"""
    return f"{self.name}Client"
@property
def async_client_name(self) -> str:
    """Returns the name of the generated AsyncIO client class"""
    return f"{self.name}AsyncClient"
@property
def transport_name(self):
    """Name of the generated base transport class."""
    return f"{self.name}Transport"
@property
def grpc_transport_name(self):
    """Name of the generated gRPC transport class."""
    return f"{self.name}GrpcTransport"
@property
def grpc_asyncio_transport_name(self):
    """Name of the generated gRPC AsyncIO transport class."""
    return f"{self.name}GrpcAsyncIOTransport"
@property
def rest_transport_name(self):
    """Name of the generated REST transport class."""
    return f"{self.name}RestTransport"
@property
def has_lro(self) -> bool:
    """Return whether the service has a long-running method."""
    # Generator instead of a throwaway list: avoids materializing all
    # methods and lets ``any`` short-circuit on the first LRO found.
    return any(m.lro for m in self.methods.values())
@property
def has_pagers(self) -> bool:
    """Return whether the service has paged methods."""
    paged = (m.paged_result_field for m in self.methods.values())
    return any(paged)
@property
def host(self) -> str:
    """Return the hostname for this service, if specified.

    Returns:
        str: The hostname, with no protocol and no trailing ``/``;
        the empty string when no default host is set.
    """
    # Read the extension once (the original performed the lookup twice).
    host = self.options.Extensions[client_pb2.default_host]
    return host if host else ''
@property
def shortname(self) -> str:
    """Return the API short name. DRIFT uses this to identify
    APIs.

    Returns:
        str: The api shortname.
    """
    # Hosts follow "{api_shortname}.googleapis.com"; take the first label.
    host_labels = self.host.split(".")
    return host_labels[0]
@property
def oauth_scopes(self) -> Sequence[str]:
    """Return a sequence of oauth scopes, if applicable.

    Returns:
        Sequence[str]: A sequence of OAuth scopes.
    """
    raw = self.options.Extensions[client_pb2.oauth_scopes]
    # Comma-separated list; skip empty entries, trim whitespace.
    return tuple(scope.strip() for scope in raw.split(',') if scope)
@property
def module_name(self) -> str:
    """Return the appropriate module name for this service.

    Returns:
        str: The service name, in snake case.
    """
    # Delegates casing to the shared utils helper so naming stays consistent.
    return utils.to_snake_case(self.name)
@utils.cached_property
def names(self) -> FrozenSet[str]:
    """Return a set of names used in this service.

    This is used for detecting naming collisions in the module names
    used for imports.
    """
    # The service itself, its client classes, and snake-cased method names.
    used = {self.name, self.client_name, self.async_client_name}
    for method in self.methods.values():
        used.add(utils.to_snake_case(method.name))
    # For each import module name, gather the set of packages that provide
    # it; a module name supplied by more than one package is a collision.
    packages_by_module: Dict[str, Set[str]] = collections.defaultdict(set)
    for method in self.methods.values():
        for ref in method.ref_types:
            packages_by_module[ref.ident.module].add(ref.ident.package)
    used.update(
        module
        for module, packages in packages_by_module.items()
        if len(packages) > 1
    )
    return frozenset(used)
@utils.cached_property
def resource_messages(self) | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
# ----------------
# Test Functions
# ----------------
from six.moves import zip, range
# from wbia.plottool import draw_func2 as df2
# from wbia.plottool.viz_keypoints import show_keypoints
import utool as ut
import matplotlib as mpl
import numpy as np
import vtool.linalg as ltool
TAU = 2 * np.pi # References: tauday.com
def draw_expanded_scales(imgL, sel_kpts, exkpts, exdesc_):
    """Plot the image with all expanded keypoints, then one warped patch
    per expanded keypoint.

    Layout: a prefix row showing ``imgL``, followed by one row per scale
    (``exkpts``) and one column per selected keypoint (``sel_kpts``).
    """
    from wbia import plottool as pt
    draw_keypoint_patch = pt.draw_keypoint_patch
    get_warped_patch = pt.get_warped_patch  # NOQA
    n_rows = len(exkpts)
    n_cols = len(sel_kpts)
    stacked_kpts = np.vstack(exkpts)
    fnum = 1
    pt.figure(fnum=fnum, docla=True, doclf=True)
    n_pre_rows = 1
    n_pre_cols = (n_pre_rows * n_cols) + 1
    pt.show_keypoints(
        imgL, stacked_kpts, fnum=fnum, pnum=(n_rows + n_pre_rows, 1, 1), color=pt.BLUE
    )
    # One subplot per keypoint, in row-major order over scales.
    flat_kpts = (kp for kpts_ in exkpts for kp in kpts_)
    for px, kp in enumerate(flat_kpts):
        sift = exdesc_[px]
        pnum = (n_rows + n_pre_rows, n_cols, px + n_pre_cols)
        draw_keypoint_patch(imgL, kp, sift, warped=True, fnum=fnum, pnum=pnum)
    print('nRows = %r' % n_rows)
    print('nCols = %r' % n_cols)
def in_depth_ellipse(kp):
"""
Makes sure that I understand how the ellipse is created form a keypoint
representation. Walks through the steps I took in coming to an
understanding.
CommandLine:
python -m tests.test_ellipse --test-in_depth_ellipse --show --num-samples=12
Example:
>>> # SCRIPT
>>> from tests.test_ellipse import * # NOQA
>>> import pyhestest
>>> test_data = pyhestest.load_test_data(short=True)
>>> kpts = test_data['kpts']
>>> kp = kpts[0]
>>> #kp = np.array([0, 0, 10, 10, 10, 0])
>>> test_locals = in_depth_ellipse(kp)
>>> ut.quit_if_noshow()
>>> ut.show_if_requested()
"""
from wbia import plottool as pt
# nSamples = 12
nSamples = ut.get_argval('--num-samples', type_=int, default=12)
kp = np.array(kp, dtype=np.float64)
# -----------------------
# SETUP
# -----------------------
np.set_printoptions(precision=3)
# pt.reset()
pt.figure(9003, docla=True, doclf=True)
ax = pt.gca()
ax.invert_yaxis()
def _plotpts(data, px, color=pt.BLUE, label='', marker='.', **kwargs):
# pt.figure(9003, docla=True, pnum=(1, 1, px))
pt.plot2(data.T[0], data.T[1], marker, '', color=color, label=label, **kwargs)
# pt.update()
def _plotarrow(x, y, dx, dy, color=pt.BLUE, label=''):
ax = pt.gca()
arrowargs = dict(head_width=0.5, length_includes_head=True, label=label)
arrow = mpl.patches.FancyArrow(x, y, dx, dy, **arrowargs)
arrow.set_edgecolor(color)
arrow.set_facecolor(color)
ax.add_patch(arrow)
# pt.update()
# -----------------------
# INPUT
# -----------------------
print('kp = %s' % ut.repr2(kp, precision=3))
print('--------------------------------')
print('Let V = Perdoch.A')
print('Let Z = Perdoch.E')
print('Let invV = Perdoch.invA')
print('--------------------------------')
print("Input from Perdoch's detector: ")
# We are given the keypoint in invA format
if len(kp) == 5:
(ix, iy, iv11, iv21, iv22) = kp
iv12 = 0
elif len(kp) == 6:
(ix, iy, iv11, iv21, iv22, ori) = kp
iv12 = 0
invV = np.array([[iv11, iv12, ix], [iv21, iv22, iy], [0, 0, 1]])
V = np.linalg.inv(invV)
Z = (V.T).dot(V)
import vtool as vt
V_2x2 = V[0:2, 0:2]
Z_2x2 = Z[0:2, 0:2]
V_2x2_ = vt.decompose_Z_to_V_2x2(Z_2x2)
assert np.all(np.isclose(V_2x2, V_2x2_))
# C = np.linalg.cholesky(Z)
# np.isclose(C.dot(C.T), Z)
# Z
print('invV is a transform from points on a unit-circle to the ellipse')
ut.horiz_print('invV = ', invV)
print('--------------------------------')
print('V is a transformation from points on the ellipse to a unit circle')
ut.horiz_print('V = ', V)
print('--------------------------------')
print('An ellipse is a special case of a conic. For any ellipse:')
print('Points on the ellipse satisfy (x_ - x_0).T.dot(Z).dot(x_ - x_0) = 1')
print('where Z = (V.T).dot(V)')
ut.horiz_print('Z = ', Z)
# Define points on a unit circle
theta_list = np.linspace(0, TAU, nSamples)
cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1) for t_ in theta_list])
# Transform those points to the ellipse using invV
ellipse_pts1 = invV.dot(cicrle_pts.T).T
# Lets check our assertion: (x_ - x_0).T.dot(Z).dot(x_ - x_0) == 1
x_0 = np.array([ix, iy, 1])
checks = [(x_ - x_0).T.dot(Z).dot(x_ - x_0) for x_ in ellipse_pts1]
try:
# HELP: The phase is off here. in 3x3 version I'm not sure why
# assert all([almost_eq(1, check) for check in checks1])
is_almost_eq_pos1 = [ut.almost_eq(1, check) for check in checks]
is_almost_eq_neg1 = [ut.almost_eq(-1, check) for check in checks]
assert all(is_almost_eq_pos1)
except AssertionError as ex:
print('circle pts = %r ' % cicrle_pts)
print(ex)
print(checks)
print([ut.almost_eq(-1, check, 1e-9) for check in checks])
raise
else:
# assert all([abs(1 - check) < 1E-11 for check in checks2])
print('... all of our plotted points satisfy this')
# =======================
# THE CONIC SECTION
# =======================
# All of this was from the Perdoch paper, now lets move into conic sections
# We will use the notation from wikipedia
# References:
# http://en.wikipedia.org/wiki/Conic_section
# http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
# -----------------------
# MATRIX REPRESENTATION
# -----------------------
# The matrix representation of a conic is:
# (A, B2, B2_, C) = Z.flatten()
# (D, E, F) = (0, 0, 1)
(A, B2, D2, B2_, C, E2, D2_, E2_, F) = Z.flatten()
B = B2 * 2
D = D2 * 2
E = E2 * 2
assert B2 == B2_, 'matrix should by symmetric'
assert D2 == D2_, 'matrix should by symmetric'
assert E2 == E2_, 'matrix should by symmetric'
print('--------------------------------')
print("Now, using wikipedia' matrix representation of a conic.")
con = np.array(
(
(' A', 'B / 2', 'D / 2'),
('B / 2', ' C', 'E / 2'),
('D / 2', 'E / 2', ' F'),
)
)
ut.horiz_print('A matrix A_Q = ', con)
# A_Q is our conic section (aka ellipse matrix)
A_Q = np.array(((A, B / 2, D / 2), (B / 2, C, E / 2), (D / 2, E / 2, F)))
ut.horiz_print('A_Q = ', A_Q)
# -----------------------
# DEGENERATE CONICS
# References:
# http://individual.utoronto.ca/somody/quiz.html
print('----------------------------------')
print('As long as det(A_Q) != it is not degenerate.')
print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
assert np.linalg.det(A_Q) != 0, 'degenerate conic'
A_33 = np.array(((A, B / 2), (B / 2, C)))
ut.horiz_print('A_33 = ', A_33)
# -----------------------
# CONIC CLASSIFICATION
# -----------------------
print('----------------------------------')
print('The determinant of the minor classifies the type of conic it is')
print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
print('det(A_33) = %s' % str(np.linalg.det(A_33)))
assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
print('... this is indeed an ellipse')
# -----------------------
# CONIC CENTER
# -----------------------
print('----------------------------------')
print('the centers of the ellipse are obtained by: ')
print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
# Centers are obtained by solving for where the gradient of the quadratic
# becomes 0. Without going through the derivation the calculation is...
# These should be 0, 0 if we are at the origin, or our original x, y
# coordinate specified by the keypoints. I'm doing the calculation just for
# shits and giggles
x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
ut.horiz_print('x_center = ', x_center)
ut.horiz_print('y_center = ', y_center)
# -----------------------
# MAJOR AND MINOR AXES
# -----------------------
# Now we are going to determine the major and minor axis
# of this beast. It just the center augmented by the eigenvecs
print('----------------------------------')
# Plot ellipse axis
# !HELP! I DO NOT KNOW WHY I HAVE TO DIVIDE, SQUARE ROOT, AND NEGATE!!!
(evals, evecs) = np.linalg.eig(A_33)
l1, l2 = evals
# The major and minor axis lengths
b = 1 / np.sqrt(l1)
a = 1 / np.sqrt(l2)
v1, v2 = evecs
# Find the transformation to align the axis
nminor = v1
nmajor = v2
dx1, dy1 = v1 * b
dx2, dy2 = v2 * a
minor = np.array([dx1, -dy1])
major = np.array([dx2, -dy2])
x_axis = np.array([[1], [0]])
cosang = (x_axis.T.dot(nmajor)).T
# Rotation angle
theta = np.arccos(cosang)
print('a = ' + str(a))
print('b = ' + str(b))
print('theta = ' + str(theta[0] / TAU) + ' * 2pi')
# The warped | |
to plane-stress conditions, the transformed in-plane stress and strain systems of equations can be derived by substituting the plane-stress conditioned stiffness and compliance matrices (Eqn. [23], [24]) into Eqns. [31] and [32]:
#
# $$ [\mathbf{\bar{Q}}] = [\mathbf{\hat{T}_{\sigma}}][\mathbf{Q}][\mathbf{\hat{T}_{\varepsilon}}]^{-1} $$
#
# $$ [\mathbf{\bar{S}}] = [\mathbf{\hat{T}_{\varepsilon}}][\mathbf{S}][\mathbf{\hat{T}_{\sigma}}]^{-1} $$
#
#
# ### 2.3.3 References
#
# 1. <NAME>. (2001). Transformation of stresses and strains. Lecture Notes for Mechanics of Materials.
# ## 2.4 Laminate structural durability calculations
# ### 2.4.1 Stiffness matrices for in-plane stress conditioned laminates
#
# Eqns. [35] and [36] represent the plane-stress conditioned stiffness and compliance matrices for individual plies. To approximate the stress-strain relationships of entire laminates (multi-layer manufactured plies), we define the [A], [B] and [D] stiffness matrices:
#
# $$ [\mathbf{A}] = \int_{-h_{b}}^{h_{t}} [\bar{\mathbf{Q}}]dz $$, and each $[A_{ij}]$ element defined by:
#
# $$ A_{ij} = \int_{-h_{b}}^{h_{t}} \bar{Q_{ij}}dz $$
#
# Where:
#
# * $h_{b}$ represents the distance of the laminate plies from the reference plane to the bottom surface of the entire laminate structure
# * $h_{t}$ represents the distance of the laminate plies from the reference plane to the top surface of the entire laminate structure
# * Recall that $[\bar{Q}]$ represents the in-plane stress conditioned, transformed stiffness matrix of each ply
#
# The variable '$\mathbf{z}$' in Eqn. [37] defines the distance of the '$\mathbf{z^{th}}$' ply from the reference plane
#
# $$ [\mathbf{B}] = \int_{-h_{b}}^{h_{t}} z[\bar{\mathbf{Q}}]dz $$, and each $[B_{ij}]$ element defined by:
#
# $$ B_{ij} = \int_{-h_{b}}^{h_{t}} z\bar{Q_{ij}}dz $$
#
# and
#
# $$ [\mathbf{D}] = \int_{-h_{b}}^{h_{t}} z^{2}[\bar{\mathbf{Q}}]dz $$, and each $[D_{ij}]$ element defined by:
#
# $$ D_{ij} = \int_{-h_{b}}^{h_{t}} z^{2}\bar{Q_{ij}}dz $$
#
# From the assumption that the composite plies and laminates, tested for the quasi-static fatigue loading investigations, exhibit linear elastic behaviour, it is assumed that $[\bar{Q}]$ is constant across each ply. Thus, the laminate stiffness and compliance integrals above can be replaced by the summations:
#
# $$ A_{ij} = \sum_{k=1}^{K} (\bar{Q_{ij}})_{k}(z_{k}-z_{k-1}) $$
#
# $$ B_{ij} = \frac{1}{2}\sum_{k=1}^{K} (\bar{Q_{ij}})_{k}(z^{2}_{k}-z^{2}_{k-1}) $$
#
# $$ D_{ij} = \frac{1}{3}\sum_{k=1}^{K} (\bar{Q_{ij}})_{k}(z^{3}_{k}-z^{3}_{k-1}) $$
# ## 2.5 Mechanics of in-plane stress-conditioned composite laminates
# ### 2.5.1 In-plane forces and moments
#
# From the [A], [B] and [D] in-plane stiffness matrix elements described in Eqns. [43] through [45], the in-plane forces and moments of the laminate can be related to the in-plane strain and curvature response of the laminate. For a laminate subject to the in-plane stress condition in the 1-2 plane, this relationship is:
#
# $$ \left( \begin{array}{c} N_{1} \\ N_{2} \\ N_{1-2} \\ M_{1} \\ M_{2} \\ M_{1-2} \\ \end{array} \right) =
# \begin{bmatrix} A_{11} & A_{12} & A_{16} & B_{11} & B_{12} & B_{16} \\
# A_{21} & A_{22} & A_{26} & B_{21} & B_{22} & B_{26} \\
# A_{61} & A_{62} & A_{66} & B_{61} & B_{62} & B_{66} \\
# B_{11} & B_{12} & B_{16} & D_{11} & D_{12} & D_{16} \\
# B_{21} & B_{22} & B_{26} & D_{21} & D_{22} & D_{26} \\
# B_{61} & B_{62} & B_{66} & D_{61} & D_{62} & D_{66} \\
# \end{bmatrix}
# \cdot
# \left( \begin{array}{c} \varepsilon^o_{1} \\ \varepsilon^o_{2} \\ \gamma^o_{1-2} \\ \kappa_{1} \\ \kappa_{2} \\ \kappa_{12} \\ \end{array} \right) $$
#
# Inversion of Eqn. [46] defines the strain and curvature of the laminate in terms of the in-plane force loading and moments of the laminate. For a laminate force-loaded in the 1-2 plane:
#
# $$ \left(\begin{array}{c} \varepsilon^o_{1} \\ \varepsilon^o_{2} \\ \gamma^o_{1-2} \\ \kappa_{1} \\ \kappa_{2} \\ \kappa_{12} \\ \end{array} \right) =
# \begin{bmatrix} \alpha_{11} & \alpha_{12} & \alpha_{16} & \beta_{11} & \beta_{12} & \beta_{16} \\
# \alpha_{21} & \alpha_{22} & \alpha_{26} & \beta_{21} & \beta_{22} & \beta_{26} \\
# \alpha_{61} & \alpha_{62} & \alpha_{66} & \beta_{61} & \beta_{62} & \beta_{66} \\
# \beta_{11} & \beta_{12} & \beta_{16} & \delta_{11} & \delta_{12} & \delta_{16} \\
# \beta_{21} & \beta_{22} & \beta_{26} & \delta_{21} & \delta_{22} & \delta_{26} \\
# \beta_{61} & \beta_{62} & \beta_{66} & \delta_{61} & \delta_{62} & \delta_{66} \\
# \end{bmatrix}
# \cdot
# \left(\begin{array}{c} N_{1} \\ N_{2} \\ N_{1-2} \\ M_{1} \\ M_{2} \\ M_{1-2} \\ \end{array} \right)
# $$
#
# ### 2.5.2 Importance of the [A], [B] and [D] matrices to laminate structural durability analyses
#
# The [A], [B] and [D] matrices characterize the stiffness of the laminates, the degree to which the composite laminate will elastically deform, when subjected to certain force-loading conditions.
#
# For the purposes of the quasi-static fatigue (tensile and compressive) in-plane force-loading of carbon fibre/epoxy laminate composite coupons (the experiments designed to investigate the *linear* elastic structural durability of these composite materials), the significance of these matrices are as follows:
#
# 1. The $A_{ij}$ stiffness matrix elements relate the in-plane forces, imposed on the laminate coupons, to the in-plane (*elastic*) deformations manifested in the laminates (*under tensile or compressive force-loading*)
#
# 2. The $B_{ij}$ stiffness matrix elements are the in-plane–out-of-plane coupling stiffnesses that relate the:
# * in-plane forces, imposed on the laminate coupons, to the resultant curvatures of the laminate
# * moments, imposed on the laminate, to the resultant in-plane deformation of the laminate
#
# 3. The $D_{ij}$ stiffness matrix elements are the bending stiffnesses that relate the moments, imposed on the laminate, to the resultant curvatures of the laminate
#
# Examination of the [A], [B], and [D] matrices show that different types of couplings may occur. For the experimental (tensile and compressive force-loading) of the composite laminate coupons in the 1-2 plane, the following important force-moment-curvature-deformation couplings are worth noting:
#
# 1. **Extension–shear coupling**
# * When the elements $A_{16}$, $A_{26}$ (of the $A_{ij}$ elements) are not zero, in-plane normal forces ($N_{1}, N_{2}$) cause shear deformation ($\gamma^o_{1-2}$), and a twist force ($N_{1-2}$) causes elongations in the 1 and 2 directions
#
# 2. **Bending–twist coupling**
# * When the elements $D_{16}$, $D_{26}$ are not zero, bending moments ($M_{1}, M_{2}$) may cause a twisting of the laminate ($\kappa_{1-2}$), and a twist moment ($M_{1-2}$) causes curvatures in the 1–3 and 2–3 planes
#
# 3. **Extension–twist and bending–shear coupling**
# * When the elements $B_{16}$, $B_{26}$ are not zero, in-plane normal forces ($N_{1}, N_{2}$) cause twist ($\kappa_{1-2}$), and bending moments ($M_{1}, M_{2}$) result in shear deformation ($\gamma^o_{1-2}$)
#
# 4. **In-plane–out-of-plane coupling**
# * When the $B_{ij}$ stiffness matrix elements are not zero, in-plane forces ($N_{1}, N_{2}, N_{1-2}$) cause out-of-plane deformations (curvatures) of the laminate, and moments ($M_{1}, M_{2}, M_{1-2}$) cause in-plane deformations in the 1-2 plane.
#
# It is worth noting that these four types of coupling are characteristic of composite materials and do not occur in homogeneous isotropic materials. The following two couplings occur in both composite and isotropic materials:
#
# 5. **Extension–extension coupling**
# * When the element $A_{12}$ is not zero, a normal force $N_{1}$ causes elongation in the 2 direction ($\varepsilon^o_{2}$), and a normal force $N_{2}$ causes elongation in the 1 direction ($\varepsilon^o_{1}$)
#
# 6. **Bending–bending coupling**
# * When the element $D_{12}$ is not zero, a bending moment $M_{1}$ causes curvature of the laminate in the 2-3 plane ($\kappa_{2}$), and a bending moment $M_{2}$ causes curvature of the laminate in the 1–3 plane ($\kappa_{1}$)
# ## 2.6 Applications of [A], [B], [D] ( [$\alpha$], [$\beta$], [$\delta$] ) matrices to the (*elastic*) structural durability characterization of experiment carbon fibre/epoxy composite coupons
# * [§3: Structural durability analyses of carbon fibre & epoxy-based composites - Experimental results](DLN - §3 - Structural durability analyses of carbon fibre & epoxy-based composites - Experimental.ipynb) is the DLN entry that uses Python scientific programming libraries to explore and visualize quasi-fatigue tensile & compressive loading experiments on carbon fibre/epoxy composite test coupons. From analyses of the experiments, the elastic properties of the test coupons are determined.
# <br>
#
# * [§4: Structural durability analyses of carbon fibre & epoxy-based composites - Matrix calculations](DLN - §2 - Structural durability analyses of carbon fibre & epoxy-based composites - Calculations.ipynb) is the DLN entry that uses MATLAB to perform structural durability matrix calculations from carbon fibre/epoxy composite test | |
<reponame>asrlabncku/RAP
from chainer import cuda
from chainer import function
from chainer.utils import type_check
from chainer.utils import conv
from ctypes import *
from six import moves
import numpy as np
import cupy
import time
import os
# NOTE(review): hard-coded absolute, user-specific path to the native
# library — this only loads on the original author's machine. Consider
# deriving it from __file__ or an environment variable.
dllpath = '/home/monica/Documents/chainer-1.17.0-RAP/chainer/functions/cnet/libcnet.so'
lib = CDLL(dllpath, RTLD_GLOBAL)
# def _kern():
# return cuda.elementwise(
# 'T x', 'T y',
# 'y = x >= 0 ? 1 : -1',
# 'binarize')
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class data_t(Structure):
    # Mirrors the C ``data_t`` struct in libcnet: a flat float buffer with
    # its element count and an optional parallel gradient buffer.
    _fields_ = [("size", c_int),
                ("val", POINTER(c_float)),
                ("grad", POINTER(c_float))]
class conv_layer_t(Structure):
    # Mirrors the C ``conv_layer_t`` struct: convolution geometry for libcnet.
    # Field meanings are established by the callers below, which assign
    # conv_p.ih = h (height), conv_p.iw = w (width), conv_p.ic = c (channels);
    # the original inline comments had height and channel swapped.
    _fields_ = [("ic", c_int),  # in channels
                ("iw", c_int),  # in width
                ("ih", c_int),  # in height
                ("oc", c_int),  # out channels
                ("ow", c_int),  # out width
                ("oh", c_int),  # out height
                ("k", c_int),   # kernel size (one value: square kernels)
                ("s", c_int),   # stride (same for both axes)
                ("p", c_int)]   # padding (same for both axes)
class layer(Structure):
    # Mirrors the C ``layer`` struct: batch size plus the data buffers for one
    # layer call. ``extra`` is scratch space (callers size it as
    # c * k * k * out_w * out_h).
    _fields_ = [("batch", c_int),
                ("input", data_t),
                ("output", data_t),
                ("weight", data_t),
                ("bias", data_t),
                ("extra", data_t)]
# Bind the native entry points; both take (layer*, conv_layer_t*).
forward_convolutional_layer = lib.conv_layer_forward
forward_convolutional_layer.argtypes = [POINTER(layer), POINTER(conv_layer_t)]
backward_convolutional_layer = lib.conv_layer_backward
backward_convolutional_layer.argtypes = [POINTER(layer), POINTER(conv_layer_t)]
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class CnetConvolution2DFunction(function.Function):
def __init__(self, stride=1, pad=0, use_cudnn=True):
    # Normalize scalar-or-pair arguments into explicit (y, x) components.
    self.sy, self.sx = _pair(stride)
    self.ph, self.pw = _pair(pad)
    # NOTE(review): stored for API parity but not consulted by the
    # forward/backward paths visible in this file.
    self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
    """Validate the forward inputs: ``(x, W)`` or ``(x, W, b)``.

    x and W must be float32 4-D arrays whose channel dimensions match;
    the optional bias b must be a float32 vector with one entry per
    output channel (W's first axis).
    """
    n_in = in_types.size()
    type_check.expect(2 <= n_in, n_in <= 3)
    x_type = in_types[0]
    w_type = in_types[1]
    type_check.expect(
        x_type.dtype == np.float32,
        w_type.dtype == np.float32,
        x_type.ndim == 4,
        w_type.ndim == 4,
        x_type.shape[1] == w_type.shape[1],
    )
    # Bias checks only apply when the third input is present.
    if n_in.eval() == 3:
        b_type = in_types[2]
        type_check.expect(
            b_type.dtype == np.float32,
            b_type.ndim == 1,
            b_type.shape[0] == w_type.shape[0],
        )
def forward_cpu(self, inputs):
    """Run the convolution forward pass through the native C library.

    Args:
        inputs: ``(x, W)`` or ``(x, W, b)`` where
            x is a (n, c, h, w) float32 batch,
            W is a (out_c, c, kh, kw) float32 filter bank, and
            b is an optional (out_c,) float32 bias.

    Returns:
        1-tuple holding the (n, out_c, out_h, out_w) float32 output.
    """
    n, c, h, w = inputs[0].shape
    # The C side reads flat, contiguous float buffers via raw pointers.
    x = np.ascontiguousarray(_as_mat(inputs[0]))
    W = np.ascontiguousarray(inputs[1])
    out_c, _, kh, kw = W.shape
    # Optional bias: fall back to zeros when only (x, W) were supplied
    # (check_type_forward allows 2 inputs; the original raised IndexError).
    if len(inputs) == 3:
        b = np.ascontiguousarray(inputs[2])
    else:
        b = np.zeros(out_c, dtype=np.float32)
    # Output geometry.  Floor division keeps the sizes ints on Python 3;
    # width uses pw/kw/sx and height uses ph/kh/sy (the original reused
    # ph/kh/sx for both axes, which was only correct for square kernels
    # with symmetric padding and stride).
    out_w = (w + 2 * self.pw - kw) // self.sx + 1
    out_h = (h + 2 * self.ph - kh) // self.sy + 1
    output_size = out_h * out_w * out_c * n
    o = (c_float * output_size)()
    # Per-image scratch workspace (sized like an im2col buffer).
    extra_size = c * kh * kh * out_w * out_h
    e = (c_float * extra_size)()
    l = layer()
    conv_p = conv_layer_t()
    l.batch = n
    # Sizes are per sample; the C side advances by batch index.
    l.input = data_t(c * h * w, cast(x.ctypes.data, POINTER(c_float)))
    l.output = data_t(out_h * out_w * out_c, o)
    l.bias = data_t(np.size(b), cast(b.ctypes.data, POINTER(c_float)))
    l.weight = data_t(np.size(W), cast(W.ctypes.data, POINTER(c_float)))
    l.extra = data_t(extra_size, e)
    conv_p.ih = h
    conv_p.iw = w
    conv_p.ic = c
    conv_p.oh = out_h
    conv_p.ow = out_w
    conv_p.oc = out_c
    conv_p.k = kh
    conv_p.s = self.sx
    conv_p.p = self.ph
    forward_convolutional_layer(byref(l), byref(conv_p))
    y = np.ctypeslib.as_array(
        (c_float * output_size).from_address(addressof(o)))
    y = y.reshape(n, out_c, out_h, out_w)
    # Copy: the ctypes buffer ``o`` dies with this frame.
    return np.copy(y),
def forward_gpu(self, inputs):
    """Forward pass for device arrays.

    The native library is CPU-only, so inputs are copied to host memory,
    convolved there, and the result is copied back to the device.
    """
    n, c, h, w = inputs[0].shape
    nx = cupy.asnumpy(inputs[0]).flatten()
    W = inputs[1]
    out_c, _, kh, kw = W.shape
    nW = cupy.asnumpy(W.flatten())
    nb = cupy.asnumpy(inputs[2])
    # Floor division keeps the sizes ints on Python 3; width uses pw/kw/sx
    # and height uses ph/kh/sy (the original reused ph/kh/sx for both axes).
    out_w = (w + 2 * self.pw - kw) // self.sx + 1
    out_h = (h + 2 * self.ph - kh) // self.sy + 1
    output_size = out_h * out_w * out_c * n
    o = (c_float * output_size)()
    extra_size = c * kh * kh * out_w * out_h
    e = (c_float * extra_size)()
    l = layer()
    conv_p = conv_layer_t()
    l.batch = n
    l.input = data_t(c * h * w, cast(nx.ctypes.data, POINTER(c_float)))
    l.output = data_t(out_h * out_w * out_c, o)
    l.bias = data_t(np.size(nb), cast(nb.ctypes.data, POINTER(c_float)))
    l.weight = data_t(np.size(nW), cast(nW.ctypes.data, POINTER(c_float)))
    l.extra = data_t(extra_size, e)
    conv_p.ih = h
    conv_p.iw = w
    conv_p.ic = c
    conv_p.oh = out_h
    conv_p.ow = out_w
    conv_p.oc = out_c
    conv_p.k = kh
    conv_p.s = self.sx
    conv_p.p = self.ph
    forward_convolutional_layer(byref(l), byref(conv_p))
    y = np.ctypeslib.as_array(
        (c_float * output_size).from_address(addressof(o)))
    y = y.reshape(n, out_c, out_h, out_w)
    # Copy back to the device for downstream chainer functions.
    return cupy.asarray(y),
def backward_cpu(self, inputs, grad_outputs):
    """Backward pass through the native C library.

    Computes gradients w.r.t. the input, the filters, and the bias from
    ``grad_outputs[0]`` (shape (n, out_c, out_h, out_w)).

    Returns:
        ``(gx, gW, gb)`` with the same shapes as the forward inputs.
    """
    # Contiguous host buffers for the raw-pointer C interface.
    x = np.ascontiguousarray(_as_mat(inputs[0]))
    W = np.ascontiguousarray(inputs[1]).flatten()
    b = inputs[2]
    gy = np.ascontiguousarray(grad_outputs[0])
    n, c, h, w = inputs[0].shape
    out_c, _, kh, kw = inputs[1].shape
    # Same geometry as the forward pass: floor division for int sizes,
    # pw/kw/sx for width and ph/kh/sy for height (the original reused
    # ph/kh/sx for both axes).
    out_w = (w + 2 * self.pw - kw) // self.sx + 1
    out_h = (h + 2 * self.ph - kh) // self.sy + 1
    bias_grad = (c_float * np.size(b))()
    weight_grad = (c_float * np.size(W))()
    input_grad = (c_float * np.size(x))()
    extra_size = c * kh * kh * out_w * out_h
    e_grad = (c_float * extra_size)()
    e = (c_float * extra_size)()
    l = layer()
    conv_p = conv_layer_t()
    l.batch = n
    l.input = data_t(c * h * w, cast(x.ctypes.data, POINTER(c_float)), input_grad)
    l.output = data_t(out_h * out_w * out_c,
                      grad=cast(gy.ctypes.data, POINTER(c_float)))
    l.bias = data_t(np.size(b), cast(b.ctypes.data, POINTER(c_float)), bias_grad)
    l.weight = data_t(np.size(W), cast(W.ctypes.data, POINTER(c_float)), weight_grad)
    l.extra = data_t(extra_size, e, e_grad)
    conv_p.ih = h
    conv_p.iw = w
    conv_p.ic = c
    conv_p.oh = out_h
    conv_p.ow = out_w
    conv_p.oc = out_c
    conv_p.k = kh
    conv_p.s = self.sx
    conv_p.p = self.ph
    backward_convolutional_layer(byref(l), byref(conv_p))
    gx = np.ctypeslib.as_array((c_float * np.size(x)).from_address(addressof(input_grad)))
    gW = np.ctypeslib.as_array((c_float * np.size(W)).from_address(addressof(weight_grad)))
    gb = np.ctypeslib.as_array((c_float * np.size(b)).from_address(addressof(bias_grad)))
    # Copy: the ctypes gradient buffers die with this frame.
    gx = np.copy(gx.reshape(inputs[0].shape))
    gW = np.copy(gW.reshape(inputs[1].shape))
    gb = np.copy(gb)
    return gx, gW, gb
def backward_gpu(self, inputs, grad_outputs):
x = _as_mat(inputs[0])
nx = cupy.asnumpy(x)
W = inputs[1].flatten()
nW = cupy.asnumpy(W)
b = inputs[2]
nb = cupy.asnumpy(b)
out_g = cupy.asnumpy(grad_outputs)
# print(grad_outputs[0].shape)
n, c, h, w = inputs[0].shape
out_c, _, kh, kw = inputs[1].shape
out_w = (w + 2 * self.ph - kh) / self.sx + 1
out_h = (h + 2 * self.ph - kh) / self.sx + 1
T_batch = n
bias_grad = (c_float * np.size(b))()
weight_grad = (c_float * np.size(W))()
input_grad = (c_float * np.size(x))()
extra_size = c * kh * kh * out_w * out_h
e_grad = (c_float * extra_size)()
e = (c_float * extra_size)()
l = layer()
conv_p = conv_layer_t()
| |
"""
sets
~~~~~
The `sets` module contains a standard collection, :class:`Set`, which is based
on Python's built-in set type.
Its elements are stored in a Redis `set <http://redis.io/commands#set>`_
structure.
"""
import collections.abc as collections_abc
from functools import reduce
import operator
from redis.client import Pipeline
from .base import RedisCollection
class Set(RedisCollection, collections_abc.MutableSet):
    """
    Collection based on the built-in Python :class:`set` type.
    Items are stored in a Redis hash structure.
    See Python's `set documentation
    <https://docs.python.org/3/library/stdtypes.html#set>`_ for usage notes.
    """
    # Use protocol-3 pickling from the base class for member serialization.
    _pickle = RedisCollection._pickle_3
def __init__(self, *args, **kwargs):
"""
Create a new Set object.
If the first argument (*data*) is an iterable object, create the new
Set with its elements as the initial data.
:param data: Initial data.
:type data: iterable
:param redis: Redis client instance. If not provided, default Redis
connection is used.
:type redis: :class:`redis.StrictRedis`
:param key: Redis key for the collection. Collections with the same key
point to the same data. If not provided, a random
string is generated.
:type key: str
"""
data = args[0] if args else kwargs.pop('data', None)
super().__init__(**kwargs)
if data:
self.update(data)
def _data(self, pipe=None):
pipe = self.redis if pipe is None else pipe
if isinstance(pipe, Pipeline):
pipe.smembers(self.key)
members = pipe.execute()[-1]
else:
members = pipe.smembers(self.key)
return (self._unpickle(x) for x in members)
def _repr_data(self):
items = (repr(v) for v in self.__iter__())
return '{{{}}}'.format(', '.join(items))
# Magic methods
def __contains__(self, value, pipe=None):
"""Test for membership of *value* in the set."""
pipe = self.redis if pipe is None else pipe
if isinstance(pipe, Pipeline):
pipe.sismember(self.key, self._pickle(value))
is_member = pipe.execute()[-1]
else:
is_member = pipe.sismember(self.key, self._pickle(value))
return bool(is_member)
def __iter__(self, pipe=None):
"""Return an iterator over elements of the set."""
pipe = self.redis if pipe is None else pipe
return self._data(pipe)
def __len__(self, pipe=None):
"""Return cardinality of the set."""
pipe = self.redis if pipe is None else pipe
if isinstance(pipe, Pipeline):
pipe.scard(self.key)
ret = pipe.execute()[-1]
else:
ret = pipe.scard(self.key)
return ret
# Named methods
def add(self, value):
"""Add element *value* to the set."""
# Raise TypeError if value is not hashable
hash(value)
self.redis.sadd(self.key, self._pickle(value))
def copy(self, key=None):
other = self.__class__(redis=self.redis, key=key)
other.update(self)
return other
    def clear(self, pipe=None):
        """Remove all elements from the set."""
        # Delegates to the base-class helper, which deletes the Redis key
        # (optionally inside an in-flight pipeline).
        self._clear(pipe)
def discard(self, value):
"""Remove element *value* from the set if it is present."""
# Raise TypeError if value is not hashable
hash(value)
self.redis.srem(self.key, self._pickle(value))
def isdisjoint(self, other):
"""
Return ``True`` if the set has no elements in common with *other*.
Sets are disjoint if and only if their intersection is the empty set.
:param other: Any kind of iterable.
:rtype: boolean
"""
def isdisjoint_trans_pure(pipe):
pipe.multi()
pipe.sinter(self.key, other.key)
result = pipe.execute()[-1]
return not result
def isdisjoint_trans_mixed(pipe):
pipe = pipe.multi()
self_values = set(self.__iter__(pipe))
if use_redis:
other_values = set(other.__iter__(pipe))
else:
other_values = set(other)
return self_values.isdisjoint(other_values)
if self._same_redis(other):
return self._transaction(isdisjoint_trans_pure, other.key)
if self._same_redis(other, RedisCollection):
use_redis = True
return self._transaction(isdisjoint_trans_mixed, other.key)
use_redis = False
return self._transaction(isdisjoint_trans_mixed)
def pop(self):
"""
Remove and return an arbitrary element from the set.
Raises :exc:`KeyError` if the set is empty.
"""
result = self.redis.spop(self.key)
if result is None:
raise KeyError
return self._unpickle(result)
def random_sample(self, k=1):
"""
Return a *k* length list of unique elements chosen from the Set.
Elements are not removed. Similar to :func:`random.sample` function
from standard library.
:param k: Size of the sample, defaults to 1.
:rtype: :class:`list`
"""
# k == 0: no work to do
if k == 0:
results = []
# k == 1: same behavior on all versions of Redis
elif k == 1:
results = [self.redis.srandmember(self.key)]
# k != 1, Redis version >= 2.6: compute in Redis
else:
results = self.redis.srandmember(self.key, k)
return [self._unpickle(x) for x in results]
def remove(self, value):
"""
Remove element *value* from the set. Raises :exc:`KeyError` if it
is not contained in the set.
"""
# Raise TypeError if value is not hashable
hash(value)
result = self.redis.srem(self.key, self._pickle(value))
if not result:
raise KeyError(value)
def scan_elements(self):
"""
Yield each of the elements from the collection, without pulling them
all into memory.
.. warning::
This method is not available on the set collections provided
by Python.
This method may return the element multiple times.
See the `Redis SCAN documentation
<http://redis.io/commands/scan#scan-guarantees>`_ for details.
"""
for x in self.redis.sscan_iter(self.key):
yield self._unpickle(x)
# Comparison and set operation helpers
    def _ge_helper(self, other, op, check_type=False):
        # Shared implementation for __ge__, __gt__ and issuperset().
        # *op* first compares cardinalities (cheap); superset-ness is then
        # confirmed by checking that ``other - self`` is empty.
        if check_type and not isinstance(other, collections_abc.Set):
            raise TypeError
        def ge_trans_pure(pipe):
            # Both sets live on the same Redis server: compute the
            # difference server-side.
            pipe.multi()
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            pipe.sdiff(other.key, self.key)
            sdiff = pipe.execute()[-1]
            return not sdiff
        def ge_trans_mixed(pipe):
            # *other* is a plain iterable or lives on a different Redis.
            # NOTE: reads ``use_redis`` from the enclosing scope; it is
            # assigned below, before the transaction runs.
            pipe.multi()
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(self.__contains__(v, pipe=pipe) for v in values)
        if self._same_redis(other):
            return self._transaction(ge_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(ge_trans_mixed, other.key)
        use_redis = False
        return self._transaction(ge_trans_mixed)
    def _le_helper(self, other, op, check_type=False):
        # Shared implementation for __le__, __lt__, __eq__ and issubset().
        # *op* first compares cardinalities (cheap); subset-ness is then
        # confirmed by checking that ``self - other`` is empty.
        if check_type and not isinstance(other, collections_abc.Set):
            raise TypeError
        def le_trans_pure(pipe):
            # Both sets live on the same Redis server: compute the
            # difference server-side.
            pipe.multi()
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            pipe.sdiff(self.key, other.key)
            sdiff = pipe.execute()[-1]
            return not sdiff
        def le_trans_mixed(pipe):
            # *other* is a plain iterable or lives on a different Redis.
            # NOTE: reads ``use_redis`` from the enclosing scope; it is
            # assigned below, before the transaction runs.
            pipe.multi()
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(v in values for v in self.__iter__(pipe))
        if self._same_redis(other):
            return self._transaction(le_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(le_trans_mixed, other.key)
        use_redis = False
        return self._transaction(le_trans_mixed)
    def _op_update_helper(
        self, others, op, redis_op, update=False, check_type=False
    ):
        # Shared implementation for the binary set operations and their
        # in-place variants. *op* is the Python operator (e.g. operator.and_),
        # *redis_op* the server-side command name ('sinter', 'sinterstore',
        # ...). With update=True the result replaces this set's contents and
        # None is returned; otherwise a plain Python set is returned.
        if (
            check_type and
            not all(isinstance(x, collections_abc.Set) for x in others)
        ):
            raise TypeError
        def op_update_trans_pure(pipe):
            # Every operand lives on the same Redis server: run a single
            # server-side command over all keys.
            pipe.multi()
            method = getattr(pipe, redis_op)
            if not update:
                method(self.key, *other_keys)
                result = pipe.execute()[-1]
                return {self._unpickle(x) for x in result}
            # In-place variant: build the result under a temporary key,
            # then rename it over this set's key.
            temp_key = self._create_key()
            method(temp_key, self.key, *other_keys)
            pipe.rename(temp_key, self.key)
        def op_update_trans_mixed(pipe):
            # At least one operand is a plain iterable (or lives on another
            # server): fall back to computing the result in Python.
            pipe.multi()
            self_values = set(self.__iter__(pipe))
            other_values = []
            for other in others:
                if isinstance(other, RedisCollection):
                    other_values.append(set(other.__iter__(pipe)))
                else:
                    other_values.append(set(other))
            if not update:
                return reduce(op, other_values, self_values)
            new_values = reduce(op, other_values, self_values)
            pipe.delete(self.key)
            for v in new_values:
                pipe.sadd(self.key, self._pickle(v))
        # Classify the operands: only if every one is a Set on the same
        # server can the pure (server-side) path be used.
        other_keys = []
        all_redis_sets = True
        for other in others:
            if self._same_redis(other):
                other_keys.append(other.key)
            elif self._same_redis(other, RedisCollection):
                other_keys.append(other.key)
                all_redis_sets = False
            else:
                all_redis_sets = False
        if all_redis_sets:
            return self._transaction(op_update_trans_pure, *other_keys)
        return self._transaction(op_update_trans_mixed, *other_keys)
def _rop_helper(self, other, op):
if not isinstance(other, collections_abc.Set):
raise TypeError
return op(set(other), set(self.__iter__()))
def _xor_helper(self, other, update=False, check_type=False):
if check_type and not isinstance(other, collections_abc.Set):
raise TypeError
def xor_trans_pure(pipe):
pipe.multi()
diff_1_key = self._create_key()
pipe.sdiffstore(diff_1_key, self.key, other.key)
diff_2_key = self._create_key()
pipe.sdiffstore(diff_2_key, other.key, self.key)
if update:
pipe.sunionstore(self.key, diff_1_key, diff_2_key)
ret = None
else:
pipe.sunion(diff_1_key, diff_2_key)
ret = pipe.execute()[-1]
ret = {self._unpickle(x) for x in ret}
pipe.delete(diff_1_key, diff_2_key)
return ret
def xor_trans_mixed(pipe):
pipe.multi()
self_values = set(self.__iter__(pipe))
if use_redis:
other_values = set(other.__iter__(pipe))
else:
other_values = set(other)
result = self_values ^ other_values
if update:
pipe.delete(self.key)
pipe.sadd(self.key, *(self._pickle(x) for x in result))
return None
return result
if self._same_redis(other):
return self._transaction(xor_trans_pure, other.key)
elif self._same_redis(other, RedisCollection):
use_redis = True
return self._transaction(xor_trans_mixed, other.key)
use_redis = False
return self._transaction(xor_trans_mixed)
# Intersection
def __and__(self, other):
return self._op_update_helper(
(other,), operator.and_, 'sinter', check_type=True
)
    def __rand__(self, other):
        """Reflected intersection: ``other & self`` for plain-set *other*."""
        return self._rop_helper(other, operator.and_)
def __iand__(self, other):
self._op_update_helper(
(other,),
operator.and_,
'sinterstore',
update=True,
check_type=True,
)
return self
def intersection(self, *others):
"""
Return a new set with elements common to the set and all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: :class:`set`
.. note::
The same behavior as at :func:`union` applies.
"""
return self._op_update_helper(tuple(others), operator.and_, 'sinter')
def intersection_update(self, *others):
"""
Update the set, keeping only elements found in it and all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: None
.. note::
The same behavior as at :func:`difference_update` applies.
"""
return self._op_update_helper(
tuple(others), operator.and_, 'sinterstore', update=True
)
# Comparison
    def __ge__(self, other):
        """Return True if the set is a superset of *other* (must be a set)."""
        return self._ge_helper(other, operator.ge, check_type=True)
    def issuperset(self, other):
        """
        Test whether every element in *other* is in the set.

        Unlike ``>=``, accepts any iterable, not just sets.

        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._ge_helper(other, operator.ge)
    def __gt__(self, other):
        """Return True if the set is a proper superset of *other* (a set)."""
        return self._ge_helper(other, operator.gt, check_type=True)
def __eq__(self, other):
return self._le_helper(other, operator.eq, check_type=True)
    def __le__(self, other):
        """Return True if the set is a subset of *other* (must be a set)."""
        return self._le_helper(other, operator.le, check_type=True)
    def issubset(self, other):
        """
        Test whether every element in the set is in *other*.

        Unlike ``<=``, accepts any iterable, not just sets.

        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._le_helper(other, operator.le)
def __lt__(self, other):
return self._le_helper(other, operator.lt)
# Union
def __or__(self, | |
0],
optical_axis_OHB[t, 1],
optical_axis_OHB[t, 2],
Time_OHB[t],
)
optical_axis_OHB_ECEF[t, :] = optical_axis_OHB_ECEF[t, :] / norm(
optical_axis_OHB_ECEF[t, :]
)
(
r_MATS_OHB_ECEF[t, 0],
r_MATS_OHB_ECEF[t, 1],
r_MATS_OHB_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
r_MATS_OHB[t, 0], r_MATS_OHB[t, 1], r_MATS_OHB[t, 2], Time_OHB[t]
)
(
lat_MATS_OHB[t],
long_MATS_OHB[t],
alt_MATS_OHB[t],
) = MATS_coordinates.ECEF2lla(
r_MATS_OHB_ECEF[t, 0], r_MATS_OHB_ECEF[t, 1], r_MATS_OHB_ECEF[t, 2]
)
(
r_LP_OHB_ECEF[t, 0],
r_LP_OHB_ECEF[t, 1],
r_LP_OHB_ECEF[t, 2],
) = MATS_coordinates.ecef2tanpoint(
r_MATS_OHB_ECEF[t, 0],
r_MATS_OHB_ECEF[t, 1],
r_MATS_OHB_ECEF[t, 2],
optical_axis_OHB_ECEF[t, 0],
optical_axis_OHB_ECEF[t, 1],
optical_axis_OHB_ECEF[t, 2],
)
lat_LP_OHB[t], long_LP_OHB[t], alt_LP_OHB[t] = MATS_coordinates.ECEF2lla(
r_LP_OHB_ECEF[t, 0], r_LP_OHB_ECEF[t, 1], r_LP_OHB_ECEF[t, 2]
)
# R_earth_MATS[t][t] = norm(r_MATS_OHB[t,:]*1000)-alt_MATS_OHB[t]
Time_MPL_OHB[t] = date2num(Time_OHB[-1])
"######### END OF OHB DATA CALCULATIONS #########################"
"#####################################################################################"
"########################## STK DATA ################################################"
"####################################################################################"
Time_STK = []
if not (STK_CSV_FILE == ""):
Logger.info("Calculations of STK Data")
with open(STK_CSV_FILE) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
# interestingrows=[row for idx, row in enumerate(csv_reader) if idx in range(start_from,100000)]
row_count = sum(1 for row in csv_reader) - 1
r_MATS_STK_km = zeros((row_count, 3))
Vel_MATS_STK = zeros((row_count, 3))
r_MATS_STK_ECEF = zeros((row_count, 3))
with open(STK_CSV_FILE) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
# elif( row_count % timestep != 0):
# row_count += 1
else:
try:
r_MATS_STK_km[line_count - 1, 0] = row[0]
r_MATS_STK_km[line_count - 1, 1] = row[1]
r_MATS_STK_km[line_count - 1, 2] = row[2]
Vel_MATS_STK[line_count - 1, 0] = row[3]
Vel_MATS_STK[line_count - 1, 1] = row[4]
Vel_MATS_STK[line_count - 1, 2] = row[5]
Time_STK.append(
datetime.datetime.strptime(row[6], "%d %b %Y %H:%M:%S.%f")
)
line_count += 1
except IndexError:
break
Time_MPL_STK = date2num(Time_STK[:])
x_MATS_error_STK = []
y_MATS_error_STK = []
z_MATS_error_STK = []
total_r_MATS_error_STK = []
Time_error_STK_MPL = []
"Calculate error between STK DATA and Predicted from Science Mode Timeline data when timestamps are the same"
for t2 in range(len(Time_STK)):
(
r_MATS_STK_ECEF[t2, 0],
r_MATS_STK_ECEF[t2, 1],
r_MATS_STK_ECEF[t2, 2],
) = MATS_coordinates.eci2ecef(
r_MATS_STK_km[t2, 0] * 1000,
r_MATS_STK_km[t2, 1] * 1000,
r_MATS_STK_km[t2, 2] * 1000,
Time_STK[t2],
)
for t in range(len(Time)):
if Time_MPL_STK[t2] == Time_MPL[t]:
x_MATS_error_STK.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 0] - r_MATS_STK_ECEF[t2, 0])
)
y_MATS_error_STK.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 1] - r_MATS_STK_ECEF[t2, 1])
)
z_MATS_error_STK.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 2] - r_MATS_STK_ECEF[t2, 2])
)
total_r_MATS_error_STK.append(
norm(
(
x_MATS_error_STK[len(x_MATS_error_STK) - 1],
y_MATS_error_STK[len(y_MATS_error_STK) - 1],
z_MATS_error_STK[len(z_MATS_error_STK) - 1],
)
)
)
Time_error_STK_MPL.append(Time_MPL_STK[t2])
break
fig = figure()
plot_date(Time_error_STK_MPL[:], x_MATS_error_STK[:], markersize=1, label="x")
plot_date(Time_error_STK_MPL[:], y_MATS_error_STK[:], markersize=1, label="y")
plot_date(Time_error_STK_MPL[:], z_MATS_error_STK[:], markersize=1, label="z")
xlabel("Date")
ylabel("Meters")
title("Absolute error in ECEF position of MATS in m (prediction vs STK data")
legend()
figurePath = os.path.join(figureDirectory, "PosErrorMATS_STK")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
"########################## End of STK DATA ################################################"
"####################################################################################"
"########################## Plotter ###########################################"
"##############################################################################"
from mpl_toolkits.mplot3d import axes3d
fig = figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_xlim3d(-7000000, 7000000)
ax.set_ylim3d(-7000000, 7000000)
ax.set_zlim3d(-7000000, 7000000)
GravParameter = 3.986 * 10 ** 14
OrbitalPeriod = (
2 * pi * sqrt((Data_MATS["alt_MATS [m]"][0] + 6371000) ** 3 / GravParameter)
)
DataTo = int(OrbitalPeriod * (3 / 4) / (Time[1] - Time[0]).total_seconds())
ax.scatter(
Data_MATS["r_MATS_ECEF [m]"][1:DataTo, 0],
Data_MATS["r_MATS_ECEF [m]"][1:DataTo, 1],
Data_MATS["r_MATS_ECEF [m]"][1:DataTo, 2],
label="MATS",
)
ax.scatter(
Data_LP["r_LP_ECEF [m]"][1:DataTo, 0],
Data_LP["r_LP_ECEF [m]"][1:DataTo, 1],
Data_LP["r_LP_ECEF [m]"][1:DataTo, 2],
label="LP",
)
title(
"Positional data in m (ECEF) from Science Mode Timeline of LP and MATS for 3/4 of an orbit"
)
legend()
close()
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["ScienceMode"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
xlabel("Date")
ylabel("Active ScienceMode")
legend()
figurePath = os.path.join(figureDirectory, "ActiveScienceMode")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
"""
figure()
scatter(Time[:], Data_MATS['yaw_MATS [degrees]'][:], s=10, c=Data_MATS['ColorRGB'], label = 'Predicted from Science Mode Timeline')
#scatter(Time_OHB[:],Euler_angles_SLOF_OHB[:,0], s=10, c='r', marker="x", label = 'OHB-Data')
xlabel('Date')
ylabel('Yaw in degrees [z-axis SLOF]')
legend()
"""
"""
from pylab import plt
fig, axs = plt.subplots(1, 1)
scatter(Time[:], Data_MATS['yaw_MATS [degrees]'][:], s=10, c=Data_MATS['ColorRGB'], label = 'Predicted from Science Mode Timeline')
#scatter(Time_OHB[:],Euler_angles_SLOF_OHB[:,0], s=10, c='r', marker="x", label = 'OHB-Data')
xlabel('Date')
ylabel('Yaw in degrees [z-axis SLOF]')
legend()
"""
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["yaw_MATS [degrees]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
plot_date(
Time_MPL_OHB[:], Euler_angles_SLOF_OHB[:, 0], markersize=1, label="OHB-H5-Data"
)
plot_date(
Time_MPL[:],
Data_MATS["Yaw_function [degrees]"][:],
markersize=1,
label="Yaw-function (without attitude freezes)",
)
xlabel("Date")
ylabel("Degrees")
title("Yaw of optical-axis [z-axis SLOF (towards earth)]")
legend()
figurePath = os.path.join(figureDirectory, "Yaw")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["pitch_MATS [degrees]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
plot_date(
Time_MPL_OHB[:], Euler_angles_SLOF_OHB[:, 1], markersize=1, label="OHB-H5-Data"
)
xlabel("Date")
ylabel("Degrees")
title("Pitch of optical-axis [intrinsic y-axis SLOF]")
legend()
figurePath = os.path.join(figureDirectory, "Pitch")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["roll_MATS [degrees]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
plot_date(
Time_MPL_OHB[:], Euler_angles_SLOF_OHB[:, 2], markersize=1, label="OHB-H5-Data"
)
xlabel("Date")
ylabel("Degrees")
ylabel("Roll of optical-axis [intrinsic z-axis SLOF]")
legend()
figurePath = os.path.join(figureDirectory, "Roll")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
###################################
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["lat_MATS [degrees]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
plot_date(Time_MPL_OHB[:], lat_MATS_OHB[:], markersize=1, label="OHB-H5-Data")
xlabel("Date")
ylabel("Degrees")
title("Geodetic Latitude of MATS")
legend()
figurePath = os.path.join(figureDirectory, "Lat")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
"""
for t in range(len(lat_MATS_STK_FIXED)):
abs_lat_MATS_error_STK[t] = abs( lat_MATS_STK_FIXED[t] - Data_MATS['lat_MATS [degrees]'][t] )
abs_lat_MATS_error_OHB[t] = abs( lat_MATS_OHB[t] - Data_MATS['lat_MATS [degrees]'][t] )
abs_long_MATS_error_STK[t] = abs( long_MATS_STK_FIXED[t] - Data_MATS['long_MATS [degrees]'][t] )
abs_long_MATS_error_OHB[t] = abs( long_MATS_OHB[t] - Data_MATS['long_MATS [degrees]'][t] )
fig = figure()
plot_date(current_time_MPL_STK[1:], abs_lat_MATS_error_STK[1:], markersize = 1, label = 'Prediction vs STK')
plot_date(Time_MPL_OHB[1:], abs_lat_MATS_error_OHB[1:], markersize = 1, label = 'Prediction vs OHB')
xlabel('Date')
ylabel('Absolute error in Latitude of MATS (Fixed) in degrees')
legend()
"""
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["long_MATS [degrees]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
# plot_date(current_time_MPL_STK[1:], long_MATS_STK_FIXED[1:], markersize = 1, label = 'STK-Data_Fixed')
plot_date(Time_MPL_OHB[:], long_MATS_OHB[:], markersize=1, label="OHB-H5-Data")
xlabel("Date")
ylabel("Degrees")
title("Longitude of MATS in degrees")
legend()
figurePath = os.path.join(figureDirectory, "Long")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
"""
fig = figure()
plot_date(current_time_MPL[1:], abs_long_MATS_error_STK[1:], markersize = 1, label = 'Prediction vs STK')
plot_date(Time_MPL_OHB[1:], abs_long_MATS_error_STK[1:], markersize = 1, label = 'Prediction vs OHB')
xlabel('Date')
ylabel('Absolute error in Longitude of MATS (Fixed) in degrees')
legend()
"""
fig = figure()
plot_date(
Time_MPL[:],
Data_MATS["alt_MATS [m]"][:],
markersize=1,
label="Predicted from Science Mode Timeline",
)
plot_date(Time_MPL_OHB[:], alt_MATS_OHB[:], markersize=1, label="OHB-H5-Data")
xlabel("Date")
ylabel("Meters")
title("Altitude of MATS")
legend()
figurePath = os.path.join(figureDirectory, "Alt")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
fig = figure()
plot_date(
Time_MPL_OHB[:],
abs(r_MATS_OHB_ECEF[:, 0] - r_MATS_OHB_ECEFdata[:, 0]),
markersize=1,
label="X",
)
plot_date(
Time_MPL_OHB[:],
abs(r_MATS_OHB_ECEF[:, 1] - r_MATS_OHB_ECEFdata[:, 1]),
markersize=1,
label="Y",
)
plot_date(
Time_MPL_OHB[:],
abs(r_MATS_OHB_ECEF[:, 2] - r_MATS_OHB_ECEFdata[:, 2]),
markersize=1,
label="Z",
)
xlabel("Date")
ylabel("Meters")
title(
"Absolute error in ECEF positional data from h5 and converted J2000 data from h5 into ECEF"
)
legend()
figurePath = os.path.join(figureDirectory, "ECEFerror")
pickle.dump(fig, open(figurePath + ".fig.pickle", "wb"))
close()
####################################
"Allocate variables for error calculations between OHB data and predictions"
total_r_MATS_error_OHB = []
x_MATS_error_OHB = []
y_MATS_error_OHB = []
z_MATS_error_OHB = []
total_r_LP_error_OHB = []
x_LP_error_OHB = []
y_LP_error_OHB = []
z_LP_error_OHB = []
r_MATS_error_OHB_Radial = []
r_MATS_error_OHB_CrossTrack = []
r_MATS_error_OHB_InTrack = []
total_r_MATS_error_OHB_RCI = []
r_LP_error_OHB_Radial = []
r_LP_error_OHB_CrossTrack = []
r_LP_error_OHB_InTrack = []
total_r_LP_error_OHB_RCI = []
alt_LP_error = []
optical_axis_Dec_ERROR = []
optical_axis_RA_ERROR = []
Time_error_MPL = []
if OHB_H5_Path != "":
"Calculate error between OHB DATA and Predicted from Science Mode Timeline data when timestamps are the same"
for t2 in range(timesteps):
for t in range(len(Time)):
if Time_MPL_OHB[t2] == Time_MPL[t]:
x_MATS_error_OHB.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 0] - r_MATS_OHB_ECEF[t2, 0])
)
y_MATS_error_OHB.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 1] - r_MATS_OHB_ECEF[t2, 1])
)
z_MATS_error_OHB.append(
abs(Data_MATS["r_MATS_ECEF [m]"][t, 2] - r_MATS_OHB_ECEF[t2, 2])
)
total_r_MATS_error_OHB.append(
norm(
(
x_MATS_error_OHB[len(x_MATS_error_OHB) - 1],
y_MATS_error_OHB[len(y_MATS_error_OHB) - 1],
z_MATS_error_OHB[len(z_MATS_error_OHB) - 1],
)
)
)
x_LP_error_OHB.append(
abs(Data_LP["r_LP_ECEF [m]"][t, 0] - r_LP_OHB_ECEF[t2, 0])
)
y_LP_error_OHB.append(
abs(Data_LP["r_LP_ECEF [m]"][t, 1] - r_LP_OHB_ECEF[t2, 1])
)
z_LP_error_OHB.append(
abs(Data_LP["r_LP_ECEF [m]"][t, 2] - r_LP_OHB_ECEF[t2, 2])
)
total_r_LP_error_OHB.append(
norm(
(
x_LP_error_OHB[len(x_LP_error_OHB) - 1],
y_LP_error_OHB[len(y_LP_error_OHB) - 1],
z_LP_error_OHB[len(z_LP_error_OHB) - 1],
)
)
)
alt_LP_error.append(Data_LP["alt_LP [m]"][t] - alt_LP_OHB[t2])
optical_axis_Dec_ERROR.append(
abs(Data_MATS["optical_axis_Dec [degrees]"][t] - Dec_OHB[t2])
)
optical_axis_RA_ERROR.append(
abs(Data_MATS["optical_axis_RA [degrees]"][t] - RA_OHB[t2])
)
# in_track = cross( normal_orbit[t], r_MATS_unit_vector[t])
# r_MATS_unit_vector_ECEF = array( (Data_MATS['x_MATS_ECEF'][t], Data_MATS['y_MATS_ECEF'][t], Data_MATS['z_MATS_ECEF'][t]) )
# v_MATS_unit_vector_ECEF = array( (Data_MATS['vx_MATS_ECEF'][t], Data_MATS['vy_MATS_ECEF'][t], Data_MATS['vz_MATS_ECEF'][t]) ) / norm( array( (Data_MATS['vx_MATS_ECEF'][t], Data_MATS['vy_MATS_ECEF'][t], Data_MATS['vz_MATS_ECEF'][t]) ) )
r_MATS_unit_vector_ECEF = Data_MATS["r_MATS_ECEF [m]"][t] / norm(
Data_MATS["r_MATS_ECEF [m]"][t]
)
v_MATS_unit_vector_ECEF = Data_MATS["v_MATS_ECEF [km/s]"][t] / norm(
Data_MATS["v_MATS_ECEF [km/s]"][t]
)
UnitVectorBasis_RCI = transpose(
array(
(
(
r_MATS_unit_vector_ECEF[0],
Data_MATS["r_normal_orbit_ECEF"][t, 0],
v_MATS_unit_vector_ECEF[0],
),
(
r_MATS_unit_vector_ECEF[1],
Data_MATS["r_normal_orbit_ECEF"][t, 1],
v_MATS_unit_vector_ECEF[1],
),
(
| |
<reponame>andyharney/pySignare
# Copyright 2014 <NAME> (2014)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some global vars for later use
version = '1.1'  # shown in the splash screen and menu headers
# APK Folders
usapks = './UnsignedApks/'  # input: APKs waiting to be signed
sapks = './SignedApks/'  # output: signed APKs
zaapks = './ZipAlignedApks/'  # output: zipaligned APKs
# Key Folders
dgkeydir = './DebugKey/'  # bundled debug signing key pair
privkeydir = './PrivateKey/'  # user-generated private keystores
# Temp Folder
tmp = './tmp/'  # scratch space used during private-key signing
def operatingsystem():
    """Detect the host OS and select the matching bundled tool binaries.

    Sets the module-level names ``keytool``, ``zipalign``, ``jarsigner``,
    ``osplat`` (display name), ``clsh`` (clear-screen command) and
    ``warning`` (experimental-support notice), then shows the splash screen.
    """
    import sys
    global keytool, zipalign, jarsigner, osplat, clsh, warning
    warning = ''  # default: no notice shown
    if sys.platform.startswith('win32'):
        clsh = 'cls'
        osplat = 'Windows'
        keytool = 'keytool-win.exe'
        zipalign = 'zipalign-win.exe'
        jarsigner = 'jarsigner-win.exe'
    elif sys.platform.startswith('darwin'):
        clsh = 'clear'
        osplat = 'Mac'
        keytool = 'keytool-mac'
        zipalign = 'zipalign-mac'
        jarsigner = 'jarsigner-mac'
    else:
        # Linux, plus a best-effort fallback for any other POSIX platform.
        # ROBUSTNESS FIX: the original only handled linux/darwin/win32 and
        # left keytool/zipalign/jarsigner (and, on unknown platforms,
        # ``warning``) undefined, causing a NameError later instead of a
        # usable default.
        clsh = 'clear'
        osplat = 'Linux'
        keytool = 'keytool-lin'
        zipalign = 'zipalign-lin'
        jarsigner = 'jarsigner-lin'
    if not sys.platform.startswith('win32'):
        warning = '''****** Warning ******
Linux & OSX Support is Experimental
Proceed At Your Own Risk'''
    splash()
# Loads the splash, with a standard Apache Licence 2.0 disclaimer, and an acceptance option
def splash():
    """Show the splash/licence screen and require acceptance to continue.

    Prints version, credits and the Apache 2.0 notice, then asks the user
    to accept the licence; on acceptance continues to checkjava(),
    otherwise exits.
    """
    import sys
    global warning
    print()
    print('pySignare v' + version + ' ' + osplat)
    print('https://github.com/andyharney/pySignare')
    print('')
    print('Written Exclusively For All Members of XDA-Developers.com')
    print()
    print('by Andy')
    print('http://forum.xda-developers.com/member.php?u=797171')
    print()
    print('''
Copyright 2014 <NAME> (2014)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.''')
    print()
    try:
        if len(warning) != 0:
            print(warning)
            print()
        lic = input('Do you accept the terms of the above licence? Y/N - ')
        print()
        # DEAD-CODE FIX: removed ``assert isinstance(lic, object)`` -- every
        # Python object is an instance of object, so the assert could never
        # fire (and asserts are stripped under ``python -O`` anyway).
        if lic.lower() == 'y':
            del lic
            checkjava()
        else:
            print('Without accepting the terms, you cannot continue')
            input('Press Enter To Exit')
            del lic
            sys.exit()
    except NameError as e:
        # ``warning`` is only defined by operatingsystem(); if splash() is
        # reached without it, fall through and continue best-effort.
        print('An Error Occurred\n')
        print(e)
        print('\nAssuming You Wish to Continue. This May Not Work.')
        checkjava()
def checkjava():
    """Verify that ``java`` is on the system PATH; exit politely if not.

    On success moves on to checkoutputfolder().
    """
    import sys
    import subprocess
    # BUG FIX: subprocess.call() returns an int exit code, never None, so the
    # original ``if sjava is None`` missing-java branch could never fire.
    # When the binary is absent the call raises FileNotFoundError (an OSError
    # subclass) instead -- catch that.
    try:
        subprocess.call(['java', '-version'])
    except OSError:
        print('Java not installed or added to system path, please install')
        input('Press Enter To Exit')
        # Exit gracefully
        sys.exit()
    print()
    # Moves on to the output folder checking function
    checkoutputfolder()
def checkoutputfolder():
    """Create the output folders the signing steps write into, then show the menu."""
    import os
    global sapks, zaapks, privkeydir
    # CONSISTENCY/ROBUSTNESS FIX: the original re-hardcoded the folder names
    # ('SignedApks', 'ZipAlignedApks', 'PrivateKey') right next to the
    # globals that already hold them, and its isdir()+mkdir() pairs could
    # race with another process; makedirs(exist_ok=True) handles both.
    for folder in (sapks, zaapks, privkeydir):
        os.makedirs(folder, exist_ok=True)
    # All clear, now the main menu function is loaded.
    mainmenu()
def mainmenu():
    """Clear the screen, draw the main menu, then hand off to menuchoice()."""
    import os
    global version, osplat, clsh
    os.system(clsh)
    print('pySignare v' + version + ' ' + osplat)
    print()
    print()
    # Draw the menu line by line
    divider = '-' * 28
    menu_lines = (
        'Main Menu',
        divider,
        '1 : Sign With Debug Key',
        '2 : Sign With Private Key',
        '3 : ZipAlign Signed APKs',
        divider,
        '4 : Generate Private Key',
        divider,
        '5 : Exit ',
        divider,
    )
    for line in menu_lines:
        print(line)
    # Hand control to the option prompt
    menuchoice()
def menuchoice():
    """Prompt for a main-menu option (1-5) and dispatch to its handler."""
    import sys
    # A couple of custom exceptions to flag invalid / "secret" menu choices
    class MenuError(Exception):
        pass
    class HiddenMenu(Exception):
        pass
    option = ''
    # User enters option number, its validated as an integer and passes to
    # the dispatch below
    while True:
        # This while loop catches the option choice and will only break if
        # the choice is valid
        try:
            option = int(input('Please Choose an Option : '))
            if 1 <= option <= 5:
                break
            elif option == 1729:
                raise HiddenMenu()
            else:
                raise MenuError()
        # Raised when the input is not an integer at all
        except ValueError:
            print('Please enter a valid option, whole numbers please.')
            continue
        # Raised for integers outside the menu range
        except MenuError:
            print('Please enter a valid option, ' + str(option) + " isn't an option.")
        # If 1729 is entered, this exception is raised.
        # Ref: Srinivasa Ramanujan - http://en.wikipedia.org/wiki/Srinivasa_Ramanujan
        except HiddenMenu:
            print('Very Clever, but nothing is hidden. Yet')
    # Menu choice logic
    if 1 <= option <= 5:
        if option == 1:
            del option
            debugkeysign()
        elif option == 2:
            # BUG FIX: the original deleted ``option`` both before AND after
            # the privkeyprep() call, so the second ``del option`` raised
            # NameError whenever the call returned.
            del option
            privkeyprep()
        elif option == 3:
            del option
            zipalignfunc()
        elif option == 4:
            del option
            genprivkey()
        elif option == 5:
            del option
            # Exit gracefully
            print('Quitting')
            sys.exit()
    print('Please Choose a Valid Option : ')
def debugkeysign():
    """Sign every APK in the unsigned folder with the bundled debug key."""
    import os
    import os.path
    import subprocess
    global usapks, dgkeydir, sapks, clsh
    # CONSISTENCY FIX: the original hardcoded os.system('cls'), which fails
    # on Linux/Mac; use the platform clear command like mainmenu() does.
    os.system(clsh)
    print('\n' + 'Debug Key Signing' + '\n')
    # Create List of APKs and key files
    apklist = os.listdir(usapks)
    # BUG FIX: os.listdir() order is arbitrary, so indexing [1]/[0] below was
    # nondeterministic; sort so the certificate (.x509.pem) reliably sorts
    # after the key (.pk8).
    debugkeys = sorted(os.listdir(dgkeydir))
    # If none are found return to the menu with a message
    if len(apklist) == 0:
        print('No APKs Found' + '\n')
        input('Press Enter to Return to Menu')
        print()
        mainmenu()
    else:
        # CONSISTENCY FIX: pluralize like privatekeysigning() does.
        if len(apklist) == 1:
            print('Found ' + str(len(apklist)) + ' APK' + '\n')
        else:
            print('Found ' + str(len(apklist)) + ' APKs' + '\n')
        for APK in apklist:
            print('Signing ' + APK)
            # signapk.jar expects: certificate, private key, input, output
            subprocess.call(['java',
                             '-jar',
                             './Files/signapk.jar',
                             dgkeydir + debugkeys[1],
                             dgkeydir + debugkeys[0],
                             usapks + APK,
                             sapks + APK
                             ])
            print()
        print('Signing has finished, please check the messages above for any errors.')
        input('Press Enter to continue')
        print()
        del apklist, debugkeys
        mainmenu()
def privkeyprep():
    """Pick a private keystore (prompting when several exist) and start signing."""
    class MenuError(Exception):
        pass
    import os
    import shutil
    global usapks, tmp, privkeydir, clsh
    # CONSISTENCY FIX: use the platform clear command instead of a
    # hardcoded 'cls', matching mainmenu().
    os.system(clsh)
    # Remove any stale scratch folder from a previous signing run
    if os.path.isdir(tmp):
        shutil.rmtree(tmp)
    print('\n' + 'Private Key Signing' + '\n')
    # Create List of APKs and keystores
    apklist = os.listdir(usapks)
    apkcount = len(apklist)
    privkeylist = os.listdir(privkeydir)
    privkeycount = len(privkeylist)
    keychoice = 0
    if privkeycount == 0:
        print('No Private Keys Found' + '\n')
        input('Press Enter to Return to Menu')
        print()
        mainmenu()
        return  # ROBUSTNESS: don't fall through if mainmenu() ever returns
    # BUG FIX: the original called sorted(privkeylist) and discarded the
    # result (sorted() returns a new list), so the numbered key menu below
    # depended on arbitrary listdir() order. Sort in place instead.
    privkeylist.sort()
    if len(apklist) == 0:
        print('No APKs Found' + '\n')
        input('Press Enter to Return to Menu')
        print()
        mainmenu()
        return
    if privkeycount == 1:
        # Only one keystore: no need to prompt
        chosenprivatekey = privkeylist
        privatekeysigning(apklist, apkcount, chosenprivatekey[0])
    else:
        print(str(privkeycount) + ' Private Keys Found')
        print()
        # Thanks <NAME>
        # http://stackoverflow.com/a/21962946/3303492
        for keynum, privkey in enumerate(privkeylist, 1):
            print('{} : {}'.format(keynum, privkey))
        while True:
            # This while loop catches the option choice and will only break
            # if the choice is valid
            try:
                keychoice = int(input('Please Choose a Key : '))
                print()
                if 1 <= keychoice <= privkeycount:
                    break
                else:
                    raise MenuError()
            # Raised when the input is not an integer at all
            except ValueError:
                print('Please enter a valid option')
                continue
            # Raised for integers outside the listed key range
            except MenuError:
                print('Please enter a valid option, ' + str(keychoice) + " isn't an option.")
                continue
        chosenprivatekey = privkeylist[(keychoice - 1)]
        print(chosenprivatekey + ' has been chosen')
        print()
        privatekeysigning(apklist, apkcount, chosenprivatekey)
def privatekeysigning(apklist, apkcount, chosenprivatekey):
import os
import shutil
import subprocess
global jarsigner, sapks, usapks, tmp, privkeydir
alias = str(str.rstrip(chosenprivatekey, '-private-key.keystore'))
signcount = 0
if len(apklist) == 1:
print('Found ' + str(apkcount) + ' APK' + '\n')
else:
print('Found ' + str(apkcount) + ' APKs' + '\n')
# Insert warning about entering the keystore password
print('Important, your KEYSTORE password is required to sign multiple apks at once.')
print("Your KEYSTORE password is not used outside of this menu option. It's cleared when signing is complete.")
print()
keystorepass = input('Please Enter your KEYSTORE Password : ')
print()
while signcount != apkcount:
for APK in apklist:
print('Signing ' + APK)
# Because private key signing is done "in-place" we backup the original apk, sign, move and restore
# Create tmp folder
os.mkdir('tmp')
# Copy unsigned apk out
shutil.copyfile(usapks + APK, tmp + APK)
subprocess.call(['./Files/' + jarsigner,
'-keystore',
| |
add_file_with_options_async(
self,
space_id: str,
request: dingtalkdrive__1__0_models.AddFileRequest,
headers: dingtalkdrive__1__0_models.AddFileHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdrive__1__0_models.AddFileResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.parent_id):
body['parentId'] = request.parent_id
if not UtilClient.is_unset(request.file_type):
body['fileType'] = request.file_type
if not UtilClient.is_unset(request.file_name):
body['fileName'] = request.file_name
if not UtilClient.is_unset(request.media_id):
body['mediaId'] = request.media_id
if not UtilClient.is_unset(request.add_conflict_policy):
body['addConflictPolicy'] = request.add_conflict_policy
if not UtilClient.is_unset(request.union_id):
body['unionId'] = request.union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdrive__1__0_models.AddFileResponse(),
await self.do_roarequest_async('AddFile', 'drive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/drive/spaces/{space_id}/files', 'json', req, runtime)
)
def get_preview_info(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.GetPreviewInfoRequest,
) -> dingtalkdrive__1__0_models.GetPreviewInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetPreviewInfoHeaders()
return self.get_preview_info_with_options(space_id, file_id, request, headers, runtime)
async def get_preview_info_async(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.GetPreviewInfoRequest,
) -> dingtalkdrive__1__0_models.GetPreviewInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetPreviewInfoHeaders()
return await self.get_preview_info_with_options_async(space_id, file_id, request, headers, runtime)
    def get_preview_info_with_options(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.GetPreviewInfoRequest,
        headers: dingtalkdrive__1__0_models.GetPreviewInfoHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GetPreviewInfoResponse:
        """Call GetPreviewInfo: GET /v1.0/drive/spaces/{space_id}/files/{file_id}/previewInfos.

        Builds the query from the request's set fields, merges headers, and
        maps the JSON response into a GetPreviewInfoResponse.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        real_headers = {}
        # NOTE(review): when common_headers is set, the token below is written
        # into that same dict (shared reference, not a copy) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GetPreviewInfoResponse(),
            self.do_roarequest('GetPreviewInfo', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/previewInfos', 'json', req, runtime)
        )
    async def get_preview_info_with_options_async(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.GetPreviewInfoRequest,
        headers: dingtalkdrive__1__0_models.GetPreviewInfoHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GetPreviewInfoResponse:
        """Async GetPreviewInfo: GET /v1.0/drive/spaces/{space_id}/files/{file_id}/previewInfos."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GetPreviewInfoResponse(),
            await self.do_roarequest_async('GetPreviewInfo', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/previewInfos', 'json', req, runtime)
        )
def info_space(
self,
space_id: str,
request: dingtalkdrive__1__0_models.InfoSpaceRequest,
) -> dingtalkdrive__1__0_models.InfoSpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.InfoSpaceHeaders()
return self.info_space_with_options(space_id, request, headers, runtime)
async def info_space_async(
self,
space_id: str,
request: dingtalkdrive__1__0_models.InfoSpaceRequest,
) -> dingtalkdrive__1__0_models.InfoSpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.InfoSpaceHeaders()
return await self.info_space_with_options_async(space_id, request, headers, runtime)
    def info_space_with_options(
        self,
        space_id: str,
        request: dingtalkdrive__1__0_models.InfoSpaceRequest,
        headers: dingtalkdrive__1__0_models.InfoSpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.InfoSpaceResponse:
        """Call InfoSpace: GET /v1.0/drive/spaces/{space_id}."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        real_headers = {}
        # NOTE(review): token below is written into common_headers itself when
        # that dict is set (shared reference) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.InfoSpaceResponse(),
            self.do_roarequest('InfoSpace', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}', 'json', req, runtime)
        )
    async def info_space_with_options_async(
        self,
        space_id: str,
        request: dingtalkdrive__1__0_models.InfoSpaceRequest,
        headers: dingtalkdrive__1__0_models.InfoSpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.InfoSpaceResponse:
        """Async InfoSpace: GET /v1.0/drive/spaces/{space_id}."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.InfoSpaceResponse(),
            await self.do_roarequest_async('InfoSpace', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}', 'json', req, runtime)
        )
def management_modify_space(
self,
request: dingtalkdrive__1__0_models.ManagementModifySpaceRequest,
) -> dingtalkdrive__1__0_models.ManagementModifySpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.ManagementModifySpaceHeaders()
return self.management_modify_space_with_options(request, headers, runtime)
async def management_modify_space_async(
self,
request: dingtalkdrive__1__0_models.ManagementModifySpaceRequest,
) -> dingtalkdrive__1__0_models.ManagementModifySpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.ManagementModifySpaceHeaders()
return await self.management_modify_space_with_options_async(request, headers, runtime)
    def management_modify_space_with_options(
        self,
        request: dingtalkdrive__1__0_models.ManagementModifySpaceRequest,
        headers: dingtalkdrive__1__0_models.ManagementModifySpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.ManagementModifySpaceResponse:
        """Call ManagementModifySpace: PUT /v1.0/drive/managements/spaces.

        Serializes the set request fields (spaceIds, quota, unionId) into the
        JSON body and maps the response.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.space_ids):
            body['spaceIds'] = request.space_ids
        if not UtilClient.is_unset(request.quota):
            body['quota'] = request.quota
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        # NOTE(review): token below is written into common_headers itself when
        # that dict is set (shared reference) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.ManagementModifySpaceResponse(),
            self.do_roarequest('ManagementModifySpace', 'drive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/drive/managements/spaces', 'json', req, runtime)
        )
    async def management_modify_space_with_options_async(
        self,
        request: dingtalkdrive__1__0_models.ManagementModifySpaceRequest,
        headers: dingtalkdrive__1__0_models.ManagementModifySpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.ManagementModifySpaceResponse:
        """Async ManagementModifySpace: PUT /v1.0/drive/managements/spaces."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.space_ids):
            body['spaceIds'] = request.space_ids
        if not UtilClient.is_unset(request.quota):
            body['quota'] = request.quota
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.ManagementModifySpaceResponse(),
            await self.do_roarequest_async('ManagementModifySpace', 'drive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/drive/managements/spaces', 'json', req, runtime)
        )
def modify_permission(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.ModifyPermissionRequest,
) -> dingtalkdrive__1__0_models.ModifyPermissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.ModifyPermissionHeaders()
return self.modify_permission_with_options(space_id, file_id, request, headers, runtime)
async def modify_permission_async(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.ModifyPermissionRequest,
) -> dingtalkdrive__1__0_models.ModifyPermissionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.ModifyPermissionHeaders()
return await self.modify_permission_with_options_async(space_id, file_id, request, headers, runtime)
    def modify_permission_with_options(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.ModifyPermissionRequest,
        headers: dingtalkdrive__1__0_models.ModifyPermissionHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.ModifyPermissionResponse:
        """Call ModifyPermission: PUT /v1.0/drive/spaces/{space_id}/files/{file_id}/permissions.

        Serializes the set request fields (role, members, unionId) into the
        JSON body; the endpoint returns no body ('none' response type).
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.role):
            body['role'] = request.role
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        # NOTE(review): token below is written into common_headers itself when
        # that dict is set (shared reference) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.ModifyPermissionResponse(),
            self.do_roarequest('ModifyPermission', 'drive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/permissions', 'none', req, runtime)
        )
    async def modify_permission_with_options_async(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.ModifyPermissionRequest,
        headers: dingtalkdrive__1__0_models.ModifyPermissionHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.ModifyPermissionResponse:
        """Async ModifyPermission: PUT /v1.0/drive/spaces/{space_id}/files/{file_id}/permissions."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.role):
            body['role'] = request.role
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.ModifyPermissionResponse(),
            await self.do_roarequest_async('ModifyPermission', 'drive_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/permissions', 'none', req, runtime)
        )
def grant_privilege_of_custom_space(
self,
space_id: str,
request: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceRequest,
) -> dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceHeaders()
return self.grant_privilege_of_custom_space_with_options(space_id, request, headers, runtime)
async def grant_privilege_of_custom_space_async(
self,
space_id: str,
request: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceRequest,
) -> dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceHeaders()
return await self.grant_privilege_of_custom_space_with_options_async(space_id, request, headers, runtime)
    def grant_privilege_of_custom_space_with_options(
        self,
        space_id: str,
        request: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceRequest,
        headers: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse:
        """Call GrantPrivilegeOfCustomSpace: POST /v1.0/drive/spaces/{space_id}/files/customSpacePrivileges.

        Serializes the set request fields (type, userId, fileIds, duration,
        unionId) into the JSON body; the endpoint returns no body.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.type):
            body['type'] = request.type
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.file_ids):
            body['fileIds'] = request.file_ids
        if not UtilClient.is_unset(request.duration):
            body['duration'] = request.duration
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        # NOTE(review): token below is written into common_headers itself when
        # that dict is set (shared reference) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse(),
            self.do_roarequest('GrantPrivilegeOfCustomSpace', 'drive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/drive/spaces/{space_id}/files/customSpacePrivileges', 'none', req, runtime)
        )
    async def grant_privilege_of_custom_space_with_options_async(
        self,
        space_id: str,
        request: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceRequest,
        headers: dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse:
        """Async GrantPrivilegeOfCustomSpace: POST /v1.0/drive/spaces/{space_id}/files/customSpacePrivileges."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.type):
            body['type'] = request.type
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.file_ids):
            body['fileIds'] = request.file_ids
        if not UtilClient.is_unset(request.duration):
            body['duration'] = request.duration
        if not UtilClient.is_unset(request.union_id):
            body['unionId'] = request.union_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GrantPrivilegeOfCustomSpaceResponse(),
            await self.do_roarequest_async('GrantPrivilegeOfCustomSpace', 'drive_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/drive/spaces/{space_id}/files/customSpacePrivileges', 'none', req, runtime)
        )
def get_download_info(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.GetDownloadInfoRequest,
) -> dingtalkdrive__1__0_models.GetDownloadInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetDownloadInfoHeaders()
return self.get_download_info_with_options(space_id, file_id, request, headers, runtime)
async def get_download_info_async(
self,
space_id: str,
file_id: str,
request: dingtalkdrive__1__0_models.GetDownloadInfoRequest,
) -> dingtalkdrive__1__0_models.GetDownloadInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetDownloadInfoHeaders()
return await self.get_download_info_with_options_async(space_id, file_id, request, headers, runtime)
    def get_download_info_with_options(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.GetDownloadInfoRequest,
        headers: dingtalkdrive__1__0_models.GetDownloadInfoHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GetDownloadInfoResponse:
        """Call GetDownloadInfo: GET /v1.0/drive/spaces/{space_id}/files/{file_id}/downloadInfos.

        Builds the query from unionId/withRegion/withInternalResourceUrl when set.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        if not UtilClient.is_unset(request.with_region):
            query['withRegion'] = request.with_region
        if not UtilClient.is_unset(request.with_internal_resource_url):
            query['withInternalResourceUrl'] = request.with_internal_resource_url
        real_headers = {}
        # NOTE(review): token below is written into common_headers itself when
        # that dict is set (shared reference) — confirm intended.
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GetDownloadInfoResponse(),
            self.do_roarequest('GetDownloadInfo', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/downloadInfos', 'json', req, runtime)
        )
    async def get_download_info_with_options_async(
        self,
        space_id: str,
        file_id: str,
        request: dingtalkdrive__1__0_models.GetDownloadInfoRequest,
        headers: dingtalkdrive__1__0_models.GetDownloadInfoHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdrive__1__0_models.GetDownloadInfoResponse:
        """Async GetDownloadInfo: GET /v1.0/drive/spaces/{space_id}/files/{file_id}/downloadInfos."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.union_id):
            query['unionId'] = request.union_id
        if not UtilClient.is_unset(request.with_region):
            query['withRegion'] = request.with_region
        if not UtilClient.is_unset(request.with_internal_resource_url):
            query['withInternalResourceUrl'] = request.with_internal_resource_url
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdrive__1__0_models.GetDownloadInfoResponse(),
            await self.do_roarequest_async('GetDownloadInfo', 'drive_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/drive/spaces/{space_id}/files/{file_id}/downloadInfos', 'json', req, runtime)
        )
def get_my_space_info(
self,
request: dingtalkdrive__1__0_models.GetMySpaceInfoRequest,
) -> dingtalkdrive__1__0_models.GetMySpaceInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetMySpaceInfoHeaders()
return self.get_my_space_info_with_options(request, headers, runtime)
async def get_my_space_info_async(
self,
request: dingtalkdrive__1__0_models.GetMySpaceInfoRequest,
) -> dingtalkdrive__1__0_models.GetMySpaceInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdrive__1__0_models.GetMySpaceInfoHeaders()
return await self.get_my_space_info_with_options_async(request, headers, runtime)
def get_my_space_info_with_options(
self,
request: dingtalkdrive__1__0_models.GetMySpaceInfoRequest,
headers: dingtalkdrive__1__0_models.GetMySpaceInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdrive__1__0_models.GetMySpaceInfoResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.union_id):
query['unionId'] = request.union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
| |
the unicode
get,par = c_rsf.sf_histfloat(self.file,nm.encode('utf-8'))
if get:
return par
else:
return None
else:
try:
return float(self.vd[nm])
except:
return None
    def close(self):
        """Close this input file by delegating to the base class."""
        # kls
        # Commented-out direct swig close kept for reference:
        #if not self.copy:
        #    c_rsf.sf_fileclose(self.f)
        _File.close(self)
class Output(_File):
    """Writable RSF file.

    Two implementations share this class:
    - swig path (`_swig_` true): delegates to the C library via `c_rsf`.
    - pure-Python path: writes the text header and binary data itself,
      tracking header key=value pairs in the `self.vd` dictionary until
      `flushheader` is called.
    """
    def __init__(self,tag='out',src=None):
        """Open output `tag` ('out' means stdout); clone header/type from
        `src`, or from the module-level `first_input` when `src` is None."""
        if _swig_:
            self.tag = tag
            self.temp = False
            self.srcfile=None
            self.headerflushed = False
            # c function only knows about utf-8 (ascii). translate the unicode
            self.file = c_rsf.sf_output(self.tag.encode('utf-8'))
            if src==None and first_input!=None:
                #sys.stderr.write("set src=first_input\n")
                src=first_input
            if src: # clone source file
                if hasattr(src,'file'):
                    srcfile = src.file
                    srctype = src.type
                else:
                    # NOTE(review): this branch runs when src has NO 'file'
                    # attribute, yet it reads src.file — looks like it would
                    # raise AttributeError; confirm what src is here.
                    srcfile = c_rsf.sf_input(src.file)
                    srctype = c_rsf.sf_gettype(srcfile)
                c_rsf.sf_settype(self.file,_File.type.index(srctype))
                self.srcfile=srcfile
                self.headerflushed = False
                if not hasattr(src,'file'):
                    c_rsf.sf_fileclose(srcfile)
            _File.__init__(self,self.tag)
            return None
        else:
            self.temp=None
            # Inherit the header text from src or the first input file.
            if src==None :
                if first_input==None:
                    self.header=""
                else:
                    self.header=first_input.header
            else:
                self.header=src.header
            # create dictionary from src file
            if tag == 'out':
                self.f=sys.stdout
                self.pipe=self.is_pipe()
                self.filename=self.getfilename()
                if self.filename==None:
                    # cannot find the fine name. Probably in another directory
                    # make up a temporary name
                    datapath = os.environ.get('DATAPATH','.')
                    temp_fd,temp_name =tempfile.mkstemp('',
                                                        sys.argv[0],
                                                        dir=datapath)
                    os.close(temp_fd)
                    self.filename=temp_name[len(datapath):]
                    #sys.stderr.write("temp_name=%s\n"%temp_name)
                    #sys.stderr.write("filename=%s\n"%self.filename)
            else:
                self.filename=tag
                self.f=open(self.filename,'w')
                self.pipe=False
            if not self.pipe:
                if self.filename == '/dev/null':
                    self.filename = 'stdout'
                    self.pipe=True
                else:
                    datapath = os.environ.get('DATAPATH','.')
                    # prepend datapath and append @ to filename
                    self.filename=datapath+'/'+self.filename+'@'
            #self.stream=sys.stdout.fileno()
            self.headerflushed = False
            # create a variable dictionary
            self.vd={}
            #sys.stderr.write('end Output.__init__ self.pipe=%s\n'%self.pipe)
    def tell(self):
        """Return the current file position (swig path only)."""
        if _swig_:
            return c_rsf.sf_tell(self.file)
        else:
            sys.stderr.write('in m8r.py Output.tell\n')
            sys.stderr.write('I do not think this function is required.\n')
            sys.stderr.write('you can just use self.f.tell()\n')
            return self.f.tell()
    def is_pipe(self):
        """Return True when the underlying stream is not seekable (a pipe)."""
        try:
            self.f.tell()
            return False
        except:
            return True
    def getfilename(self):
        """Find the name of self.f by matching inode numbers against files
        in the current directory; return '/dev/null' or None when not found."""
        f_fstat=os.fstat(self.f.fileno())
        #sys.stderr.write('f_fstat=%s\n'%repr(f_fstat))
        for filename in os.listdir('.'):
            if os.path.isfile(filename):
                if os.stat(filename).st_ino == f_fstat.st_ino:
                    return filename
        f_dev_null=open('/dev/null','w');
        f_dev_stat=os.fstat(f_dev_null.fileno())
        if f_dev_stat.st_ino == f_fstat.st_ino:
            return '/dev/null'
        return None
    def put(self,key,val):
        """Store a header key=value pair (written out by flushheader)."""
        if _swig_:
            # c function only knows utf-8 (ascii). translate the unicode
            if isinstance(val,int):
                c_rsf.sf_putint(self.file,key.encode('utf-8'),val)
            elif isinstance(val,float):
                c_rsf.sf_putfloat(self.file,key.encode('utf-8'),val)
            elif isinstance(val,str):
                c_rsf.sf_putstring(self.file,key.encode('utf-8'),
                                   val.encode('utf-8'))
            elif isinstance(val,list):
                # NOTE(review): only int lists are handled; other list types
                # are silently dropped on the swig path — confirm intended.
                if isinstance(val[0],int):
                    c_rsf.sf_putints(self.file,key.encode('utf-8'),val)
        else:
            # repr make string representation of an object
            if isinstance(val,str):
                #make sure string is inclosed in ".." in the .rsf file
                self.vd[key]='"'+val+'"'
            else:
                self.vd[key]="%s"%repr(val)
    def write(self,data):
        """Write a numpy array as the file's binary data, flushing the
        header first if it has not been written yet."""
        if _swig_:
            if (self.headerflushed == False and self.srcfile!=None):
                c_rsf.sf_fileflush(self.file,self.srcfile)
                self.headerflushed = True
            if self.type == 'float':
                c_rsf.sf_floatwrite(np.reshape(data.astype(np.float32),(data.size,)),self.file)
            elif self.type == 'complex':
                c_rsf.sf_complexwrite(np.reshape(data,(data.size,)),
                                      self.file)
            elif self.type == 'int':
                c_rsf.sf_intwrite(np.reshape(data.astype(np.int32),(data.size,)),self.file)
            else:
                raise TypeError('Unsupported file type %s' % self.type)
        else:
            if not self.headerflushed:
                #sys.stderr.write('Output.write add datatype to file header\n')
                #sys.stderr.write('data.dtype=%s\n'%repr(data.dtype))
                # Derive data_format from the array dtype on first write.
                if data.dtype==np.float32:
                    self.put('data_format','native_float')
                if data.dtype==np.complex64:
                    self.put('data_format','native_complex')
                if data.dtype==np.int32:
                    self.put('data_format','native_int')
                self.flushheader(first_input)
            # kls should check array data type matches file data_format
            data.tofile(self.f)
    def put_tah(self,trace,header):
        """Write one trace-and-header record in 'tah' format: the marker,
        the record length in bytes, the trace samples, then the header."""
        if _swig_:
            # 543711604 is the record marker written before each record.
            tah=np.array([543711604],dtype=np.int32)
            c_rsf.sf_intwrite(tah,self.file);
            sizeofint=4
            input_record_length=sizeofint*(trace.size+header.size)
            c_rsf.sf_intwrite(np.array([input_record_length],dtype=np.int32),
                              self.file)
            c_rsf.sf_floatwrite(trace,self.file)
            if header.dtype==np.int32:
                c_rsf.sf_intwrite(header,self.file)
            else:
                c_rsf.sf_floatwrite(header,self.file)
        else:
            if not self.headerflushed:
                if trace.dtype==np.float32:
                    self.put('data_format','native_float')
                if trace.dtype==np.complex64:
                    sys.stderr.write('error: python Output.put_tah does\n')
                    sys.stderr.write('    support complex traces\n')
                    # if you want to add this fix esize below
                    quit()
                    self.put('data_format','native_complex')
                if trace.dtype==np.int32:
                    self.put('data_format','native_int')
                if header.dtype==np.float32:
                    self.put('header_format','native_float')
                if header.dtype==np.complex64:
                    sys.stderr.write('error: cannot use complex headrs\n')
                    quit()
                    self.put('header_format','native_complex')
                if header.dtype==np.int32:
                    self.put('header_format','native_int')
                self.flushheader(first_input)
            # check array data type matches file data_format
            #temp=np.array([116, 97, 104, 32], dtype=np.int8)
            temp=np.array('tah ',dtype=str)
            temp.tofile(self.f)
            esize=4 # limitted to 4 byte entries
            temp=np.array([(trace.size+header.size)*esize],dtype=np.int32)
            temp.tofile(self.f)
            trace.tofile(self.f)
            header.tofile(self.f)
    def close(self):
        """Flush and close the output (pipes are flushed but left open)."""
        if _swig_:
            c_rsf.sf_fileclose(self.file)
            _File.close(self)
        else:
            self.f.flush()
            if not self.pipe:
                self.f.close()
    def flushheader(self,src):
        """Write the text header: inherited header, command line, timestamp,
        accumulated key=value pairs, and the in= pointer to the binary data.
        For non-pipe output the stream is then reopened on the data file."""
        # write the header (saved from the previous (input) file
        self.f.write(self.header)
        self.headerflushed = True
        # write the command name and parameters
        self.f.write('\n# execute: ')
        for arg in sys.argv:
            self.f.write(arg+' ')
        self.f.write('\n')
        self.f.write('# time=%s\n'%datetime.datetime.now())
        self.f.write('\n')
        # write the dictionary
        for key in self.vd:
            self.f.write("%s=%s\n"%(key,self.vd[key]))
        if self.pipe:
            # NOTE(review): both in="stdout" and in="stdin" are written for
            # piped output — confirm both lines are intended.
            self.f.write('in="stdout"\n')
            self.f.write('in="stdin"\n')
            self.f.write("%s%s%s"%(chr(SF_EOL),chr(SF_EOL),chr(SF_EOT)))
        else:
            self.f.write('in="%s"\n'%self.filename)
            self.f.flush()
            self.f.close()
            self.f=open(self.filename,"w")
# Default data server; overridable via the RSF_DATASERVER environment variable.
dataserver = os.environ.get('RSF_DATASERVER',
                            'http://www.reproducibility.org')

def Fetch(directory,filename,server=dataserver,top='data'):
    'retrieve a file from remote server'
    if server == 'local':
        # "local" server: symlink from the local data tree instead of downloading.
        remote = os.path.join(top,
                              directory,os.path.basename(filename))
        try:
            os.symlink(remote,filename)
        except:
            print ('Could not link file "%s" ' % remote)
            # NOTE(review): presumably removes a stale/broken link left by the
            # failed symlink — confirm.
            os.unlink(filename)
    else:
        rdir = os.path.join(server,top,
                            directory,os.path.basename(filename))
        try:
            # NOTE(review): urllib.urlretrieve is the Python 2 API; under
            # Python 3 this would need urllib.request.urlretrieve — confirm
            # which interpreter/compat shim this module targets.
            urllib.urlretrieve(rdir,filename)
        except:
            print ('Could not retrieve file "%s" from "%s"' % (filename,rdir))
class Filter(object):
    'Madagascar filter'
    # Program names (without the sf prefix) whose output is a vplot file.
    plots = ('grey','contour','graph','contour3',
             'dots','graph3','thplot','wiggle','grey3')
    # Programs that print diagnostics rather than writing a dataset to stdout.
    diagnostic = ('attr','disfil')
    def __init__(self,name,prefix='sf',srcs=[],
                 run=False,checkpar=False,pipe=False):
        """Wrap a Madagascar program as a callable filter.

        `name` is resolved to an executable under $RSFROOT/bin (the `prefix`
        is prepended when missing).  NOTE(review): `srcs=[]` is a mutable
        default; it is only copied (self.srcs[:]) in __getitem__, never
        mutated here, so sharing appears harmless — confirm.
        """
        rsfroot = rsf.prog.RSFROOT
        self.plot = False
        self.stdout = True
        self.prog = None
        if rsfroot:
            lp = len(prefix)
            if name[:lp] != prefix:
                name = prefix+name
            self.prog = rsf.doc.progs.get(name)
            prog = os.path.join(rsfroot,'bin',name)
            if os.path.isfile(prog):
                self.plot = name[lp:] in Filter.plots
                self.stdout = name[lp:] not in Filter.diagnostic
                name = prog
        self.srcs = srcs
        self.run=run
        self.command = name
        self.checkpar = checkpar
        self.pipe = pipe
        if self.prog:
            self.__doc__ = self.prog.text(None)
        # The nested helpers below take no arguments; they are closures over
        # self, defined for IPython/Sage introspection.
        def getdoc():
            '''for IPython'''
            return self.__doc__
        def _sage_argspec_():
            '''for Sage'''
            return None
        def __wrapped__():
            '''for IPython'''
            return None
    def __str__(self):
        return self.command
    def __or__(self,other):
        'pipe overload'
        # NOTE: mutates self (appends to self.command) and returns self.
        self.command = '%s | %s' % (self,other)
        return self
    def setcommand(self,kw,args=[]):
        """Append positional args and key=val parameters to the command line."""
        parstr = []
        for (key,val) in kw.items():
            if self.checkpar and self.prog and not self.prog.pars.get(key):
                sys.stderr.write('checkpar: No %s= parameter in %s\n' %
                                 (key,self.prog.name))
            # Convert Python values to Madagascar command-line syntax.
            if isinstance(val,str):
                val = '\''+val+'\''
            elif isinstance(val,File):
                val = '\'%s\'' % val
            elif isinstance(val,bool):
                if val:
                    val = 'y'
                else:
                    val = 'n'
            elif isinstance(val,list):
                val = ','.join(map(str,val))
            else:
                val = str(val)
            parstr.append('='.join([key,val]))
        self.command = ' '.join([self.command,
                                 ' '.join(map(str,args)),
                                 ' '.join(parstr)])
    def __getitem__(self,srcs):
        'Apply to data'
        mysrcs = self.srcs[:]
        if isinstance(srcs,tuple):
            mysrcs.extend(srcs)
        elif srcs:
            mysrcs.append(srcs)
        if self.stdout:
            # self.stdout may be a filename (set via __call__) or True (Temp).
            if isinstance(self.stdout,str):
                out = self.stdout
            else:
                out = Temp()
            command = '%s > %s' % (self.command,out)
        else:
            command = self.command
        (first,pipe,second) = command.partition('|')
        if mysrcs:
            # NOTE(review): list + map() concatenation is Python 2 only; under
            # Python 3 map returns an iterator and this raises — confirm target.
            command = ' '.join(['< ',str(mysrcs[0]),first]+
                               map(str,mysrcs[1:])+[pipe,second])
        fail = os.system(command)
        if fail:
            raise RuntimeError('Could not run "%s" ' % command)
        if self.stdout:
            if self.plot:
                return Vplot(out,temp=True)
            else:
                return File(out,temp=True)
    def __call__(self,*args,**kw):
        """Set parameters; run immediately when given a target or no kwargs."""
        if args:
            self.stdout = args[0]
            self.run = True
        elif not kw and not self.pipe:
            self.run = True
        self.setcommand(kw,args[1:])
        if self.run:
            return self[0]
        else:
            return self
    def __getattr__(self,attr):
        'Making pipes'
        # Accessing an unknown attribute appends "| sfattr" to the command.
        other = Filter(attr)
        self.pipe = True
        self.command = '%s | %s' % (self,other)
        return self
def Vppen(plots,args):
    'Combine vplot files with vppen and return the result as a Vplot'
    out = Temp()
    cmd = 'vppen %s %s > %s' % (args,' '.join(map(str,plots)),out)
    os.system(cmd)
    return Vplot(out,temp=True)
def Overlay(*plots):
    'Superpose plots on top of each other (vppen erase=o).'
    return Vppen(plots,'erase=o vpstyle=n')
def Movie(*plots):
    'Concatenate plots into a movie (frame-by-frame vppen).'
    return Vppen(plots,'vpstyle=n')
def SideBySide(*plots, **kw):
    """Arrange plots in one horizontal row; ``iso=True`` keeps proportions."""
    count = len(plots)
    if kw.get('iso'):
        return Vppen(plots, 'size=r vpstyle=n gridnum=%d,1' % count)
    return Vppen(plots, 'yscale=%d vpstyle=n gridnum=%d,1' % (count, count))
def OverUnder(*plots, **kw):
    """Arrange plots in one vertical column; ``iso=True`` keeps proportions."""
    count = len(plots)
    if kw.get('iso'):
        return Vppen(plots, 'size=r vpstyle=n gridnum=1,%d' % count)
    return Vppen(plots, 'xscale=%d vpstyle=n gridnum=1,%d' % (count, count))
class Vplot(object):
    """A Vplot graphics file, optionally temporary, with display/export helpers."""
    def __init__(self, name, temp=False, penopts=''):
        """Wrap the Vplot file ``name``.

        :param name: path of the vplot file.
        :param temp: delete the file when this object is garbage-collected.
        :param penopts: extra pen options prepended to every export call.
        """
        self.name = name
        self.temp = temp
        self.img = None
        self.penopts = penopts + ' '
    def __del__(self):
        'Destructor: remove the underlying file if it was temporary.'
        if self.temp:
            try:
                os.unlink(self.name)
            # Narrowed from a bare ``except`` so interpreter-level errors
            # (KeyboardInterrupt, etc.) are not masked.
            except OSError:
                raise RuntimeError('Could not remove "%s" ' % self)
    def __str__(self):
        return self.name
    def __mul__(self, other):
        # plot * plot -> overlay
        return Overlay(self, other)
    def __add__(self, other):
        # plot + plot -> movie frames
        return Movie(self, other)
    def show(self):
        'Show on screen'
        os.system('sfpen %s' % self.name)
    def hard(self, printer='printer'):
        'Send to printer'
        os.system('PRINTER=%s pspen %s' % (printer, self.name))
    def image(self):
        'Convert to PNG in the current directory (for use with IPython and SAGE)'
        self.img = os.path.basename(self.name) + '.png'
        self.export(self.img, 'png', args='bgcolor=w')
    def _repr_png_(self):
        'return PNG representation'
        if not self.img:
            self.image()
        # Context manager closes the file even if read() fails.
        with open(self.img, 'rb') as img:
            return img.read()
    try:
        from IPython.display import Image
        @property
        def png(self):
            # NOTE(review): ``Image`` is imported into the class namespace,
            # but this body resolves it via module globals at call time --
            # confirm IPython is also importable there.
            return Image(self._repr_png_(), embed=True)
    # Narrowed from a bare ``except``: only a missing IPython should
    # silently disable the ``png`` property.
    except ImportError:
        pass
    def movie(self):
        'Convert to animated GIF in the current directory (for use with SAGE)'
        self.gif = os.path.basename(self.name) + '.gif'
        self.export(self.gif, 'gif', args='bgcolor=w')
    def export(self, name, format=None, pen=None, args=''):
        'Export to different formats'
        from rsf.vpconvert import convert
        if not format:
            # Infer the format from the file extension when possible.
            if len(name) > 3:
                format = name[-3:].lower()
            else:
                format = 'vpl'
        convert(self.name, name, format, pen, self.penopts + args, verb=False)
class _Wrap(object):
    """Module proxy: unknown attribute lookups fall back to ``Filter(name)``.

    An instance of this class replaces the module in ``sys.modules`` so
    that any documented RSF program name resolves to a Filter.
    """
    def __init__(self, wrapped):
        self.wrapped = wrapped
    def __getattr__(self, name):
        try:
            return getattr(self.wrapped, name)
        except AttributeError:
            # ``in dict`` instead of ``in dict.keys()`` -- same result,
            # without materializing an intermediate view.
            if name in rsf.doc.progs or 'sf' + name in rsf.doc.progs:
                return Filter(name)
            else:
                raise
# Install the proxy over this module so attribute access for any RSF
# program name falls back to constructing a Filter for it.
sys.modules[__name__] = _Wrap(sys.modules[__name__])
if __name__ == "__main__":
# a=100 Xa=5
# float=5.625 cc=fgsg
# dd=1,2x4.0,2.25 true=yes false=2*no label="Time (sec)"
# Testing getpar
sys.stderr.write('testing getpar par=Par...\n')
# this is original Par. none of this works with _swig_=True
# par = Par(["prog","a=5","b=as","a=100","par=%s" % sys.argv[0]])
sys.stderr.write('sys.argv=%s\n'%sys.argv)
par = Par(["prog","a=5","b=as","a=100","float=5.625",
"true=y"]) #,"par=%s" % sys.argv[0]])
sys.stderr.write('start test asserts\n')
assert 100 == par.int("a")
assert not par.int("c")
assert 10 == par.int("c",10)
assert 5.625 == par.float("float")
assert par.bool("true")
sys.stderr.write('label=%s\n'%par.string("label"))
#assert "Time (sec)" == par.string("label")
#assert "Time (sec)" == par.string("label","Depth")
sys.stderr.write('nolabel=%s\n'%repr(par.string("nolabel")))
assert not par.string("nolabel")
sys.stderr.write('nolabel,Depth=%s\n'%repr(par.string("nolabel","Depth")))
assert "Depth" == par.string("nolabel","Depth")
# no function for this par.close()
# Testing file
# Redirect input and output
inp = os.popen("sfspike n1=100 d1=0.25 nsp=2 k1=1,10 label1='Time'")
out = open("junk.rsf","w")
os.dup2(inp.fileno(),sys.stdin.fileno())
os.dup2(out.fileno(),sys.stdout.fileno())
# Initialize
par = Par()
input = Input()
output = Output()
# Test
assert | |
# *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import os
import time
import pytz
import numpy as np
import pandas as pd
from datetime import date, datetime
from dateutil import tz
from perspective.table import Table
# Naive wall-clock datetimes used as the baseline fixtures.
LOCAL_DATETIMES = [
    datetime(2019, 1, 11, 0, 10, 20),
    datetime(2019, 1, 11, 11, 10, 20),
    datetime(2019, 1, 11, 19, 10, 20)
]
# Test the DST transition for Continental US
# (values straddle the 2019-03-10 spring-forward and 2019-11-03 fall-back).
LOCAL_DATETIMES_DST = [
    datetime(2019, 3, 9, 12, 10, 20),
    datetime(2019, 3, 19, 12, 10, 20),
    datetime(2019, 11, 2, 12, 10, 20),
    datetime(2019, 11, 3, 12, 10, 20)
]
# pandas.Timestamp mirrors of the same fixtures.
LOCAL_TIMESTAMPS = [pd.Timestamp(d) for d in LOCAL_DATETIMES]
LOCAL_TIMESTAMPS_DST = [pd.Timestamp(d) for d in LOCAL_DATETIMES_DST]
# Set up testing data
UTC = pytz.UTC
# localize() pins the naive fixtures as UTC wall times (no clock shift).
UTC_DATETIMES = [UTC.localize(d) for d in LOCAL_DATETIMES]
UTC_TIMESTAMPS = [UTC.localize(d) for d in LOCAL_TIMESTAMPS]
# is_dst is irrelevant for UTC (no DST) but kept explicit for the DST set.
UTC_DATETIMES_DST = [UTC.localize(d, is_dst=True) for d in LOCAL_DATETIMES_DST]
UTC_TIMESTAMPS_DST = [UTC.localize(d, is_dst=True) for d in LOCAL_TIMESTAMPS_DST]
PST = pytz.timezone("US/Pacific")
CST = pytz.timezone("US/Central")
EST = pytz.timezone("US/Eastern")
GMT = pytz.timezone("GMT")
HKT = pytz.timezone("Asia/Hong_Kong")
JPT = pytz.timezone("Asia/Tokyo")
ACT = pytz.timezone("Australia/ACT")
TIMEZONES = [PST, CST, EST, GMT, HKT, JPT, ACT]
# Per-zone lookup tables keyed by the zone name (e.g. "US/Pacific").
TZ_DATETIMES = {}
TZ_TIMESTAMPS = {}
TZ_DATETIMES_DST = {}
TZ_TIMESTAMPS_DST = {}
for TZ in TIMEZONES:
    # Plain fixtures are localized (pinned); DST fixtures are converted
    # from UTC so each zone holds the equivalent instant.
    TZ_DATETIMES[TZ.zone] = [TZ.localize(d) for d in LOCAL_DATETIMES]
    TZ_TIMESTAMPS[TZ.zone] = [d.tz_localize(TZ) for d in LOCAL_TIMESTAMPS]
    TZ_DATETIMES_DST[TZ.zone] = [d.astimezone(TZ) for d in UTC_DATETIMES_DST]
    TZ_TIMESTAMPS_DST[TZ.zone] = [d.tz_convert(TZ) for d in UTC_TIMESTAMPS_DST]
if os.name != 'nt':
# no tzset on windows, run these tests on linux/mac only
    class TestTableLocalDateTime(object):
        """Test datetimes across configurations such as local time, timezone-aware,
        timezone-naive, and UTC implementations.
        """
        def setup_method(self):
            # To make sure that local times are not changed, set timezone to EST
            os.environ["TZ"] = "US/Eastern"
            time.tzset()
        def teardown_method(self):
            # go back to UTC at end of each test method, for consistency
            os.environ["TZ"] = "UTC"
            time.tzset()
        def test_table_should_assume_local_time(self):
            """If a datetime object has no `tzinfo`, it should be assumed to be in
            local time and not be converted at all.
            """
            data = {
                "a": LOCAL_DATETIMES
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES
        def test_table_should_assume_local_time_numpy_datetime64(self):
            # Naive numpy datetime64 values behave like naive datetimes.
            data = {
                "a": [np.datetime64(d) for d in LOCAL_DATETIMES]
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES
        def test_table_should_assume_local_time_pandas_timestamp(self):
            data = {
                "a": LOCAL_TIMESTAMPS
            }
            # Timestamps are assumed to be in UTC by pandas
            table = Table(data)
            # Timestamps are read out in local time
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES
        def test_table_should_assume_local_time_pandas_timestamp_df(self):
            data = pd.DataFrame({
                "a": LOCAL_TIMESTAMPS
            })
            # Timestamps are assumed to be in UTC by pandas
            table = Table(data)
            # Timestamps are read out in local time
            # NOTE(review): via a DataFrame the values come back shifted by
            # the EST offset (UTC-5), unlike the dict case above -- the
            # expected values below encode that observed behavior.
            assert table.view().to_dict()["a"] == [
                datetime(2019, 1, 10, 19, 10, 20),
                datetime(2019, 1, 11, 6, 10, 20),
                datetime(2019, 1, 11, 14, 10, 20)
            ]
        def test_table_should_assume_local_time_dst(self):
            """If a datetime object has no `tzinfo`, it should be assumed to be in
            local time and not be converted at all.
            """
            data = {
                "a": LOCAL_DATETIMES_DST
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
        def test_table_should_assume_local_time_numpy_datetime64_dst(self):
            data = {
                "a": [np.datetime64(d) for d in LOCAL_DATETIMES_DST]
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
        def test_table_should_assume_local_time_pandas_timestamp_dst(self):
            data = {
                "a": LOCAL_TIMESTAMPS_DST
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
        def test_table_should_assume_local_time_pandas_timestamp_dst_df(self):
            data = pd.DataFrame({
                "a": LOCAL_TIMESTAMPS_DST
            })
            table = Table(data)
            # Values shifted to US/Eastern; note the 4h vs 5h offsets on
            # either side of the March/November DST transitions.
            assert table.view().to_dict()["a"] == [
                datetime(2019, 3, 9, 7, 10, 20),
                datetime(2019, 3, 19, 8, 10, 20),
                datetime(2019, 11, 2, 8, 10, 20),
                datetime(2019, 11, 3, 7, 10, 20)
            ]
        def test_table_datetime_min(self):
            # datetime.min reads back as the Unix epoch rendered in the EST
            # session timezone (1969-12-31 19:00 == epoch 0 at UTC-5) --
            # presumably clamped/overflowed internally.
            data = {
                "a": [datetime.min]
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == [
                datetime(1969, 12, 31, 19, 0)
            ]
        def test_table_datetime_min_df(self):
            data = pd.DataFrame({
                "a": [datetime.min]
            })
            table = Table(data)
            assert table.view().to_dict()["a"] == [
                datetime(1969, 12, 31, 19, 0)
            ]
        def test_table_datetime_1900(self):
            # NOTE(review): 1900-01-01 round-trips unchanged here, while
            # 1899 (below) comes back shifted by the EST offset -- confirm
            # which boundary behavior is intended.
            data = {
                "a": [datetime(1900, 1, 1)]
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == [
                datetime(1900, 1, 1)
            ]
        def test_table_datetime_1900_df(self):
            data = pd.DataFrame({
                "a": [datetime(1900, 1, 1)]
            })
            table = Table(data)
            # DataFrame input comes back shifted by the EST offset.
            assert table.view().to_dict()["a"] == [
                datetime(1899, 12, 31, 19)
            ]
        def test_table_datetime_1899(self):
            data = {
                "a": [datetime(1899, 1, 1)]
            }
            table = Table(data)
            assert table.view().to_dict()["a"] == [
                datetime(1898, 12, 31, 19)
            ]
        def test_table_datetime_1899_df(self):
            data = pd.DataFrame({
                "a": [datetime(1899, 1, 1)]
            })
            table = Table(data)
            assert table.view().to_dict()["a"] == [
                datetime(1898, 12, 31, 19)
            ]
        def test_table_datetime_min_epoch(self):
            # Integer 0 updated into a datetime column is epoch 0, read
            # back in the EST session timezone.
            data = {
                "a": [0]
            }
            table = Table({
                "a": datetime
            })
            table.update(data)
            assert table.view().to_dict()["a"] == [
                datetime(1969, 12, 31, 19, 0)
            ]
        def test_table_datetime_min_epoch_df(self):
            data = pd.DataFrame({
                "a": [0]
            })
            table = Table({
                "a": datetime
            })
            table.update(data)
            assert table.view().to_dict()["a"] == [
                datetime(1969, 12, 31, 19, 0)
            ]
class TestTableDateTimeUTCToLocal(object):
def teardown_method(self):
# Set timezone to UTC, always
os.environ["TZ"] = "UTC"
time.tzset()
def test_table_should_convert_UTC_to_local_time_pytz_pacific(self):
"""If the datetime has `tzinfo` set, use it to convert the datetime to
UTC. Make sure this works with both `pytz` and `dateutil` for
`datetime` and `pandas.Timestamp`.
"""
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_central(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_eastern(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_GMT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_HKT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_JPT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_ACT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_central(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_eastern(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_GMT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Pacific"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_central_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Central"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_eastern_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Eastern"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_GMT_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["GMT"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Pacific"]]
def test_table_should_convert_UTC_to_local_time_dateutil_central_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Central"]]
def test_table_should_convert_UTC_to_local_time_dateutil_eastern_DST_timestamp(self):
data = | |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_versionedobjects import fields
from zun.common import consts
from zun.common import exception
from zun.common.i18n import _
from zun.db import api as dbapi
from zun.objects import base
from zun.objects import exec_instance as exec_inst
from zun.objects import fields as z_fields
from zun.objects import pci_device
from zun.objects import registry
LOG = logging.getLogger(__name__)
# Fields that are lazy-loaded on first access (see obj_load_attr below).
CONTAINER_OPTIONAL_ATTRS = ["pci_devices", "exec_instances", "registry"]
@base.ZunObjectRegistry.register
class Cpuset(base.ZunObject):
    """CPU/memory-node pinning information attached to a container."""

    VERSION = '1.0'

    fields = {
        'cpuset_cpus': fields.SetOfIntegersField(nullable=True),
        'cpuset_mems': fields.SetOfIntegersField(nullable=True),
    }

    def _to_dict(self):
        """Serialize to a plain dict suitable for DB storage."""
        return {'cpuset_cpus': self.cpuset_cpus,
                'cpuset_mems': self.cpuset_mems}

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a Cpuset from a DB dict; empty input yields null fields."""
        if data_dict:
            obj = cls(cpuset_cpus=data_dict.get('cpuset_cpus'),
                      cpuset_mems=data_dict.get('cpuset_mems'))
        else:
            obj = cls(cpuset_cpus=None, cpuset_mems=None)
        obj.obj_reset_changes()
        return obj
class ContainerBase(base.ZunPersistentObject, base.ZunObject):
    """Common persistence behavior shared by all container object kinds.

    Concrete subclasses (Container, Capsule, ...) set ``container_type``
    so that DB queries are scoped to the right kind of record.
    """

    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'cpu_policy': fields.StringField(nullable=True),
        'cpuset': fields.ObjectField("Cpuset", nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.ListOfStringsField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'tty': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
        'runtime': fields.StringField(nullable=True),
        'pci_devices': fields.ListOfObjectsField('PciDevice',
                                                 nullable=True),
        'disk': fields.IntegerField(nullable=True),
        'auto_heal': fields.BooleanField(nullable=True),
        'started_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'exposed_ports': z_fields.JsonField(nullable=True),
        'exec_instances': fields.ListOfObjectsField('ExecInstance',
                                                    nullable=True),
        'privileged': fields.BooleanField(nullable=True),
        'healthcheck': z_fields.JsonField(nullable=True),
        'registry_id': fields.IntegerField(nullable=True),
        'registry': fields.ObjectField("Registry", nullable=True),
        'annotations': z_fields.JsonField(nullable=True),
        'cni_metadata': z_fields.JsonField(nullable=True),
        'entrypoint': fields.ListOfStringsField(nullable=True),
    }

    # should be redefined in subclasses
    container_type = None

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            # Lazy-loaded / relational fields are not copied directly.
            if field in ['pci_devices', 'exec_instances', 'registry',
                         'containers', 'init_containers']:
                continue
            if field == 'cpuset':
                container.cpuset = Cpuset._from_dict(
                    db_container['cpuset'])
                continue
            setattr(container, field, db_container[field])
        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [cls._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, cls.container_type,
                                                   uuid)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, cls.container_type,
                                                   name)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @staticmethod
    def get_container_any_type(context, uuid):
        """Find a container of any type based on uuid.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`ContainerBase` object.
        """
        db_container = dbapi.get_container_by_uuid(context, consts.TYPE_ANY,
                                                   uuid)
        # Renamed from ``type`` to avoid shadowing the builtin.
        ctype = db_container['container_type']
        if ctype == consts.TYPE_CONTAINER:
            container_cls = Container
        elif ctype == consts.TYPE_CAPSULE:
            container_cls = Capsule
        elif ctype == consts.TYPE_CAPSULE_CONTAINER:
            container_cls = CapsuleContainer
        elif ctype == consts.TYPE_CAPSULE_INIT_CONTAINER:
            container_cls = CapsuleInitContainer
        else:
            # Interpolate with % so the offending type appears in the
            # message; previously it was passed as an extra positional
            # argument and never formatted into the text.
            raise exception.ZunException(
                _('Unknown container type: %s') % ctype)
        obj = container_cls(context)
        container = container_cls._from_db_object(obj, db_container)
        return container

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None, filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list containers, the filter name could be
                        'name', 'image', 'project_id', 'user_id', 'memory'.
                        For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` object.
        """
        db_containers = dbapi.list_containers(
            context, cls.container_type, limit=limit, marker=marker,
            sort_key=sort_key, sort_dir=sort_dir, filters=filters)
        return cls._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` object.
        """
        db_containers = dbapi.list_containers(context, cls.container_type,
                                              filters={'host': host})
        return cls._from_db_object_list(db_containers, cls, context)

    def _nested_fields_to_primitives(self, values):
        """Convert rich nested fields in ``values`` to DB-storable forms.

        Shared by create() and save(), which previously duplicated this
        serialization logic.
        """
        cpuset_obj = values.pop('cpuset', None)
        if cpuset_obj is not None:
            values['cpuset'] = cpuset_obj._to_dict()
        for name in ('annotations', 'cni_metadata'):
            if values.pop(name, None) is not None:
                values[name] = self.fields[name].to_primitive(
                    self, name, getattr(self, name))

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        values = self.obj_get_changes()
        self._nested_fields_to_primitives(values)
        values['container_type'] = self.container_type
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.container_type, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        self._nested_fields_to_primitives(updates)
        dbapi.update_container(context, self.container_type, self.uuid,
                               updates)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and \
                    getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))

    def obj_load_attr(self, attrname):
        """Lazy-load one of the optional relational attributes."""
        if attrname not in CONTAINER_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })
        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'pci_devices':
            self._load_pci_devices()
        if attrname == 'exec_instances':
            self._load_exec_instances()
        if attrname == 'registry':
            self._load_registry()
        self.obj_reset_changes([attrname])

    def _load_pci_devices(self):
        """Fetch the PCI devices attached to this container."""
        self.pci_devices = pci_device.PciDevice.list_by_container_uuid(
            self._context, self.uuid)

    def _load_exec_instances(self):
        """Fetch the exec instances running in this container."""
        self.exec_instances = exec_inst.ExecInstance.list_by_container_id(
            self._context, self.id)

    def _load_registry(self):
        """Fetch the image registry record, if one is referenced."""
        self.registry = None
        if self.registry_id:
            self.registry = registry.Registry.get_by_id(
                self._context, self.registry_id)

    @base.remotable_classmethod
    def get_count(cls, context, project_id, flag):
        """Get the counts of Container objects in the database.

        :param context: The request context for database access.
        :param project_id: The project_id to count across.
        :param flag: The name of resource, one of the following options:
                     - containers: Count the number of containers owned by the
                       project.
                     - memory: The sum of containers's memory.
                     - cpu: The sum of container's cpu.
                     - disk: The sum of container's disk size.
        """
        usage = dbapi.count_usage(context, cls.container_type, project_id,
                                  flag)[0] or 0.0
        return usage
@base.ZunObjectRegistry.register
class Container(ContainerBase):
# Version 1.0: Initial version
# Version 1.1: Add container_id column
# Version 1.2: Add memory column
# Version 1.3: Add task_state column
# Version 1.4: Add cpu, workdir, ports, hostname and labels columns
# Version 1.5: Add meta column
# Version 1.6: Add addresses column
# Version 1.7: Add host column
# Version 1.8: Add restart_policy
# Version 1.9: Add status_detail column
# Version 1.10: Add tty, stdin_open
# Version 1.11: Add image_driver
# Version 1.12: Add 'Created' | |
pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        """Volume size, per the EMR `sizeInGB` field (GB per the API name)."""
        return pulumi.get(self, "size_in_gb")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        """EBS volume type identifier (see AWS EMR VolumeSpecification)."""
        return pulumi.get(self, "volume_type")
    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        """Optional IOPS setting for the volume; None when unset."""
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceFleetConfigConfiguration(dict):
    """Output type for an EMR instance-fleet application configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property that should
        # be used instead when the object is indexed like a dict.
        camel_to_snake = {
            "configurationProperties": "configuration_properties",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None):
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)

    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        """Value of the `classification` output property."""
        return pulumi.get(self, "classification")

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        """Value of the `configurationProperties` output property."""
        return pulumi.get(self, "configuration_properties")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        """Nested configurations, if any."""
        return pulumi.get(self, "configurations")
@pulumi.output_type
class InstanceFleetConfigEbsBlockDeviceConfig(dict):
    """Output type for an EMR instance-fleet EBS block device config."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property getter.
        camel_to_snake = {
            "volumeSpecification": "volume_specification",
            "volumesPerInstance": "volumes_per_instance",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 volume_specification: 'outputs.InstanceFleetConfigVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.InstanceFleetConfigVolumeSpecification':
        """The volume specification for each requested volume."""
        return pulumi.get(self, "volume_specification")

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        """Number of such volumes per instance; None when unset."""
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class InstanceFleetConfigEbsConfiguration(dict):
    """Output type for an EMR instance-fleet EBS configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property getter.
        camel_to_snake = {
            "ebsBlockDeviceConfigs": "ebs_block_device_configs",
            "ebsOptimized": "ebs_optimized",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']]:
        """The attached block device configurations, if any."""
        return pulumi.get(self, "ebs_block_device_configs")

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        """Whether EBS optimization is requested; None when unset."""
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class InstanceFleetConfigInstanceFleetProvisioningSpecifications(dict):
    """Auto-generated Pulumi output type bundling the on-demand and spot
    provisioning specifications of an EMR instance fleet.  Dict-like; use the
    snake_case property getters rather than camelCase keys.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        suggest = None
        if key == "onDemandSpecification":
            suggest = "on_demand_specification"
        elif key == "spotSpecification":
            suggest = "spot_specification"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceFleetProvisioningSpecifications. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 on_demand_specification: Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification'] = None,
                 spot_specification: Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification'] = None):
        """
        :param on_demand_specification: provisioning spec for on-demand capacity.
        :param spot_specification: provisioning spec for spot capacity.
        """
        # Both specifications are optional; store only what was supplied.
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)
    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification']:
        """Optional on-demand provisioning specification."""
        return pulumi.get(self, "on_demand_specification")
    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification']:
        """Optional spot provisioning specification."""
        return pulumi.get(self, "spot_specification")
@pulumi.output_type
class InstanceFleetConfigInstanceTypeConfig(dict):
    """Auto-generated Pulumi output type describing one instance-type entry
    of an EMR instance fleet (type, bidding, EBS config, weight).  Dict-like;
    use the snake_case property getters rather than camelCase keys.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        # 'configurations' already matches its Python name, so it needs no entry.
        suggest = None
        if key == "instanceType":
            suggest = "instance_type"
        elif key == "bidPrice":
            suggest = "bid_price"
        elif key == "bidPriceAsPercentageOfOnDemandPrice":
            suggest = "bid_price_as_percentage_of_on_demand_price"
        elif key == "customAmiId":
            suggest = "custom_ami_id"
        elif key == "ebsConfiguration":
            suggest = "ebs_configuration"
        elif key == "weightedCapacity":
            suggest = "weighted_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceTypeConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_type: str,
                 bid_price: Optional[str] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.InstanceFleetConfigEbsConfiguration'] = None,
                 weighted_capacity: Optional[int] = None):
        """
        :param instance_type: EC2 instance type name (required).
        :param bid_price: spot bid price — presumably in USD; confirm against AWS EMR docs.
        :param bid_price_as_percentage_of_on_demand_price: spot bid as a percentage of on-demand price.
        :param configurations: optional application configuration entries.
        :param custom_ami_id: optional custom AMI id.
        :param ebs_configuration: optional EBS configuration.
        :param weighted_capacity: optional capacity weight for this instance type.
        """
        # instance_type is the only required field; the rest are stored only if given.
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """EC2 instance type name (required)."""
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        """Optional spot bid price."""
        return pulumi.get(self, "bid_price")
    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
        """Optional spot bid expressed as a percentage of the on-demand price."""
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        """Optional application configuration entries."""
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        """Optional custom AMI id."""
        return pulumi.get(self, "custom_ami_id")
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.InstanceFleetConfigEbsConfiguration']:
        """Optional EBS configuration."""
        return pulumi.get(self, "ebs_configuration")
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[int]:
        """Optional capacity weight for this instance type."""
        return pulumi.get(self, "weighted_capacity")
@pulumi.output_type
class InstanceFleetConfigOnDemandProvisioningSpecification(dict):
    """Auto-generated Pulumi output type for the on-demand provisioning
    specification of an EMR instance fleet.  Dict-like; use the snake_case
    property getter rather than the camelCase key.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        suggest = None
        if key == "allocationStrategy":
            suggest = "allocation_strategy"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigOnDemandProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 allocation_strategy: str):
        """
        :param allocation_strategy: on-demand allocation strategy (required) —
               valid values defined by the AWS EMR API; confirm there.
        """
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> str:
        """The on-demand allocation strategy (required)."""
        return pulumi.get(self, "allocation_strategy")
@pulumi.output_type
class InstanceFleetConfigSpotProvisioningSpecification(dict):
    """Auto-generated Pulumi output type for the spot provisioning
    specification of an EMR instance fleet.  Dict-like; use the snake_case
    property getters rather than camelCase keys.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        suggest = None
        if key == "timeoutAction":
            suggest = "timeout_action"
        elif key == "timeoutDurationMinutes":
            suggest = "timeout_duration_minutes"
        elif key == "allocationStrategy":
            suggest = "allocation_strategy"
        elif key == "blockDurationMinutes":
            suggest = "block_duration_minutes"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigSpotProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 timeout_action: str,
                 timeout_duration_minutes: int,
                 allocation_strategy: Optional[str] = None,
                 block_duration_minutes: Optional[int] = None):
        """
        :param timeout_action: action taken when the spot timeout expires (required).
        :param timeout_duration_minutes: spot provisioning timeout in minutes (required).
        :param allocation_strategy: optional spot allocation strategy.
        :param block_duration_minutes: optional spot block duration in minutes.
        """
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        # Optional fields are stored only when supplied.
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> str:
        """Action taken when the spot provisioning timeout expires (required)."""
        return pulumi.get(self, "timeout_action")
    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> int:
        """Spot provisioning timeout in minutes (required)."""
        return pulumi.get(self, "timeout_duration_minutes")
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[str]:
        """Optional spot allocation strategy."""
        return pulumi.get(self, "allocation_strategy")
    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[int]:
        """Optional spot block duration in minutes."""
        return pulumi.get(self, "block_duration_minutes")
@pulumi.output_type
class InstanceFleetConfigVolumeSpecification(dict):
    """Auto-generated Pulumi output type describing one EBS volume (size,
    type, IOPS) for an EMR instance fleet.  Dict-like; use the snake_case
    property getters rather than camelCase keys.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the wire-format (camelCase) key to its Python property name.
        # 'iops' already matches its Python name, so it needs no entry.
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigVolumeSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        """
        :param size_in_gb: volume size in GB (required).
        :param volume_type: EBS volume type (required) — valid values defined by AWS; confirm there.
        :param iops: optional provisioned IOPS.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        """Volume size in GB (required)."""
        return pulumi.get(self, "size_in_gb")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        """EBS volume type (required)."""
        return pulumi.get(self, "volume_type")
    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        """Optional provisioned IOPS."""
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceGroupConfigAutoScalingPolicy(dict):
    """Auto-generated Pulumi output type for an EMR instance-group
    auto-scaling policy: capacity constraints plus the scaling rules.
    Both fields are required, so no camelCase key-warning helper is generated.
    """
    def __init__(__self__, *,
                 constraints: 'outputs.InstanceGroupConfigScalingConstraints',
                 rules: Sequence['outputs.InstanceGroupConfigScalingRule']):
        """
        :param constraints: scaling constraints (required).
        :param rules: list of scaling rules (required).
        """
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter
    def constraints(self) -> 'outputs.InstanceGroupConfigScalingConstraints':
        """The scaling constraints of this policy."""
        return pulumi.get(self, "constraints")
    @property
    @pulumi.getter
    def rules(self) -> Sequence['outputs.InstanceGroupConfigScalingRule']:
        """The scaling rules of this policy."""
        return pulumi.get(self, "rules")
@pulumi.output_type
class InstanceGroupConfigCloudWatchAlarmDefinition(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "comparisonOperator":
suggest = "comparison_operator"
elif key == "metricName":
suggest = "metric_name"
elif key == "evaluationPeriods":
suggest = "evaluation_periods"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigCloudWatchAlarmDefinition. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
comparison_operator: str,
metric_name: str,
period: int,
threshold: float,
dimensions: Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']] = None,
evaluation_periods: Optional[int] = None,
namespace: Optional[str] = None,
statistic: Optional[str] = None,
unit: Optional[str] = None):
pulumi.set(__self__, "comparison_operator", comparison_operator)
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "period", period)
pulumi.set(__self__, "threshold", threshold)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if evaluation_periods is not None:
pulumi.set(__self__, "evaluation_periods", evaluation_periods)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if statistic is not None:
pulumi.set(__self__, "statistic", statistic)
if unit is not None:
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter(name="comparisonOperator")
def comparison_operator(self) -> str:
return pulumi.get(self, "comparison_operator")
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> str:
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def period(self) -> int:
return pulumi.get(self, "period")
@property
@pulumi.getter
def threshold(self) -> float:
return pulumi.get(self, "threshold")
@property
@pulumi.getter
def dimensions(self) -> Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']]:
return pulumi.get(self, "dimensions")
@property
@pulumi.getter(name="evaluationPeriods")
def evaluation_periods(self) -> Optional[int]:
return pulumi.get(self, "evaluation_periods")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def statistic(self) -> Optional[str]:
return pulumi.get(self, "statistic")
| |
# Repository: Special-K-s-Flightsim-Bots/DCSServerBot — plugins/admin/commands.py
# commands.py
import asyncio
import discord
import os
import platform
import psycopg2
import psycopg2.extras
import re
import subprocess
from contextlib import closing
from core import utils, DCSServerBot, Plugin
from discord.ext import commands, tasks
from typing import Union
from .listener import AdminEventListener
class Agent(Plugin):
    """Administrative Discord cog for the DCS servers running on this node.

    Provides start/stop/update/password/kick/ban style commands, guarded by
    the Discord roles 'DCS', 'DCS Admin' and 'Admin'.  A background task
    mirrors the state of the registered DCS servers into the bot presence.
    """
    # Presence emoji shown for each DCS server status.
    STATUS_EMOJI = {
        'Loading': '🔄',
        'Paused': '⏸️',
        'Running': '▶️',
        'Stopped': '⏹️'
    }

    def __init__(self, plugin, bot, listener):
        super().__init__(plugin, bot, listener)
        # Start the periodic presence-update task.
        self.update_bot_status.start()

    def cog_unload(self):
        self.update_bot_status.cancel()
        # BUG FIX: super() already binds the instance; the original called
        # ``super().cog_unload(self)``, passing ``self`` twice, which raises
        # a TypeError when the cog is unloaded.
        super().cog_unload()

    @commands.command(description='Lists the registered DCS servers')
    @utils.has_role('DCS')
    @commands.guild_only()
    async def servers(self, ctx):
        """Post a mission embed for every running or paused server on this node."""
        if len(self.bot.DCSServers) > 0:
            for server_name, server in self.bot.DCSServers.items():
                if server['status'] in ['Running', 'Paused']:
                    mission = await self.bot.sendtoDCSSync(server, {"command": "getRunningMission", "channel": 0})
                    await ctx.send(embed=utils.format_mission_embed(self, mission))
        else:
            # NOTE(review): nothing is sent when servers are registered but
            # none is running or paused — confirm whether a reply is wanted
            # in that case, too.
            await ctx.send('No server running on host {}'.format(platform.node()))

    @commands.command(description='Starts a DCS/DCS-SRS server')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def startup(self, ctx):
        """Start the DCS server of this channel and optionally its DCS-SRS server."""
        server = await utils.get_server(self, ctx)
        if server:
            installation = server['installation']
            if server['status'] in ['Stopped', 'Shutdown']:
                await ctx.send('DCS server "{}" starting up ...'.format(server['server_name']))
                utils.start_dcs(self, installation)
                server['status'] = 'Loading'
                await self.bot.audit(
                    f"User {ctx.message.author.display_name} started DCS server \"{server['server_name']}\".")
            else:
                await ctx.send('DCS server "{}" is already started.'.format(server['server_name']))
            # SRS is only handled when the installation has an SRS config.
            if 'SRS_CONFIG' in self.config[installation]:
                # A closed SRS port means the SRS server is not running yet.
                if not utils.isOpen(self.config[installation]['SRS_HOST'], self.config[installation]['SRS_PORT']):
                    if await utils.yn_question(self, ctx, 'Do you want to start the DCS-SRS server "{}"?'.format(server['server_name'])) is True:
                        await ctx.send('DCS-SRS server "{}" starting up ...'.format(server['server_name']))
                        utils.start_srs(self, installation)
                        await self.bot.audit(
                            f"User {ctx.message.author.display_name} started DCS-SRS server \"{server['server_name']}\".")
                else:
                    await ctx.send('DCS-SRS server "{}" is already started.'.format(server['server_name']))

    @commands.command(description='Shutdown a DCS/DCS-SRS server')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def shutdown(self, ctx):
        """Shut down the DCS server of this channel and optionally its DCS-SRS server."""
        server = await utils.get_server(self, ctx)
        if server:
            installation = server['installation']
            if server['status'] in ['Unknown', 'Loading']:
                await ctx.send('Server is currently starting up. Please wait and try again.')
            elif server['status'] not in ['Stopped', 'Shutdown']:
                if await utils.yn_question(self, ctx, 'Do you want to shut down the DCS server "{}"?'.format(server['server_name'])) is True:
                    await ctx.send('Shutting down DCS server "{}" ...'.format(server['server_name']))
                    self.bot.sendtoDCS(server, {"command": "shutdown", "channel": ctx.channel.id})
                    server['status'] = 'Shutdown'
                    await self.bot.audit(
                        f"User {ctx.message.author.display_name} shut DCS server \"{server['server_name']}\" down.")
            else:
                await ctx.send('DCS server {} is already shut down.'.format(server['server_name']))
            if 'SRS_CONFIG' in self.config[installation]:
                # An open SRS port means the SRS server is still running.
                if utils.isOpen(self.config[installation]['SRS_HOST'], self.config[installation]['SRS_PORT']):
                    if await utils.yn_question(self, ctx, 'Do you want to shut down the DCS-SRS server "{}"?'.format(server['server_name'])) is True:
                        p = utils.findProcess('SR-Server.exe', installation)
                        if p:
                            await ctx.send('Shutting down DCS-SRS server "{}" ...'.format(server['server_name']))
                            p.kill()
                            await self.bot.audit(
                                f"User {ctx.message.author.display_name} shut DCS-SRS server \"{server['server_name']}\" down.")
                        else:
                            await ctx.send('Shutdown of DCS-SRS server "{}" failed.'.format(server['server_name']))
                else:
                    await ctx.send('DCS-SRS server {} is already shut down.'.format(server['server_name']))

    @commands.command(description='Update a DCS Installation')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def update(self, ctx):
        """Check for a newer DCS version and, after confirmation, run the updater."""
        server = await utils.get_server(self, ctx)
        if server:
            # check versions
            branch, old_version = utils.getInstalledVersion(self.config['DCS']['DCS_INSTALLATION'])
            new_version = await utils.getLatestVersion(branch)
            if old_version == new_version:
                await ctx.send('Your installed version {} is the latest on branch {}.'.format(old_version, branch))
            else:
                await self.bot.audit(
                    f"User {ctx.message.author.display_name} started an update of all DCS servers on node {platform.node()}.")
                # Collect every server that is still running; they must be
                # stopped before the updater can touch the installation.
                servers = []
                for key, item in self.bot.DCSServers.items():
                    if item['status'] not in ['Stopped', 'Shutdown']:
                        servers.append(item)
                if len(servers):
                    if await utils.yn_question(self, ctx, 'Would you like me to stop the running servers and run the update?') is True:
                        # NOTE(review): this rebinds the outer ``server``
                        # variable; harmless here since it is not used again,
                        # but worth renaming in a follow-up.
                        for server in servers:
                            self.bot.sendtoDCS(server, {"command": "shutdown", "channel": ctx.channel.id})
                            await ctx.send('Shutting down server "{}" ...'.format(server['server_name']))
                            server['status'] = 'Shutdown'
                    else:
                        return
                if await utils.yn_question(self, ctx, 'Would you like to update from version {} to {}?'.format(old_version, new_version)) is True:
                    self.log.info('Updating DCS to the latest version.')
                    subprocess.Popen(['dcs_updater.exe', '--quiet', 'update'], executable=os.path.expandvars(
                        self.config['DCS']['DCS_INSTALLATION']) + '\\bin\\dcs_updater.exe')
                    await ctx.send('Updating DCS to the latest version ...')

    @commands.command(description='Change the password of a DCS server')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def password(self, ctx):
        """Interactively change the password of a shut-down DCS server."""
        server = await utils.get_server(self, ctx)
        if server:
            if server['status'] == 'Shutdown':
                msg = await ctx.send('Please enter the new password: ')
                # NOTE(review): wait_for has no check= predicate, so the next
                # message from *anyone* in *any* channel is taken as the
                # password — confirm whether author/channel filtering is wanted.
                response = await self.bot.wait_for('message', timeout=300.0)
                password = response.content
                # Remove both messages so the password does not linger in chat.
                await msg.delete()
                await response.delete()
                utils.changeServerSettings(server['server_name'], 'password', password)
                await ctx.send('Password has been changed.')
                await self.bot.audit(
                    f"User {ctx.message.author.display_name} changed the password of server \"{server['server_name']}\".")
            else:
                await ctx.send('Server "{}" has to be shut down to change the password.'.format(server['server_name']))

    @commands.command(description='Kick a user by ucid', usage='<ucid>')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def kick(self, ctx, name, *args):
        """Kick the named player from the DCS server, with an optional reason."""
        server = await utils.get_server(self, ctx)
        if server:
            if len(args) > 0:
                reason = ' '.join(args)
            else:
                reason = 'n/a'
            self.bot.sendtoDCS(server, {"command": "kick", "name": name, "reason": reason})
            await ctx.send(f'User "{name}" kicked.')
            # BUG FIX: the conditional expression must be parenthesised.
            # Without the parentheses the ternary applied to the whole
            # concatenation, so the audit message degenerated to just '.'
            # whenever no reason was given.
            await self.bot.audit(f'User {ctx.message.author.display_name} kicked player {name}' +
                                 (f' with reason "{reason}".' if reason != 'n/a' else '.'))

    @commands.command(description='Bans a user by ucid or discord id', usage='<member / ucid> [reason]')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def ban(self, ctx, user: Union[discord.Member, str], *args):
        """Ban a player on every registered DCS server, by Discord member or ucid."""
        if len(args) > 0:
            reason = ' '.join(args)
        else:
            reason = 'n/a'
        conn = self.pool.getconn()
        try:
            with closing(conn.cursor()) as cursor:
                if isinstance(user, discord.Member):
                    # a player can have multiple ucids
                    cursor.execute('SELECT ucid FROM players WHERE discord_id = %s', (user.id, ))
                    ucids = [row[0] for row in cursor.fetchall()]
                else:
                    # ban a specific ucid only
                    ucids = [user]
                for ucid in ucids:
                    for server in self.bot.DCSServers.values():
                        self.bot.sendtoDCS(server, {
                            "command": "ban",
                            "ucid": ucid,
                            "reason": reason
                        })
        except (Exception, psycopg2.DatabaseError) as error:
            self.log.exception(error)
        finally:
            self.pool.putconn(conn)

    @commands.command(description='Unbans a user by ucid or discord id', usage='<member / ucid>')
    @utils.has_role('DCS Admin')
    @commands.guild_only()
    async def unban(self, ctx, user: Union[discord.Member, str]):
        """Unban a player on every registered DCS server, by Discord member or ucid."""
        conn = self.pool.getconn()
        try:
            with closing(conn.cursor()) as cursor:
                if isinstance(user, discord.Member):
                    # a player can have multiple ucids
                    cursor.execute('SELECT ucid FROM players WHERE discord_id = %s', (user.id, ))
                    ucids = [row[0] for row in cursor.fetchall()]
                else:
                    # unban a specific ucid only
                    ucids = [user]
                for ucid in ucids:
                    for server in self.bot.DCSServers.values():
                        self.bot.sendtoDCS(server, {"command": "unban", "ucid": ucid})
        except (Exception, psycopg2.DatabaseError) as error:
            self.log.exception(error)
        finally:
            self.pool.putconn(conn)

    @commands.command(description='Unregisters the server from this instance')
    @utils.has_role('Admin')
    @commands.guild_only()
    async def unregister(self, ctx, node=platform.node()):
        """Remove a stopped server's registration (its embeds) from this bot instance."""
        server = await utils.get_server(self, ctx)
        if server:
            server_name = server['server_name']
            if server['status'] in ['Stopped', 'Shutdown']:
                if await utils.yn_question(self, ctx, 'Are you sure to unregister server "{}" from node "{}"?'.format(server_name, node)) is True:
                    self.bot.embeds.pop(server_name)
                    await ctx.send('Server {} unregistered.'.format(server_name))
                    await self.bot.audit(
                        f"User {ctx.message.author.display_name} unregistered DCS server \"{server['server_name']}\".")
                else:
                    await ctx.send('Aborted.')
            else:
                await ctx.send('Please stop server "{}" before unregistering!'.format(server_name))

    @commands.command(description='Rename a server')
    @utils.has_role('Admin')
    @commands.guild_only()
    async def rename(self, ctx, *args):
        """Rename a stopped server in the database, the server settings and the bot state."""
        server = await utils.get_server(self, ctx)
        if server:
            oldname = server['server_name']
            newname = ' '.join(args)
            if server['status'] in ['Stopped', 'Shutdown']:
                conn = self.pool.getconn()
                try:
                    if await utils.yn_question(self, ctx, 'Are you sure to rename server "{}" to "{}"?'.format(oldname, newname)) is True:
                        # Rename in every table that references the server name,
                        # in one transaction.
                        with closing(conn.cursor()) as cursor:
                            cursor.execute('UPDATE servers SET server_name = %s WHERE server_name = %s',
                                           (newname, oldname))
                            cursor.execute('UPDATE message_persistence SET server_name = %s WHERE server_name = %s',
                                           (newname, oldname))
                            cursor.execute('UPDATE missions SET server_name = %s WHERE server_name = %s',
                                           (newname, oldname))
                            conn.commit()
                        utils.changeServerSettings(server['server_name'], 'name', newname)
                        server['server_name'] = newname
                        self.bot.embeds[newname] = self.bot.embeds[oldname]
                        self.bot.embeds.pop(oldname)
                        await ctx.send('Server has been renamed.')
                        await self.bot.audit(
                            f'User {ctx.message.author.display_name} renamed DCS server "{oldname}" to "{newname}".')
                except (Exception, psycopg2.DatabaseError) as error:
                    self.log.exception(error)
                    conn.rollback()
                finally:
                    self.pool.putconn(conn)
            else:
                await ctx.send('Please stop server "{}" before renaming!'.format(oldname))

    @tasks.loop(minutes=1.0)
    async def update_bot_status(self):
        """Cycle the bot presence through the status of each known server."""
        for server_name, server in self.bot.DCSServers.items():
            if server['status'] in ['Loading', 'Stopped', 'Running', 'Paused']:
                await self.bot.change_presence(activity=discord.Game(self.STATUS_EMOJI[server['status']] + ' ' +
                                                                     re.sub(self.config['FILTER']['SERVER_FILTER'],
                                                                            '', server_name).strip()))
                # Show each server's status for a few seconds before moving on.
                await asyncio.sleep(10)
class Master(Agent):
@commands.command(description='Prune unused data in the database', hidden=True)
@utils.has_role('Admin')
@commands.guild_only()
async def prune(self, ctx):
if not await utils.yn_question(self, ctx, 'This will remove unused data from your database and compact '
'it.\nAre you sure?'):
return
conn = self.pool.getconn()
try:
with closing(conn.cursor()) as cursor:
cursor.execute('CREATE TEMPORARY TABLE temp_players (discord_id BIGINT)')
cursor.execute('CREATE TEMPORARY TABLE temp_missions (id SERIAL PRIMARY KEY, server_name TEXT NOT '
'NULL, mission_name TEXT NOT NULL, mission_theatre TEXT NOT NULL, mission_start '
'TIMESTAMP NOT NULL DEFAULT NOW(), mission_end TIMESTAMP)')
cursor.execute('CREATE TEMPORARY TABLE temp_statistics (mission_id INTEGER NOT NULL, player_ucid TEXT '
'NOT NULL, slot TEXT NOT NULL, kills INTEGER DEFAULT 0, pvp INTEGER DEFAULT 0, '
'deaths INTEGER DEFAULT 0, ejections INTEGER DEFAULT 0, crashes INTEGER DEFAULT 0, '
'teamkills INTEGER DEFAULT 0, kills_planes INTEGER DEFAULT 0, kills_helicopters '
'INTEGER DEFAULT 0, kills_ships INTEGER DEFAULT 0, kills_sams INTEGER DEFAULT 0, '
'kills_ground INTEGER DEFAULT 0, deaths_pvp INTEGER DEFAULT 0, deaths_planes INTEGER '
'DEFAULT 0, deaths_helicopters INTEGER DEFAULT 0, deaths_ships INTEGER DEFAULT 0, '
'deaths_sams INTEGER DEFAULT 0, deaths_ground INTEGER DEFAULT 0, takeoffs INTEGER '
'DEFAULT 0, landings INTEGER DEFAULT 0, hop_on TIMESTAMP NOT NULL DEFAULT NOW(), '
'hop_off TIMESTAMP, PRIMARY KEY (mission_id, player_ucid, slot, hop_on))')
for member in self.bot.guilds[0].members:
cursor.execute('INSERT INTO temp_players VALUES (%s)', (member.id, ))
cursor.execute('SELECT COUNT(*) FROM statistics s, players p WHERE | |
# Repository: Imperas/riscv-dv
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Regression script for RISC-V random instruction generator
"""
import argparse
import os
import subprocess
import re
import sys
import logging
from datetime import date
from scripts.lib import *
from scripts.spike_log_to_trace_csv import *
from scripts.ovpsim_log_to_trace_csv import *
from scripts.whisper_log_trace_csv import *
from scripts.sail_log_to_trace_csv import *
from scripts.instr_trace_compare import *
LOGGER = logging.getLogger()
def get_generator_cmd(simulator, simulator_yaml, cov):
    """Look up the compile and simulation commands for the generator.

    Args:
      simulator      : RTL simulator used to run the instruction generator
      simulator_yaml : RTL simulator configuration file in YAML format
      cov            : Enable functional coverage

    Returns:
      compile_cmd : RTL simulator command to compile the instruction generator
      sim_cmd     : RTL simulator command to run the instruction generator
    """
    logging.info("Processing simulator setup file : %s" % simulator_yaml)
    yaml_data = read_yaml(simulator_yaml)
    # Walk the YAML entries until the requested simulator is found.
    for entry in yaml_data:
        if entry['tool'] != simulator:
            continue
        logging.info("Found matching simulator: %s" % entry['tool'])
        compile_spec = entry['compile']
        compile_cmd = compile_spec['cmd']
        # Expand (or blank out) the coverage placeholder in every compile step.
        for idx, step in enumerate(compile_cmd):
            cov_opts = compile_spec['cov_opts'].rstrip() if (('cov_opts' in compile_spec) and cov) else ''
            compile_cmd[idx] = re.sub('<cov_opts>', cov_opts, step)
        sim_spec = entry['sim']
        sim_cov_opts = sim_spec['cov_opts'].rstrip() if (('cov_opts' in sim_spec) and cov) else ''
        sim_cmd = re.sub('<cov_opts>', sim_cov_opts, sim_spec['cmd'])
        # Substitute any environment variables the entry declares.
        if 'env_var' in entry:
            for env_var in entry['env_var'].split(','):
                token = "<" + env_var + ">"
                value = get_env_var(env_var)
                for idx in range(len(compile_cmd)):
                    compile_cmd[idx] = re.sub(token, value, compile_cmd[idx])
                sim_cmd = re.sub(token, value, sim_cmd)
        return compile_cmd, sim_cmd
    logging.error("Cannot find RTL simulator %0s" % simulator)
    sys.exit(1)
def parse_iss_yaml(iss, iss_yaml, isa, setting_dir):
    """Parse ISS YAML to get the simulation command

    Args:
      iss         : target ISS used to look up in ISS YAML
      iss_yaml    : ISS configuration file in YAML format
      isa         : ISA variant passed to the ISS
      setting_dir : Generator setting directory

    Returns:
      cmd : ISS run command
    """
    logging.info("Processing ISS setup file : %s" % iss_yaml)
    yaml_data = read_yaml(iss_yaml)
    cwd = os.path.dirname(os.path.realpath(__file__))
    # Search for matched ISS
    for entry in yaml_data:
        if entry['iss'] == iss:
            logging.info("Found matching ISS: %s" % entry['iss'])
            cmd = entry['cmd'].rstrip()
            # FIX: use str.replace instead of re.sub.  The '<...>' placeholders
            # are literal tokens; the original patterns ("\<path_var\>") used
            # invalid "\<" escape sequences (a DeprecationWarning since Python
            # 3.6), and re.sub would additionally misinterpret backslashes in
            # the substituted values (e.g. Windows paths) as replacement
            # escapes.
            cmd = cmd.replace("<path_var>", get_env_var(entry['path_var']))
            if iss == "ovpsim":
                cmd = cmd.replace("<cfg_path>", setting_dir)
            elif iss == "whisper":
                m = re.search(r"rv(?P<xlen>[0-9]+?)(?P<variant>[a-z]+?)$", isa)
                if m:
                    # TODO: Support u/s mode
                    cmd = cmd.replace("<xlen>", m.group('xlen'))
                    # 'g' is shorthand for the imafd extension set.
                    variant = m.group('variant').replace('g', 'imafd')
                    cmd = cmd.replace("<variant>", variant)
                else:
                    logging.error("Illegal ISA %0s" % isa)
                    # NOTE(review): substituting setting_dir for <xlen> on an
                    # illegal ISA looks suspicious, but it preserves the
                    # original behavior — confirm intent.
                    cmd = cmd.replace("<xlen>", setting_dir)
            else:
                cmd = cmd.replace("<variant>", isa)
            return cmd
    logging.error("Cannot find ISS %0s" % iss)
    sys.exit(1)
def get_iss_cmd(base_cmd, elf, log):
    """Get the ISS simulation command

    Args:
      base_cmd : Original command template
      elf      : ELF file to run ISS simualtion
      log      : ISS simulation log name

    Returns:
      cmd : Command for ISS simulation
    """
    # FIX: use str.replace instead of re.sub("\<elf\>", ...).  '<elf>' is a
    # literal token; the original pattern used invalid "\<" escape sequences
    # (DeprecationWarning since Python 3.6), and re.sub would misinterpret
    # backslashes in the ELF path (e.g. Windows paths) as replacement escapes.
    cmd = base_cmd.replace("<elf>", elf)
    # Redirect both stdout and stderr of the ISS into the log file.
    cmd += (" &> %s" % log)
    return cmd
def gen(test_list, csr_file, end_signature_addr, isa, simulator,
        simulator_yaml, output_dir, sim_only, compile_only, lsf_cmd, seed,
        cwd, cmp_opts, sim_opts, timeout_s, core_setting_dir, ext_dir, cov,
        log_suffix, batch_size, seed_yaml, stop_on_first_error, verbose=False):
    """Run the instruction generator

    Args:
      test_list             : List of assembly programs to be compiled
      csr_file              : YAML file containing description of all CSRs
      end_signature_addr    : Address that tests will write pass/fail signature to at end of test
      isa                   : Processor supported ISA subset
      simulator             : RTL simulator used to run instruction generator
      simulator_yaml        : RTL simulator configuration file in YAML format
      output_dir            : Output directory of the ELF files
      sim_only              : Simulation only
      compile_only          : Compile the generator only
      lsf_cmd               : LSF command used to run the instruction generator
      cwd                   : Filesystem path to RISCV-DV repo
      seed                  : Seed to the instruction generator
      cmp_opts              : Compile options for the generator
      sim_opts              : Simulation options for the generator
      timeout_s             : Timeout limit in seconds
      core_setting_dir      : Path for riscv_core_setting.sv
      ext_dir               : User extension directory
      cov                   : Enable functional coverage
      log_suffix            : Simulation log file name suffix
      batch_size            : Number of tests to generate per run
      seed_yaml             : Seed specification from a prior regression
      stop_on_first_error   : will end run on first error detected
    """
    # Mutually exclusive options between compile_only and sim_only
    if compile_only and sim_only:
        # NOTE(review): only logs the conflict and continues — confirm
        # whether a sys.exit(1) was intended here.
        logging.error("argument -co is not allowed with argument -so")
    # Setup the compile and simulation command for the generator
    compile_cmd = []
    sim_cmd = ""
    compile_cmd, sim_cmd = get_generator_cmd(simulator, simulator_yaml, cov);
    # Nothing to do when there is no test and we are not in compile-only mode.
    if ((compile_only == 0) and (len(test_list) == 0)):
        return
    # Compile the instruction generator
    if not sim_only:
        # The directed CSR test is generated by a python script, so the
        # SystemVerilog generator build can be skipped if it is the only test.
        if (not((len(test_list) == 1) and (test_list[0]['test'] == 'riscv_csr_test'))):
            logging.info("Building RISC-V instruction generator")
            for cmd in compile_cmd:
                # Expand the placeholders of each compile step; the loop
                # variable is rebound, the list itself stays untouched.
                cmd = re.sub("<out>", os.path.abspath(output_dir), cmd)
                cmd = re.sub("<setting>", core_setting_dir, cmd)
                if ext_dir == "":
                    cmd = re.sub("<user_extension>", "<cwd>/user_extension", cmd)
                else:
                    cmd = re.sub("<user_extension>", ext_dir, cmd)
                cmd = re.sub("<cwd>", cwd, cmd)
                cmd = re.sub("<cmp_opts>", cmp_opts, cmd)
                logging.debug("Compile command: %s" % cmd)
                output = run_cmd(cmd)
    # Run the instruction generator
    if not compile_only:
        cmd_list = []
        # Expand the placeholders shared by every simulation run.
        sim_cmd = re.sub("<out>", os.path.abspath(output_dir), sim_cmd)
        sim_cmd = re.sub("<cwd>", cwd, sim_cmd)
        sim_cmd = re.sub("<sim_opts>", sim_opts, sim_cmd)
        # Reuse the seeds of a prior regression when a seed YAML is given,
        # so failures can be reproduced exactly.
        if seed_yaml:
            rerun_seed = read_yaml(seed_yaml)
        else:
            rerun_seed = {}
        logging.info("Running RISC-V instruction generator")
        sim_seed = {}
        for test in test_list:
            iterations = test['iterations']
            logging.info("Generating %d %s" % (iterations, test['test']))
            if iterations > 0:
                """
                If we are running a CSR test, need to call a separate python script
                to generate directed CSR test code, located at scripts/gen_csr_test.py.
                """
                if test['test'] == 'riscv_csr_test':
                    cmd = "python3 " + cwd + "/scripts/gen_csr_test.py" + \
                          (" --csr_file %s" % csr_file) + \
                          (" --xlen %s" % re.search(r"(?P<xlen>[0-9]+)", isa).group("xlen")) + \
                          (" --iterations %i" % iterations) + \
                          (" --out %s/asm_tests" % output_dir) + \
                          (" --end_signature_addr %s" % end_signature_addr)
                    if lsf_cmd:
                        # Defer execution: LSF jobs are launched in parallel below.
                        cmd_list.append(cmd)
                    else:
                        output = run_cmd(cmd, timeout_s)
                else:
                    # Split the iterations into batches (ceiling division).
                    if batch_size > 0:
                        batch_cnt = int((iterations + batch_size - 1) / batch_size);
                    else:
                        batch_cnt = 1
                    logging.info("Running %s with %0d batches" % (test['test'], batch_cnt))
                    for i in range(0, batch_cnt):
                        test_id = '%0s_%0d' % (test['test'], i)
                        # Prefer the recorded seed of a prior run; otherwise draw one.
                        if test_id in rerun_seed:
                            rand_seed = rerun_seed[test_id]
                        else:
                            rand_seed = get_seed(seed)
                        # The last batch gets whatever iterations remain.
                        if i < batch_cnt - 1:
                            test_cnt = batch_size
                        else:
                            test_cnt = iterations - i * batch_size;
                        cmd = lsf_cmd + " " + sim_cmd.rstrip() + \
                              (" +UVM_TESTNAME=%s " % test['gen_test']) + \
                              (" +num_of_tests=%i " % test_cnt) + \
                              (" +start_idx=%d " % (i*batch_size)) + \
                              (" +asm_file_name=%s/asm_tests/%s " % (output_dir, test['test'])) + \
                              (" -l %s/sim_%s_%d%s.log " % (output_dir, test['test'], i, log_suffix))
                        if verbose:
                            cmd += "+UVM_VERBOSITY=UVM_HIGH "
                        cmd = re.sub("<seed>", str(rand_seed), cmd)
                        # Record the seed actually used so the run can be reproduced.
                        sim_seed[test_id] = str(rand_seed)
                        if "gen_opts" in test:
                            cmd += test['gen_opts']
                        # Without the 'c' extension, compressed instructions
                        # must be disabled in the generator.
                        if not re.search("c", isa):
                            cmd += "+disable_compressed_instr=1 ";
                        if lsf_cmd:
                            cmd_list.append(cmd)
                        else:
                            logging.info("Running %s, batch %0d/%0d, test_cnt:%0d" %
                                         (test['test'], i+1, batch_cnt, test_cnt))
                            output = run_cmd(cmd, timeout_s)
        # Persist the seeds of this regression for later reruns.
        if sim_seed:
            with open(('%s/seed.yaml' % os.path.abspath(output_dir)) , 'w') as outfile:
                yaml.dump(sim_seed, outfile, default_flow_style=False)
        if lsf_cmd:
            # Launch all deferred LSF jobs in parallel.
            run_parallel_cmd(cmd_list, timeout_s)
def gcc_compile(test_list, output_dir, isa, mabi, opts):
"""Use riscv gcc toolchain to compile the assembly program
Args:
test_list : List of assembly programs to be compiled
output_dir : Output directory of the ELF files
isa : ISA variant passed to GCC
mabi : MABI variant passed to GCC
"""
cwd = os.path.dirname(os.path.realpath(__file__))
for test in test_list:
for i in range(0, test['iterations']):
if 'no_gcc' in test and test['no_gcc'] == 1:
continue
prefix = ("%s/asm_tests/%s_%d" % (output_dir, test['test'], i))
asm = prefix + ".S"
elf = prefix + ".o"
binary = prefix + ".bin"
test_isa = isa
# gcc comilation
cmd = ("%s -static -mcmodel=medany \
-fvisibility=hidden -nostdlib \
-nostartfiles %s \
-I%s/user_extension \
-T%s/scripts/link.ld %s -o %s " % \
(get_env_var("RISCV_GCC"), asm, cwd, cwd, opts, elf))
if 'gcc_opts' in test:
cmd += test['gcc_opts']
if 'gen_opts' in test:
# Disable compressed instruction
if re.search('disable_compressed_instr', test['gen_opts']):
test_isa = re.sub("c", "", test_isa)
# If march/mabi is not defined in the test gcc_opts, use the default
# setting from the command | |
4767 9.550395119310684851200738257147825E-2871 4.775197559655342425600369128573913E-2871
4768 2.387598779827671212800184564286957E-2871 1.193799389913835606400092282143478E-2871
4769 5.96899694956917803200046141071739E-2872 2.984498474784589016000230705358695E-2872
4770 1.492249237392294508000115352679348E-2872 7.461246186961472540000576763396738E-2873
4771 3.730623093480736270000288381698369E-2873 1.865311546740368135000144190849185E-2873
4772 9.326557733701840675000720954245925E-2874 4.663278866850920337500360477122963E-2874
4773 2.331639433425460168750180238561482E-2874 1.165819716712730084375090119280741E-2874
4774 5.829098583563650421875450596403705E-2875 2.914549291781825210937725298201853E-2875
4775 1.457274645890912605468862649100927E-2875 7.286373229454563027344313245504633E-2876
4776 3.643186614727281513672156622752317E-2876 1.821593307363640756836078311376158E-2876
4777 9.10796653681820378418039155688079E-2877 4.553983268409101892090195778440395E-2877
4778 2.276991634204550946045097889220198E-2877 1.138495817102275473022548944610099E-2877
4779 5.692479085511377365112744723050495E-2878 2.846239542755688682556372361525248E-2878
4780 1.423119771377844341278186180762624E-2878 7.11559885688922170639093090381312E-2879
4781 3.55779942844461085319546545190656E-2879 1.77889971422230542659773272595328E-2879
4782 8.8944985711115271329886636297664E-2880 4.4472492855557635664943318148832E-2880
4783 2.2236246427778817832471659074416E-2880 1.1118123213889408916235829537208E-2880
4784 5.559061606944704458117914768604E-2881 2.779530803472352229058957384302E-2881
4785 1.389765401736176114529478692151E-2881 6.948827008680880572647393460755E-2882
4786 3.4744135043404402863236967303775E-2882 1.73720675217022014316184836518875E-2882
4787 8.68603376085110071580924182594375E-2883 4.343016880425550357904620912971875E-2883
4788 2.171508440212775178952310456485938E-2883 1.085754220106387589476155228242969E-2883
4789 5.428771100531937947380776141214845E-2884 2.714385550265968973690388070607423E-2884
4790 1.357192775132984486845194035303712E-2884 6.785963875664922434225970176518558E-2885
4791 3.392981937832461217112985088259279E-2885 1.696490968916230608556492544129640E-2885
4792 8.48245484458115304278246272064820E-2886 4.24122742229057652139123136032410E-2886
4793 2.12061371114528826069561568016205E-2886 1.060306855572644130347807840081025E-2886
4794 5.301534277863220651739039200405125E-2887 2.650767138931610325869519600202563E-2887
4795 1.325383569465805162934759800101282E-2887 6.626917847329025814673799000506408E-2888
4796 3.313458923664512907336899500253204E-2888 1.656729461832256453668449750126602E-2888
4797 8.28364730916128226834224875063301E-2889 4.141823654580641134171124375316505E-2889
4798 2.070911827290320567085562187658253E-2889 1.035455913645160283542781093829126E-2889
4799 5.17727956822580141771390546914563E-2890 2.588639784112900708856952734572815E-2890
4800 1.294319892056450354428476367286408E-2890 6.471599460282251772142381836432038E-2891
4801 3.235799730141125886071190918216019E-2891 1.617899865070562943035595459108010E-2891
4802 8.08949932535281471517797729554005E-2892 4.044749662676407357588988647770025E-2892
4803 2.022374831338203678794494323885013E-2892 1.011187415669101839397247161942506E-2892
4804 5.05593707834550919698623580971253E-2893 2.527968539172754598493117904856265E-2893
4805 1.263984269586377299246558952428133E-2893 6.319921347931886496232794762140663E-2894
4806 3.159960673965943248116397381070332E-2894 1.579980336982971624058198690535166E-2894
4807 7.89990168491485812029099345267583E-2895 3.949950842457429060145496726337915E-2895
4808 1.974975421228714530072748363168958E-2895 9.874877106143572650363741815844788E-2896
4809 4.937438553071786325181870907922394E-2896 2.468719276535893162590935453961197E-2896
4810 1.234359638267946581295467726980599E-2896 6.171798191339732906477338634902993E-2897
4811 3.085899095669866453238669317451497E-2897 1.542949547834933226619334658725748E-2897
4812 7.71474773917466613309667329362874E-2898 3.85737386958733306654833664681437E-2898
4813 1.928686934793666533274168323407185E-2898 9.643434673968332666370841617035925E-2899
4814 4.821717336984166333185420808517963E-2899 2.410858668492083166592710404258981E-2899
4815 1.205429334246041583296355202129491E-2899 6.027146671230207916481776010647453E-2900
4816 3.013573335615103958240888005323727E-2900 1.506786667807551979120444002661863E-2900
4817 7.533933339037759895602220013309315E-2901 3.766966669518879947801110006654658E-2901
4818 1.883483334759439973900555003327329E-2901 9.417416673797199869502775016636645E-2902
4819 4.708708336898599934751387508318323E-2902 2.354354168449299967375693754159161E-2902
4820 1.177177084224649983687846877079581E-2902 5.885885421123249918439234385397903E-2903
4821 2.942942710561624959219617192698952E-2903 1.471471355280812479609808596349476E-2903
4822 7.35735677640406239804904298174738E-2904 3.67867838820203119902452149087369E-2904
4823 1.839339194101015599512260745436845E-2904 9.196695970505077997561303727184225E-2905
4824 4.598347985252538998780651863592113E-2905 2.299173992626269499390325931796056E-2905
4825 1.149586996313134749695162965898028E-2905 5.74793498156567374847581482949014E-2906
4826 2.87396749078283687423790741474507E-2906 1.436983745391418437118953707372535E-2906
4827 7.184918726957092185594768536862675E-2907 3.592459363478546092797384268431338E-2907
4828 1.796229681739273046398692134215669E-2907 8.981148408696365231993460671078345E-2908
4829 4.490574204348182615996730335539173E-2908 2.245287102174091307998365167769586E-2908
4830 1.122643551087045653999182583884793E-2908 5.613217755435228269995912919423965E-2909
4831 2.806608877717614134997956459711983E-2909 1.403304438858807067498978229855991E-2909
4832 7.016522194294035337494891149279955E-2910 3.508261097147017668747445574639978E-2910
4833 1.754130548573508834373722787319989E-2910 8.770652742867544171868613936599945E-2911
4834 4.385326371433772085934306968299973E-2911 2.192663185716886042967153484149986E-2911
4835 1.096331592858443021483576742074993E-2911 5.481657964292215107417883710374965E-2912
4836 2.740828982146107553708941855187483E-2912 1.370414491073053776854470927593741E-2912
4837 6.852072455365268884272354637968705E-2913 3.426036227682634442136177318984353E-2913
4838 1.713018113841317221068088659492177E-2913 8.565090569206586105340443297460883E-2914
4839 4.282545284603293052670221648730442E-2914 2.141272642301646526335110824365221E-2914
4840 1.070636321150823263167555412182611E-2914 5.353181605754116315837777060913053E-2915
4841 2.676590802877058157918888530456527E-2915 1.338295401438529078959444265228263E-2915
4842 6.691477007192645394797221326141315E-2916 3.345738503596322697398610663070658E-2916
4843 1.672869251798161348699305331535329E-2916 8.364346258990806743496526657676645E-2917
4844 4.182173129495403371748263328838323E-2917 2.091086564747701685874131664419161E-2917
4845 1.045543282373850842937065832209581E-2917 5.227716411869254214685329161047903E-2918
4846 2.613858205934627107342664580523952E-2918 1.306929102967313553671332290261976E-2918
4847 6.53464551483656776835666145130988E-2919 3.26732275741828388417833072565494E-2919
4848 1.63366137870914194208916536282747E-2919 8.16830689354570971044582681413735E-2920
4849 4.084153446772854855222913407068675E-2920 2.042076723386427427611456703534338E-2920
4850 1.021038361693213713805728351767169E-2920 5.105191808466068569028641758835845E-2921
4851 2.552595904233034284514320879417923E-2921 1.276297952116517142257160439708961E-2921
4852 6.381489760582585711285802198544805E-2922 3.190744880291292855642901099272403E-2922
4853 1.595372440145646427821450549636202E-2922 7.976862200728232139107252748181008E-2923
4854 3.988431100364116069553626374090504E-2923 1.994215550182058034776813187045252E-2923
4855 9.97107775091029017388406593522626E-2924 4.98553887545514508694203296761313E-2924
4856 2.492769437727572543471016483806565E-2924 1.246384718863786271735508241903283E-2924
4857 6.231923594318931358677541209516415E-2925 3.115961797159465679338770604758208E-2925
4858 1.557980898579732839669385302379104E-2925 7.78990449289866419834692651189552E-2926
4859 3.89495224644933209917346325594776E-2926 1.94747612322466604958673162797388E-2926
4860 9.7373806161233302479336581398694E-2927 4.8686903080616651239668290699347E-2927
4861 2.43434515403083256198341453496735E-2927 1.217172577015416280991707267483675E-2927
4862 6.085862885077081404958536337418375E-2928 3.042931442538540702479268168709188E-2928
4863 1.521465721269270351239634084354594E-2928 7.60732860634635175619817042177297E-2929
4864 3.803664303173175878099085210886485E-2929 1.901832151586587939049542605443243E-2929
4865 9.509160757932939695247713027216215E-2930 4.754580378966469847623856513608108E-2930
4866 2.377290189483234923811928256804054E-2930 1.188645094741617461905964128402027E-2930
4867 5.943225473708087309529820642010135E-2931 2.971612736854043654764910321005068E-2931
4868 1.485806368427021827382455160502534E-2931 7.42903184213510913691227580251267E-2932
4869 3.714515921067554568456137901256335E-2932 1.857257960533777284228068950628168E-2932
4870 9.28628980266888642114034475314084E-2933 4.64314490133444321057017237657042E-2933
4871 2.32157245066722160528508618828521E-2933 1.160786225333610802642543094142605E-2933
4872 5.803931126668054013212715470713025E-2934 2.901965563334027006606357735356513E-2934
4873 1.450982781667013503303178867678257E-2934 7.254913908335067516515894338391283E-2935
4874 3.627456954167533758257947169195642E-2935 1.813728477083766879128973584597821E-2935
4875 9.068642385418834395644867922989105E-2936 4.534321192709417197822433961494553E-2936
4876 2.267160596354708598911216980747277E-2936 1.133580298177354299455608490373638E-2936
4877 5.66790149088677149727804245186819E-2937 2.833950745443385748639021225934095E-2937
4878 1.416975372721692874319510612967048E-2937 7.084876863608464371597553064835238E-2938
4879 3.542438431804232185798776532417619E-2938 1.771219215902116092899388266208810E-2938
4880 8.85609607951058046449694133104405E-2939 4.428048039755290232248470665522025E-2939
4881 2.214024019877645116124235332761013E-2939 1.107012009938822558062117666380506E-2939
4882 5.53506004969411279031058833190253E-2940 2.767530024847056395155294165951265E-2940
4883 1.383765012423528197577647082975633E-2940 6.918825062117640987888235414878163E-2941
4884 3.459412531058820493944117707439082E-2941 1.729706265529410246972058853719541E-2941
4885 8.648531327647051234860294268597705E-2942 4.324265663823525617430147134298853E-2942
4886 2.162132831911762808715073567149427E-2942 1.081066415955881404357536783574713E-2942
4887 5.405332079779407021787683917873565E-2943 2.702666039889703510893841958936783E-2943
4888 1.351333019944851755446920979468392E-2943 6.756665099724258777234604897341958E-2944
4889 3.378332549862129388617302448670979E-2944 1.689166274931064694308651224335490E-2944
4890 8.44583137465532347154325612167745E-2945 4.222915687327661735771628060838725E-2945
4891 2.111457843663830867885814030419363E-2945 1.055728921831915433942907015209681E-2945
4892 5.278644609159577169714535076048405E-2946 2.639322304579788584857267538024203E-2946
4893 1.319661152289894292428633769012102E-2946 6.598305761449471462143168845060508E-2947
4894 3.299152880724735731071584422530254E-2947 1.649576440362367865535792211265127E-2947
4895 8.247882201811839327678961056325635E-2948 4.123941100905919663839480528162818E-2948
4896 2.061970550452959831919740264081409E-2948 1.030985275226479915959870132040705E-2948
4897 5.154926376132399579799350660203525E-2949 2.577463188066199789899675330101763E-2949
4898 1.288731594033099894949837665050882E-2949 6.443657970165499474749188325254408E-2950
4899 3.221828985082749737374594162627204E-2950 1.610914492541374868687297081313602E-2950
4900 8.05457246270687434343648540656801E-2951 4.027286231353437171718242703284005E-2951
4901 2.013643115676718585859121351642003E-2951 1.006821557838359292929560675821001E-2951
4902 5.034107789191796464647803379105005E-2952 2.517053894595898232323901689552503E-2952
4903 1.258526947297949116161950844776252E-2952 6.292634736489745580809754223881258E-2953
4904 3.146317368244872790404877111940629E-2953 1.573158684122436395202438555970315E-2953
4905 7.865793420612181976012192779851575E-2954 3.932896710306090988006096389925788E-2954
4906 1.966448355153045494003048194962894E-2954 9.83224177576522747001524097481447E-2955
4907 4.916120887882613735007620487407235E-2955 2.458060443941306867503810243703618E-2955
4908 1.229030221970653433751905121851809E-2955 6.145151109853267168759525609259045E-2956
4909 3.072575554926633584379762804629523E-2956 1.536287777463316792189881402314761E-2956
4910 7.681438887316583960949407011573805E-2957 3.840719443658291980474703505786903E-2957
4911 1.920359721829145990237351752893452E-2957 9.601798609145729951186758764467258E-2958
4912 4.800899304572864975593379382233629E-2958 2.400449652286432487796689691116815E-2958
4913 1.200224826143216243898344845558408E-2958 6.001124130716081219491724227792038E-2959
4914 3.000562065358040609745862113896019E-2959 1.500281032679020304872931056948010E-2959
4915 7.50140516339510152436465528474005E-2960 3.750702581697550762182327642370025E-2960
4916 1.875351290848775381091163821185013E-2960 9.376756454243876905455819105925063E-2961
4917 4.688378227121938452727909552962532E-2961 2.344189113560969226363954776481266E-2961
4918 1.172094556780484613181977388240633E-2961 5.860472783902423065909886941203165E-2962
4919 2.930236391951211532954943470601583E-2962 1.465118195975605766477471735300791E-2962
4920 7.325590979878028832387358676503955E-2963 3.662795489939014416193679338251978E-2963
4921 1.831397744969507208096839669125989E-2963 9.156988724847536040484198345629945E-2964
4922 4.578494362423768020242099172814973E-2964 2.289247181211884010121049586407486E-2964
4923 1.144623590605942005060524793203743E-2964 5.723117953029710025302623966018715E-2965
4924 2.861558976514855012651311983009358E-2965 1.430779488257427506325655991504679E-2965
4925 7.153897441287137531628279957523395E-2966 3.576948720643568765814139978761698E-2966
4926 1.788474360321784382907069989380849E-2966 8.942371801608921914535349946904245E-2967
4927 4.471185900804460957267674973452123E-2967 2.235592950402230478633837486726061E-2967
4928 1.117796475201115239316918743363031E-2967 5.588982376005576196584593716815153E-2968
4929 2.794491188002788098292296858407577E-2968 1.397245594001394049146148429203788E-2968
4930 6.98622797000697024573074214601894E-2969 3.49311398500348512286537107300947E-2969
4931 1.746556992501742561432685536504735E-2969 8.732784962508712807163427682523675E-2970
4932 4.366392481254356403581713841261838E-2970 2.183196240627178201790856920630919E-2970
4933 1.091598120313589100895428460315460E-2970 5.457990601567945504477142301577298E-2971
4934 2.728995300783972752238571150788649E-2971 1.364497650391986376119285575394325E-2971
4935 6.822488251959931880596427876971625E-2972 3.411244125979965940298213938485813E-2972
4936 1.705622062989982970149106969242907E-2972 8.528110314949914850745534846214533E-2973
4937 4.264055157474957425372767423107267E-2973 2.132027578737478712686383711553633E-2973
4938 1.066013789368739356343191855776817E-2973 5.330068946843696781715959278884083E-2974
4939 2.665034473421848390857979639442042E-2974 1.332517236710924195428989819721021E-2974
4940 6.662586183554620977144949098605105E-2975 3.331293091777310488572474549302553E-2975
4941 1.665646545888655244286237274651277E-2975 8.328232729443276221431186373256383E-2976
4942 4.164116364721638110715593186628192E-2976 2.082058182360819055357796593314096E-2976
4943 1.041029091180409527678898296657048E-2976 5.20514545590204763839449148328524E-2977
4944 2.60257272795102381919724574164262E-2977 1.30128636397551190959862287082131E-2977
4945 6.50643181987755954799311435410655E-2978 3.253215909938779773996557177053275E-2978
4946 1.626607954969389886998278588526638E-2978 8.133039774846949434991392942633188E-2979
4947 4.066519887423474717495696471316594E-2979 2.033259943711737358747848235658297E-2979
4948 1.016629971855868679373924117829149E-2979 5.083149859279343396869620589145743E-2980
4949 2.541574929639671698434810294572872E-2980 1.270787464819835849217405147286436E-2980
4950 6.35393732409917924608702573643218E-2981 3.17696866204958962304351286821609E-2981
4951 1.588484331024794811521756434108045E-2981 7.942421655123974057608782170540225E-2982
4952 3.971210827561987028804391085270113E-2982 1.985605413780993514402195542635056E-2982
4953 9.92802706890496757201097771317528E-2983 4.96401353445248378600548885658764E-2983
4954 2.48200676722624189300274442829382E-2983 1.24100338361312094650137221414691E-2983
4955 6.20501691806560473250686107073455E-2984 3.102508459032802366253430535367275E-2984
4956 1.551254229516401183126715267683638E-2984 7.756271147582005915633576338418188E-2985
4957 3.878135573791002957816788169209094E-2985 1.939067786895501478908394084604547E-2985
4958 9.695338934477507394541970423022735E-2986 4.847669467238753697270985211511368E-2986
4959 2.423834733619376848635492605755684E-2986 1.211917366809688424317746302877842E-2986
4960 6.05958683404844212158873151438921E-2987 3.029793417024221060794365757194605E-2987
4961 1.514896708512110530397182878597303E-2987 7.574483542560552651985914392986513E-2988
4962 3.787241771280276325992957196493257E-2988 1.893620885640138162996478598246628E-2988
4963 9.46810442820069081498239299123314E-2989 4.73405221410034540749119649561657E-2989
4964 2.367026107050172703745598247808285E-2989 1.183513053525086351872799123904143E-2989
4965 5.917565267625431759363995619520715E-2990 2.958782633812715879681997809760358E-2990
4966 1.479391316906357939840998904880179E-2990 7.396956584531789699204994524400895E-2991
4967 3.698478292265894849602497262200448E-2991 1.849239146132947424801248631100224E-2991
4968 9.24619573066473712400624315550112E-2992 4.62309786533236856200312157775056E-2992
4969 2.31154893266618428100156078887528E-2992 1.15577446633309214050078039443764E-2992
4970 5.7788723316654607025039019721882E-2993 2.8894361658327303512519509860941E-2993
4971 1.44471808291636517562597549304705E-2993 7.22359041458182587812987746523525E-2994
4972 3.611795207290912939064938732617625E-2994 1.805897603645456469532469366308813E-2994
4973 9.029488018227282347662346831544065E-2995 4.514744009113641173831173415772033E-2995
4974 2.257372004556820586915586707886017E-2995 1.128686002278410293457793353943008E-2995
4975 5.64343001139205146728896676971504E-2996 2.82171500569602573364448338485752E-2996
4976 1.41085750284801286682224169242876E-2996 7.0542875142400643341112084621438E-2997
4977 3.5271437571200321670556042310719E-2997 1.76357187856001608352780211553595E-2997
4978 8.81785939280008041763901057767975E-2998 4.408929696400040208819505288839875E-2998
4979 2.204464848200020104409752644419938E-2998 1.102232424100010052204876322209969E-2998
4980 5.511162120500050261024381611049845E-2999 2.755581060250025130512190805524923E-2999
4981 1.377790530125012565256095402762462E-2999 6.888952650625062826280477013812308E-3000
4982 3.444476325312531413140238506906154E-3000 1.722238162656265706570119253453077E-3000
4983 8.611190813281328532850596267265385E-3001 4.305595406640664266425298133632693E-3001
4984 2.152797703320332133212649066816347E-3001 1.076398851660166066606324533408173E-3001
4985 5.381994258300830333031622667040865E-3002 2.690997129150415166515811333520433E-3002
4986 1.345498564575207583257905666760217E-3002 6.727492822876037916289528333801083E-3003
4987 3.363746411438018958144764166900542E-3003 1.681873205719009479072382083450271E-3003
4988 8.409366028595047395361910417251355E-3004 4.204683014297523697680955208625678E-3004
4989 2.102341507148761848840477604312839E-3004 1.051170753574380924420238802156420E-3004
4990 5.25585376787190462210119401078210E-3005 2.62792688393595231105059700539105E-3005
4991 1.313963441967976155525298502695525E-3005 6.569817209839880777626492513477625E-3006
4992 3.284908604919940388813246256738813E-3006 1.642454302459970194406623128369406E-3006
4993 8.21227151229985097203311564184703E-3007 4.106135756149925486016557820923515E-3007
4994 2.053067878074962743008278910461758E-3007 1.026533939037481371504139455230879E-3007
4995 5.132669695187406857520697276154395E-3008 2.566334847593703428760348638077198E-3008
4996 1.283167423796851714380174319038599E-3008 6.415837118984258571900871595192995E-3009
4997 3.207918559492129285950435797596498E-3009 1.603959279746064642975217898798249E-3009
4998 8.019796398730323214876089493991245E-3010 4.009898199365161607438044746995623E-3010
4999 2.004949099682580803719022373497812E-3010 1.002474549841290401859511186748906E-3010
5000 5.01237274920645200929755593374453E-3011 2.506186374603226004648777966872265E-3011
5001 1.253093187301613002324388983436133E-3011 6.265465936508065011621944917180663E-3012
5002 3.132732968254032505810972458590332E-3012 1.566366484127016252905486229295166E-3012
5003 7.83183242063508126452743114647583E-3013 3.915916210317540632263715573237915E-3013
5004 1.957958105158770316131857786618958E-3013 9.789790525793851580659288933094788E-3014
5005 4.894895262896925790329644466547394E-3014 2.447447631448462895164822233273697E-3014
5006 1.223723815724231447582411116636849E-3014 6.118619078621157237912055583184243E-3015
5007 3.059309539310578618956027791592122E-3015 1.529654769655289309478013895796061E-3015
5008 7.648273848276446547390069478980305E-3016 3.824136924138223273695034739490153E-3016
5009 1.912068462069111636847517369745077E-3016 9.560342310345558184237586848725383E-3017
5010 4.780171155172779092118793424362692E-3017 2.390085577586389546059396712181346E-3017
5011 1.195042788793194773029698356090673E-3017 5.975213943965973865148491780453365E-3018
5012 2.987606971982986932574245890226683E-3018 1.493803485991493466287122945113341E-3018
5013 7.469017429957467331435614725566705E-3019 3.734508714978733665717807362783353E-3019
5014 1.867254357489366832858903681391677E-3019 9.336271787446834164294518406958383E-3020
5015 4.668135893723417082147259203479192E-3020 2.334067946861708541073629601739596E-3020
5016 1.167033973430854270536814800869798E-3020 5.83516986715427135268407400434899E-3021
5017 2.917584933577135676342037002174495E-3021 1.458792466788567838171018501087248E-3021
5018 7.29396233394283919085509250543624E-3022 3.64698116697141959542754625271812E-3022
5019 1.82349058348570979771377312635906E-3022 9.1174529174285489885688656317953E-3023
5020 4.55872645871427449428443281589765E-3023 2.279363229357137247142216407948825E-3023
5021 1.139681614678568623571108203974413E-3023 5.698408073392843117855541019872063E-3024
5022 2.849204036696421558927770509936032E-3024 1.424602018348210779463885254968016E-3024
5023 7.12301009174105389731942627484008E-3025 3.56150504587052694865971313742004E-3025
5024 1.78075252293526347432985656871002E-3025 8.9037626146763173716492828435501E-3026
5025 4.45188130733815868582464142177505E-3026 2.225940653669079342912320710887525E-3026
5026 1.112970326834539671456160355443763E-3026 5.564851634172698357280801777218813E-3027
5027 2.782425817086349178640400888609407E-3027 1.391212908543174589320200444304703E-3027
5028 6.956064542715872946601002221523515E-3028 3.478032271357936473300501110761758E-3028
5029 1.739016135678968236650250555380879E-3028 8.695080678394841183251252776904395E-3029
5030 4.347540339197420591625626388452198E-3029 2.173770169598710295812813194226099E-3029
5031 1.086885084799355147906406597113050E-3029 5.434425423996775739532032985565248E-3030
5032 2.717212711998387869766016492782624E-3030 1.358606355999193934883008246391312E-3030
5033 6.79303177999596967441504123195656E-3031 3.39651588999798483720752061597828E-3031
5034 1.69825794499899241860376030798914E-3031 8.4912897249949620930188015399457E-3032
5035 4.24564486249748104650940076997285E-3032 2.122822431248740523254700384986425E-3032
5036 1.061411215624370261627350192493213E-3032 5.307056078121851308136750962466063E-3033
5037 2.653528039060925654068375481233032E-3033 1.326764019530462827034187740616516E-3033
5038 6.63382009765231413517093870308258E-3034 3.31691004882615706758546935154129E-3034
5039 1.658455024413078533792734675770645E-3034 8.292275122065392668963673378853225E-3035
5040 4.146137561032696334481836689426613E-3035 2.073068780516348167240918344713306E-3035
5041 1.036534390258174083620459172356653E-3035 5.182671951290870418102295861783265E-3036
5042 2.591335975645435209051147930891633E-3036 1.295667987822717604525573965445816E-3036
5043 6.47833993911358802262786982722908E-3037 3.23916996955679401131393491361454E-3037
5044 1.61958498477839700565696745680727E-3037 8.09792492389198502828483728403635E-3038
5045 4.048962461945992514142418642018175E-3038 2.024481230972996257071209321009088E-3038
5046 1.012240615486498128535604660504544E-3038 5.06120307743249064267802330252272E-3039
5047 2.53060153871624532133901165126136E-3039 1.26530076935812266066950582563068E-3039
5048 6.3265038467906133033475291281534E-3040 3.1632519233953066516737645640767E-3040
5049 1.58162596169765332583688228203835E-3040 7.90812980848826662918441141019175E-3041
5050 3.954064904244133314592205705095875E-3041 1.977032452122066657296102852547938E-3041
5051 9.88516226061033328648051426273969E-3042 4.942581130305166643240257131369845E-3042
5052 2.471290565152583321620128565684923E-3042 1.235645282576291660810064282842461E-3042
5053 6.178226412881458304050321414212305E-3043 3.089113206440729152025160707106153E-3043
5054 1.544556603220364576012580353553077E-3043 7.722783016101822880062901767765383E-3044
5055 3.861391508050911440031450883882692E-3044 1.930695754025455720015725441941346E-3044
5056 9.65347877012727860007862720970673E-3045 4.826739385063639300039313604853365E-3045
5057 2.413369692531819650019656802426683E-3045 1.206684846265909825009828401213341E-3045
5058 6.033424231329549125049142006066705E-3046 3.016712115664774562524571003033353E-3046
5059 1.508356057832387281262285501516677E-3046 7.541780289161936406311427507583383E-3047
5060 3.770890144580968203155713753791692E-3047 1.885445072290484101577856876895846E-3047
5061 9.42722536145242050788928438447923E-3048 4.713612680726210253944642192239615E-3048
5062 2.356806340363105126972321096119808E-3048 1.178403170181552563486160548059904E-3048
5063 5.89201585090776281743080274029952E-3049 2.94600792545388140871540137014976E-3049
5064 1.47300396272694070435770068507488E-3049 7.3650198136347035217885034253744E-3050
5065 3.6825099068173517608942517126872E-3050 1.8412549534086758804471258563436E-3050
5066 9.206274767043379402235629281718E-3051 4.603137383521689701117814640859E-3051
5067 2.3015686917608448505589073204295E-3051 1.15078434588042242527945366021475E-3051
5068 5.75392172940211212639726830107375E-3052 2.876960864701056063198634150536875E-3052
5069 1.438480432350528031599317075268438E-3052 7.192402161752640157996585376342188E-3053
5070 3.596201080876320078998292688171094E-3053 1.798100540438160039499146344085547E-3053
5071 8.990502702190800197495731720427735E-3054 4.495251351095400098747865860213868E-3054
5072 2.247625675547700049373932930106934E-3054 1.123812837773850024686966465053467E-3054
5073 5.619064188869250123434832325267335E-3055 2.809532094434625061717416162633668E-3055
5074 1.404766047217312530858708081316834E-3055 7.02383023608656265429354040658417E-3056
5075 3.511915118043281327146770203292085E-3056 1.755957559021640663573385101646043E-3056
5076 8.779787795108203317866925508230215E-3057 4.389893897554101658933462754115108E-3057
5077 2.194946948777050829466731377057554E-3057 1.097473474388525414733365688528777E-3057
5078 5.487367371942627073666828442643885E-3058 2.743683685971313536833414221321943E-3058
5079 1.371841842985656768416707110660972E-3058 6.859209214928283842083535553304858E-3059
5080 3.429604607464141921041767776652429E-3059 1.714802303732070960520883888326215E-3059
5081 8.574011518660354802604419441631075E-3060 4.287005759330177401302209720815538E-3060
5082 2.143502879665088700651104860407769E-3060 1.071751439832544350325552430203885E-3060
5083 5.358757199162721751627762151019425E-3061 2.679378599581360875813881075509713E-3061
5084 1.339689299790680437906940537754857E-3061 6.698446498953402189534702688774283E-3062
5085 3.349223249476701094767351344387142E-3062 1.674611624738350547383675672193571E-3062
5086 8.373058123691752736918378360967855E-3063 4.186529061845876368459189180483928E-3063
| |
>> /etc/rc.conf"' %
(r'ifconfig_' + interface + r'=\"' + test_ip + r' netmask 255.255.255.0' + media_settings + r'\"'),
shell_escape=False)
if test_subnet == subnet1 :
route1 = r'static_routes=\"internalnet2\"'
route2 = r'route_internalnet2=\"-net ' + subnet2 + r'.0/24 ' + subnet1 + r'.1\"'
else:
route1 = r'static_routes=\"internalnet1\"'
route2 = r'route_internalnet1=\"-net ' + subnet1 + r'.0/24 ' + subnet2 + r'.1\"'
run('echo "\'%s\' >> /etc/rc.conf"' % route1, shell_escape=False)
run('echo "\'%s\' >> /etc/rc.conf"' % route2, shell_escape=False)
# restart network
run('/etc/rc.d/netif restart')
time.sleep(1)
with settings(warn_only=True):
run('/etc/rc.d/routing restart')
elif htype == 'CYGWIN':
# remove all testbed routes
run('route delete %s.0 -p' % subnet1)
run('route delete %s.0 -p' % subnet2)
# search for right interface based on start of MAC
interface = ''
interfaces_all = run('ipconfig /all')
for line in interfaces_all.splitlines():
if line.find('Ethernet adapter') > -1:
interface = line.replace('Ethernet adapter ', '').replace(':','').rstrip()
if line.find('68-05-CA-') > -1 :
break
# interface config
cmd = r'netsh interface ip set address \"%s\" static %s 255.255.255.0' % (interface, test_ip)
run('"\'%s\'"' % cmd, pty=False, shell_escape=False)
time.sleep(5)
# set static route
# first need to find interface id for routing purposes based on MAC
interface = ''
interfaces_all = run('route print')
for line in interfaces_all.splitlines():
if line.find('68 05 ca') > -1 :
interface = line.lstrip()[0:2]
interface = interface.replace('.', '')
break
if test_subnet == subnet1 :
route = 'route add ' + subnet2 + '.0 mask 255.255.255.0 ' + subnet1 + '.1 if %s -p' % interface
else:
route = 'route add ' + subnet1 + '.0 mask 255.255.255.0 ' + subnet2 + '.1 if %s -p' % interface
run(route, pty=False)
# there seems to be no command line tools on Windows that can set link speed, cause link speed setting
# is implemented in nic driver and can be configured via driver GUI. possible command line solution is
# to manipulate the registry value that store the link speed value for the testbed nic. however, the
# implementation would be specific to the supported nic, as the registry entries are nic specific.
# by default autonegotiation is enabled though, so the switch will force the host to 100, 100,
# show interface speeds
run('wmic NIC where NetEnabled=true get Name, Speed')
elif htype == 'Darwin':
# remove all testbed routes
run('route -n delete %s.0/24' % subnet1)
run('route -n delete %s.0/24' % subnet2)
# setup interface
run('networksetup -setmanual "Ethernet" %s 255.255.255.0' % test_ip)
# set static route
if test_subnet == subnet1 :
par1 = subnet2
par2 = subnet1
else :
par1 = subnet1
par2 = subnet2
interface = 'en0'
run('route -n add %s.0/24 -interface %s' % (par2, interface))
run('cat /Library/StartupItems/AddRoutes/AddRoutes | sed "s/route add .*$/route add %s.0\/24 %s.1/" > __tmp' \
' && mv __tmp /Library/StartupItems/AddRoutes/AddRoutes' %
(par1, par2))
run('chmod a+x /Library/StartupItems/AddRoutes/AddRoutes')
run('/Library/StartupItems/AddRoutes/AddRoutes start')
# XXX for Mac the link speed setting is not permanent for now. need to add new script under StartupItems
# to make this permanent (plus accompanying .plist file), similar to the AddRoutes approach
if link_speed == '10':
run('ifconfig %s media 10baseT/UTP mediaopt full-duplex' % interface)
elif link_speed == '100':
run('ifconfig %s media 100baseTX mediaopt full-duplex' % interface)
else:
run('ifconfig %s media 1000baseT mediaopt full-duplex' % interface)
## Setup testbed network topology (TASK)
## This task makes a number of assumptions:
## - One router dumbbell topology
## - hosts are numbered and numbers relate to the switch port
##   (starting from first port)
## - VLAN number is the same as 3rd octet of IP
## - there are two test subnets 172.16.10.0/24, 172.16.11.0/24
## - interface names are known/hardcoded
# @param switch Switch DNS name
# @param port_prefix Prefix for ports at switch
# @param port_offset Host number to port number offset
@task
# we need to invoke this task with runs_once, as otherwise this task will run once for each host listed in -H
@runs_once
def init_topology(switch='', port_prefix='', port_offset = ''):
    "Topology setup"
    # The switch must be configured from a single session, so do it
    # sequentially first; host setup is independent and can run in parallel.
    # sequentially configure switch
    execute(init_topology_switch, switch, port_prefix, port_offset)
    # configure hosts in parallel
    execute(init_topology_host)
## Power cycle hosts via the 9258HP power controllers
@task
@parallel
def power_cycle():
    "Power cycle host using the power controller"
    # wget is used to drive the power controller's HTTP interface
    local('which wget')
    # check if user name and password defined; use hasattr instead of the
    # old assign-to-throwaway-variable idiom
    if not hasattr(config, 'TPCONF_power_admin_name') or \
       not hasattr(config, 'TPCONF_power_admin_pw'):
        abort('TPCONF_power_admin_name and TPCONF_power_admin_pw must be set')
    # get type of power controller (defaults to 9258HP if not configured)
    ctrl_type = getattr(config, 'TPCONF_power_ctrl_type', '9258HP')
    # get IP of power controller and port number of 9258HP host is connected to
    try:
        ctrl_ip, ctrl_port = config.TPCONF_host_power_ctrlport[env.host_string]
    except KeyError:
        abort(
            'No power controller IP/port defined for host %s' %
            env.host_string)
    if ctrl_type == '9258HP':
        # turn power off, wait, then turn power back on (the two requests
        # differ only in the final p<port>=<state> value)
        for state in ('0', '1'):
            cmd = 'wget -o /dev/null -O /dev/null ' \
                  'http://%s/SetPower.cgi?user=%s+pass=%s+p%s=%s' % \
                  (ctrl_ip,
                   config.TPCONF_power_admin_name,
                   config.TPCONF_power_admin_pw,
                   ctrl_port,
                   state)
            local(cmd)
            if state == '0':
                time.sleep(2)
    elif ctrl_type == 'SLP-SPP1008':
        # build the 24-bit LED mask: one bit per port (ports 1-8), with only
        # our port's bit set, padded with two more all-zero banks
        s = ''.join('1' if i == int(ctrl_port) else '0' for i in range(1, 9))
        s += '00000000' + '00000000'
        # turn power off, wait, then turn power back on (offs.cgi vs ons.cgi)
        for action in ('offs', 'ons'):
            cmd = 'wget --user=%s --password=%s -o /dev/null -O /dev/null ' \
                  'http://%s/%s.cgi?led=%s' % \
                  (config.TPCONF_power_admin_name,
                   config.TPCONF_power_admin_pw,
                   ctrl_ip,
                   action,
                   s)
            local(cmd)
            if action == 'offs':
                time.sleep(2)
    else:
        abort('Unsupported power controller \'%s\'' % ctrl_type)
## Boot host into selected OS (TASK)
# @param file_prefix Prefix for generated pxe boot file
# @param os_list Comma-separated string of OS (Linux, FreeBSD, CYGWIN), one for each host
# @param force_reboot If '0' (host will only be rebooted if OS should be changed,
# if '1' (host will always be rebooted)
# @param do_power_cycle If '0' (never power cycle host),
# if '1' (power cycle host if host does not come up after timeout
# @param boot_timeout Reboot timeout in seconds (integer)
# @param local_dir Directory to put the generated .ipxe files in
# @param linux_kern_router Linux kernel to boot on router
# @param linux_kern_hosts Linux kernel to boot on hosts
# @param tftp_server Specify the TFTP server in the form <server_ip>:<port>
# @param mac_list Comma-separated list of MAC addresses for hosts (MACs of boot interfaces)
# Only required if hosts are unresponsive/inaccessible.
@task
@parallel
def init_os(file_prefix='', os_list='', force_reboot='0', do_power_cycle='0',
boot_timeout='100', local_dir='.',
linux_kern_router='3.10.18-vanilla-10000hz',
linux_kern_hosts='3.9.8-desktop-web10g',
tftp_server='10.1.1.11:8080',
mac_list=''):
"Boot host with selected operating system"
_boot_timeout = int(boot_timeout)
if _boot_timeout < 60:
warn('Boot timeout value too small, using 60 seconds')
_boot_timeout = '60'
host_os_vals = os_list.split(',')
if len(env.all_hosts) < len(host_os_vals):
abort('Number of OSs specified must be the same as number of hosts')
# duplicate last one until we reach correct length
while len(host_os_vals) < len(env.all_hosts):
host_os_vals.append(host_os_vals[-1])
host_mac = {}
if mac_list != '':
mac_vals = mac_list.split(',')
if len(env.all_hosts) != len(mac_vals):
abort('Must specify one MAC address for each host')
# create a dictionary
host_mac = dict(zip(env.all_hosts, mac_vals))
# get type of current host if possible
# XXX try to suppress Fabric exception traceback in case host is not
# accessible, but doesn't seem to work properly
with settings(hide('debug', 'warnings'), warn_only=True):
htype = get_type_cached(env.host_string)
if type(htype) == NetworkError:
# host not accessible, set htype to unknown
htype = '?'
# get dictionary from host and OS lists
host_os = dict(zip(env.all_hosts, host_os_vals))
# os we want
target_os = host_os.get(env.host_string, '')
kern = ''
target_kern = ''
if target_os == 'Linux':
if env.host_string in config.TPCONF_router:
target_kern = linux_kern_router
else:
target_kern = linux_kern_hosts
if htype == 'Linux':
kern = run('uname -r')
else:
kern = '?'
if target_kern == 'running' or target_kern == 'current':
if htype == 'Linux':
target_kern = kern
else:
warn('Host not running Linux, ignoring "running" or "current"')
if target_os != '' and (
force_reboot == '1' or target_os != htype or target_kern != kern):
# write pxe config file
pxe_template = config.TPCONF_script_path + \
'/conf-macaddr_xx\:xx\:xx\:xx\:xx\:xx.ipxe.in'
# if we have a mac address specified use it, otherwise try to automatically
# get the mac address
if env.host_string in host_mac:
mac = host_mac[env.host_string]
else:
mac = get_netmac_cached(env.host_string)
file_name = 'conf-macaddr_' + | |
# Repository: Stratoscale/zadarapy
# Copyright 2019 Zadara Storage, Inc.
# Originally authored by <NAME> - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from future.standard_library import install_aliases
install_aliases()
from zadarapy.validators import verify_start_limit, verify_name, \
verify_string, verify_group_name, verify_boolean, verify_field, \
verify_not_none
# Explicit public API of this module (names exported by "from ... import *").
__all__ = ["get_active_directory", "get_all_nas_groups", "get_all_nas_users",
           "get_nas_user", "get_nas_group",
           "create_nas_group", "create_nas_user",
           "change_nas_user_smb_password", "delete_nas_group",
           "delete_nas_user",
           "join_active_directory", "leave_active_directory",
           "update_active_directory_dns"]
def get_all_nas_users(session, start=None, limit=None, return_type=None,
                      **kwargs):
    """
    Retrieves details for all NAS users configured on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type start: int
    :param start: The offset to start displaying NAS users from.  Optional.

    :type: limit: int
    :param limit: The maximum number of NAS users to return.  Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate pagination arguments and fold them into the query parameters.
    query = verify_start_limit(start, limit)
    return session.get_api(path='/api/nas/users.json', parameters=query,
                           return_type=return_type, **kwargs)
def get_nas_user(session, username, return_type=None, **kwargs):
    """
    Retrieves details for a single NAS user.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type username: str
    :param username: The NAS user's username.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate/normalize the username before embedding it in the URL path.
    safe_username = verify_name(username)
    endpoint = '/api/nas/users/{0}.json'.format(safe_username)
    return session.get_api(path=endpoint, return_type=return_type, **kwargs)
def create_nas_user(session, username, nfs_uid=None, smb_password=None,
                    smb_groupname=None, return_type=None, **kwargs):
    """
    Creates a NAS user.  Either nfs_uid or smb_password (or both) must be
    specified.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type username: str
    :param username: The NAS user's username.  Required.

    :type nfs_uid: int
    :param nfs_uid: When using NFS, the UID for the NFS user.  This should
        correspond to the user's UID in the client system's /etc/passwd file.
        "root" and "nobody" users are statically defined by the VPSA.
        Optional.

    :type smb_password: str
    :param smb_password: When using SMB, the password to assign to the SMB
        user.  This is only necessary when not using guest access on the
        volume and when not integrated with an Active Directory server.
        Optional.

    :type smb_groupname: str
    :param smb_groupname: When using SMB, the primary group for the user can
        optionally be designated with the NAS group designated here.
        Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.

    :raises ValueError: If neither nfs_uid nor smb_password is given, or if
        nfs_uid is outside the valid range.
    """
    username = verify_name(username)
    if nfs_uid is None and smb_password is None:
        # fixed: the two adjacent string literals previously concatenated
        # without a separating space ("...(or both)parameters...")
        raise ValueError('Either the nfs_uid or smb_password (or both) '
                         'parameters must be specified.')
    body_values = {'username': username}
    if nfs_uid is not None:
        # UID 0 (root) and 65534 (nobody) are reserved by the VPSA
        if nfs_uid < 1 or nfs_uid > 65533:
            raise ValueError('"{0}" is not a valid NFS UID.'.format(nfs_uid))
        body_values['nfs_uid'] = nfs_uid
    if smb_password is not None:
        # fixed: a redaction placeholder had replaced the actual value here,
        # leaving the module syntactically invalid
        body_values['password'] = smb_password
    if smb_groupname is not None:
        body_values['groupname'] = smb_groupname
    path = '/api/nas/users.json'
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def change_nas_user_smb_password(session, username, smb_password,
                                 return_type=None, **kwargs):
    """
    Changes the SMB password for a NAS user.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type username: str
    :param username: The NAS user's username.  Required.

    :type smb_password: str
    :param smb_password: Changes the SMB password to this value.  Pass an
        empty string to remove the SMB password.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    username = verify_name(username)
    verify_string(smb_password)
    # fixed: a redaction placeholder had replaced the value here; send the
    # caller-supplied smb_password
    body_values = {'password': smb_password}
    path = '/api/nas/users/{0}/password.json'.format(username)
    return session.post_api(path=path, body=body_values,
                            return_type=return_type, **kwargs)
def delete_nas_user(session, username, return_type=None, **kwargs):
    """
    Deletes a NAS user.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type username: str
    :param username: The NAS user's username.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate the username, then issue the DELETE against the user resource.
    safe_username = verify_name(username)
    return session.delete_api(
        path='/api/nas/users/{0}.json'.format(safe_username),
        return_type=return_type, **kwargs)
def get_all_nas_groups(session, start=None, limit=None, return_type=None,
                       **kwargs):
    """
    Retrieves details for all NAS groups configured on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type start: int
    :param start: The offset to start displaying NAS groups from.  Optional.

    :type: limit: int
    :param limit: The maximum number of NAS groups to return.  Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate pagination arguments and fold them into the query parameters.
    query = verify_start_limit(start, limit)
    return session.get_api(path='/api/nas/groups.json', parameters=query,
                           return_type=return_type, **kwargs)
def get_nas_group(session, groupname, return_type=None, **kwargs):
    """
    Retrieves details for a single NAS group.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type groupname: str
    :param groupname: The NAS group's groupname.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Validate/normalize the group name before embedding it in the URL path.
    safe_groupname = verify_name(groupname)
    endpoint = '/api/nas/groups/{0}.json'.format(safe_groupname)
    return session.get_api(path=endpoint, return_type=return_type, **kwargs)
def create_nas_group(session, groupname, nfs_gid=None, smb='NO',
                     return_type=None, **kwargs):
    """
    Creates a NAS group.  Either nfs_gid must be specified or smb must be set
    to 'YES' (or both).

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type groupname: str
    :param groupname: The NAS group's groupname.  Required.

    :type nfs_gid: int
    :param nfs_gid: When using NFS, the GID for the NFS user.  This should
        correspond to the group's GID in the client system's /etc/groups
        file.  "root" and "nogroup" groups are statically defined by the
        VPSA.  Optional.

    :type smb: str
    :param smb: When using SMB, if set to 'YES', this group will be usable by
        SMB/CIFS clients.  If set to 'NO', this group won't be usable by
        SMB/CIFS clients.  Optional (set to 'NO' by default).

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    validated_group = verify_group_name(groupname)
    smb_flag = verify_boolean(smb, "smb")
    # ensure at least one of NFS GID / SMB is in effect and the GID is valid
    _check_nfs_gid(nfs_gid, smb_flag)
    body = {'groupname': validated_group, 'smb': smb_flag}
    if nfs_gid is not None:
        body['nfs_gid'] = nfs_gid
    return session.post_api(path='/api/nas/groups.json', body=body,
                            return_type=return_type, **kwargs)
def delete_nas_group(session, groupname, return_type=None, **kwargs):
"""
Deletes a NAS group.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type groupname: str
:param groupname: The NAS group's groupname. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', | |
`x`. The shapes of `x` and `y` satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
Args:
x: A `Tensor`.
perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Transpose", name,
tld.op_callbacks, x, perm)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return transpose_eager_fallback(
x, perm, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Transpose", x=x, perm=perm, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "Tperm",
_op._get_attr_type("Tperm"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Transpose", _inputs_flat, _attrs, _result)
_result, = _result
return _result
# Raw-op alias: exposes the Transpose kernel as tf.raw_ops.Transpose.
Transpose = tf_export("raw_ops.Transpose")(_ops.to_raw_op(transpose))
def transpose_eager_fallback(x, perm, name, ctx):
  """Slow-path eager execution of Transpose used when the TFE fast path
  raises _FallbackException: resolve input dtypes, then dispatch through
  _execute.execute."""
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  # perm defaults to int32 when its dtype cannot be inferred
  _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], ctx, _dtypes.int32)
  _inputs_flat = [x, perm]
  _attrs = ("T", _attr_T, "Tperm", _attr_Tperm)
  _result = _execute.execute(b"Transpose", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Transpose", _inputs_flat, _attrs, _result)
  # single output op: unwrap the one-element result list
  _result, = _result
  return _result
_UniqueOutput = collections.namedtuple(
"Unique",
["y", "idx"])
def unique(x, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements of `x`
  sorted in the same order that they occur in `x`; `x` does not need to be sorted.
  This operation also returns a tensor `idx` the same size as `x` that contains
  the index of each value of `x` in the unique output `y`. In other words:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  Examples:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx = unique(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  ```

  ```
  # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
  y, idx = unique(x)
  y ==> [4, 5, 1, 2, 3]
  idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
  ```

  Args:
    x: A `Tensor`. 1-D.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  # Eager mode: try the TFE fast path first, fall back to the generic eager
  # path, and finally (symbolic tensors) drop through to graph construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Unique", name,
        tld.op_callbacks, x, "out_idx", out_idx)
      _result = _UniqueOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return unique_eager_fallback(
          x, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Unique", x=x, out_idx=out_idx, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "out_idx",
              _op._get_attr_type("out_idx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Unique", _inputs_flat, _attrs, _result)
  _result = _UniqueOutput._make(_result)
  return _result

# Raw-op alias: exposes the Unique kernel as tf.raw_ops.Unique.
Unique = tf_export("raw_ops.Unique")(_ops.to_raw_op(unique))
def unique_eager_fallback(x, out_idx, name, ctx):
  """Slow-path eager execution of Unique used when the TFE fast path raises
  _FallbackException."""
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T, "out_idx", out_idx)
  # Unique has 2 outputs: (y, idx)
  _result = _execute.execute(b"Unique", 2, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Unique", _inputs_flat, _attrs, _result)
  _result = _UniqueOutput._make(_result)
  return _result
_UniqueV2Output = collections.namedtuple(
"UniqueV2",
["y", "idx"])
def unique_v2(x, axis, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements along an axis of a tensor.

  This operation either returns a tensor `y` containing unique elements
  along the `axis` of a tensor. The returned unique elements is sorted
  in the same order as they occur along `axis` in `x`.
  This operation also returns a tensor `idx` that is the same size as
  the number of the elements in `x` along the `axis` dimension. It
  contains the index in the unique output `y`.
  In other words, for an `1-D` tensor `x` with `axis = None:

  `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

  For example:

  ```
  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
  y, idx = unique(x)
  y ==> [1, 2, 4, 7, 8]
  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  ```

  For an `2-D` tensor `x` with `axis = 0`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx = unique(x, axis=0)
  y ==> [[1, 0, 0],
         [2, 0, 0]]
  idx ==> [0, 0, 1]
  ```

  For an `2-D` tensor `x` with `axis = 1`:

  ```
  # tensor 'x' is [[1, 0, 0],
  #                [1, 0, 0],
  #                [2, 0, 0]]
  y, idx = unique(x, axis=1)
  y ==> [[1, 0],
         [1, 0],
         [2, 0]]
  idx ==> [0, 1, 1]
  ```

  Args:
    x: A `Tensor`. A `Tensor`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: None). The axis of the Tensor to
      find the unique elements.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, idx).

    y: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  # Eager mode: try the TFE fast path first, fall back to the generic eager
  # path, and finally (symbolic tensors) drop through to graph construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "UniqueV2", name,
        tld.op_callbacks, x, axis, "out_idx", out_idx)
      _result = _UniqueV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return unique_v2_eager_fallback(
          x, axis, out_idx=out_idx, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "UniqueV2", x=x, axis=axis, out_idx=out_idx, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Taxis",
              _op._get_attr_type("Taxis"), "out_idx",
              _op._get_attr_type("out_idx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "UniqueV2", _inputs_flat, _attrs, _result)
  _result = _UniqueV2Output._make(_result)
  return _result

# Raw-op alias: exposes the UniqueV2 kernel as tf.raw_ops.UniqueV2.
UniqueV2 = tf_export("raw_ops.UniqueV2")(_ops.to_raw_op(unique_v2))
def unique_v2_eager_fallback(x, axis, out_idx, name, ctx):
  """Slow-path eager execution of UniqueV2 used when the TFE fast path raises
  _FallbackException."""
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  # axis defaults to int64 when its dtype cannot be inferred
  _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], ctx, _dtypes.int64)
  _inputs_flat = [x, axis]
  _attrs = ("T", _attr_T, "Taxis", _attr_Taxis, "out_idx", out_idx)
  # UniqueV2 has 2 outputs: (y, idx)
  _result = _execute.execute(b"UniqueV2", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "UniqueV2", _inputs_flat, _attrs, _result)
  _result = _UniqueV2Output._make(_result)
  return _result
_UniqueWithCountsOutput = collections.namedtuple(
"UniqueWithCounts",
["y", "idx", "count"])
def unique_with_counts(x, out_idx=_dtypes.int32, name=None):
r"""Finds unique elements in a 1-D tensor.
This operation returns a tensor `y` containing all of the unique elements of `x`
sorted in the same order that they occur in `x`. This operation also returns a
tensor `idx` the same size as `x` that contains the index of each value of `x`
in the unique output `y`. Finally, it returns a third tensor `count` that
contains the count of each element of `y` in `x`. In other words:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
Args:
x: A `Tensor`. 1-D.
out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, idx, count).
y: A `Tensor`. Has the same type as `x`.
idx: A `Tensor` of type `out_idx`.
count: A `Tensor` of type `out_idx`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "UniqueWithCounts", name,
tld.op_callbacks, x, "out_idx", out_idx)
_result = _UniqueWithCountsOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return unique_with_counts_eager_fallback(
x, out_idx=out_idx, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
| |
# Implementation of all commands the Machinekit workbench registers with FreeCAD.
#
# Special attention should be given to MachinekitCommandCenter. Integrating MK with FC
# turned out to be a bit awkward because the existence and communication with MK is entirely
# outside FC's control, which is not what the FC infrastructure is aiming for.
#
# It is required to monitor all MKs, detect new ones and tear down the ones which went away.
# This requires dynamically modifying menu entries and tool bars.
#
# In order to deal with this situation the MK workbench has a concept of an 'active MK'. Once
# a given MK instance has been set as "active" all tools and menu commands operate against
# that MK instance. This is not ideal; there are probably a ton of issues undiscovered so far.
# Note that if only one MK instance could be found it becomes automatically the active one.
#
# As it turned out having a separate MK workbench wasn't that useful anyway due to all the
# switching between Path and MK. What I really wanted was for MK to extend Path. Also, this
# idea of having independent views for the different aspects of MK turned out to be less
# useful in practice - which is where the Combo view comes in which is added to the Path
# workbench, one per discovered MK instance, making this much nicer to deal with.
import FreeCAD
import FreeCADGui
import MachinekitCombo
import MachinekitExecute
import MachinekitHud
import MachinekitJog
import MachinekitPreferences
import PathScripts.PathLog as PathLog
import PySide.QtCore
import PySide.QtGui
import machinekit
#PathLog.setLevel(PathLog.Level.DEBUG, PathLog.thisModule())
#PathLog.trackModule(PathLog.thisModule())
MachinekitUpdateMS = 50 # update machinekit every 50ms
MachinekitUiHoldoff = 20 # update menus and toolbars once a second (20 * 50ms)
# The currently active machinekit instance, managed by SetMK()/ActiveMK().
MK = None
def _mkerror(mk, msg):
    '''Helper function to display an error in a message box.'''
    # pick the icon matching the message severity first
    if msg.isError():
        icon = PySide.QtGui.QMessageBox.Critical
    elif msg.isText():
        icon = PySide.QtGui.QMessageBox.Information
    else:
        icon = PySide.QtGui.QMessageBox.NoIcon
    box = PySide.QtGui.QMessageBox()
    box.setWindowIcon(machinekit.IconResource('machinekiticon.svg'))
    box.setWindowTitle('Machinekit')
    box.setTextFormat(PySide.QtCore.Qt.TextFormat.RichText)
    # show the instance name, a separator line and the message text, centered
    lines = [mk.name(), ''] + list(msg.messages())
    box.setText("<div align='center'>%s</div>" % '<br/>'.join(lines))
    box.setIcon(icon)
    box.setStandardButtons(PySide.QtGui.QMessageBox.Ok)
    box.exec_()
def SetMK(mk):
    '''Make mk the active Machinekit instance all commands operate on.

    Disconnects the error handler from the previously active instance (if
    any) and connects it to the new one. Passing None now clears the active
    instance instead of raising AttributeError on None.errorUpdate.'''
    global MK
    if MK:
        MK.errorUpdate.disconnect(_mkerror)
    MK = mk
    if mk:
        mk.errorUpdate.connect(_mkerror)
def ActiveMK(setIfNone=False):
    '''Return the active Machinekit instance, or None.

    If no instance was explicitly activated but exactly one valid instance
    is discovered, that one is returned (and, with setIfNone=True, also
    made the active instance).'''
    if MK:
        return MK
    valid = [inst for inst in machinekit.Instances() if inst.isValid()]
    if len(valid) == 1:
        if setIfNone:
            SetMK(valid[0])
        return valid[0]
    return None
class MachinekitCommand(object):
    '''Base class for all Machinekit FC commands.
    Takes care of adding the dock widget and managing its lifetime.
    Subclasses must implement activate(mk) and may override IsActive(),
    haveMK() and terminateDock().'''
    def __init__(self, name, services):
        # name: short command identifier; services: list of MK service names
        # the command requires (see serviceNames())
        PathLog.track(services)
        self.name = name
        self.services = services
    def IsActive(self):
        '''MK commands are typically only available if an MK instance is active and there is at least one document open.'''
        return not (ActiveMK() is None or FreeCAD.ActiveDocument is None)
    def Activated(self):
        '''Upon activation create the dock widget, install a signal handler for the close button
        and add the dock widget to FC's mdi.'''
        PathLog.track(self.name)
        dock = None
        # haveMK() lets subclasses that carry their own MK reference skip the
        # active-instance lookup; ActiveMK(True) auto-activates a sole instance
        if self.haveMK() or ActiveMK(True):
            dock = self.activate(ActiveMK())
        else:
            PathLog.debug('No machinekit instance active')
        # activate() may return None (e.g. when the dock already exists)
        if dock:
            PathLog.debug('Activate first found instance')
            # wire every '...closebutton' child so closing the widget also
            # tears down the dock (the lambda captures this dock instance)
            for closebutton in [widget for widget in dock.ui.children() if widget.objectName().endswith('closebutton')]:
                closebutton.clicked.connect(lambda : self.terminateDock(dock))
            FreeCADGui.getMainWindow().addDockWidget(PySide.QtCore.Qt.LeftDockWidgetArea, dock.ui)
    def haveMK(self):
        '''Return True if it is not required to have an active machinekit instance for this command'''
        return False
    def serviceNames(self):
        '''Return a list of services required for the command to function.'''
        return self.services
    def terminateDock(self, dock):
        '''Callback invoked when the dock widget's close button is pressed.'''
        PathLog.track()
        dock.terminate()
        FreeCADGui.getMainWindow().removeDockWidget(dock.ui)
        # schedule Qt-side deletion once control returns to the event loop
        dock.ui.deleteLater()
class MachinekitCommandJog(MachinekitCommand):
    '''FC command to open the Jog dock widget.'''
    def __init__(self):
        PathLog.track()
        # super(self.__class__, self) recurses infinitely if this class is
        # ever subclassed; use the py3 zero-argument form instead.
        super().__init__('Jog', ['command', 'status'])
    def activate(self, mk):
        '''Create and return the Jog dock for the given MK instance.'''
        PathLog.track()
        return MachinekitJog.Jog(mk)
    def GetResources(self):
        '''Icon, menu label and tooltip used by FreeCAD for this command.'''
        PathLog.track()
        return {
            'Pixmap'    : machinekit.FileResource('machinekiticon-jog.svg'),
            'MenuText'  : 'Jog',
            'ToolTip'   : 'Jog and DRO interface for machine setup'
            }
class MachinekitCommandExecute(MachinekitCommand):
    '''FC command to open the Execute dock widget.'''
    def __init__(self):
        # super(self.__class__, self) recurses infinitely if this class is
        # ever subclassed; use the py3 zero-argument form instead.
        super().__init__('Exe', ['command', 'status'])
    def activate(self, mk):
        '''Create and return the Execute dock for the given MK instance.'''
        return MachinekitExecute.Execute(mk)
    def GetResources(self):
        '''Icon, menu label and tooltip used by FreeCAD for this command.'''
        return {
            'Pixmap'    : machinekit.FileResource('machinekiticon-execute.svg'),
            'MenuText'  : 'Execute',
            'ToolTip'   : 'Interface for controlling file execution'
            }
class MachinekitCommandHud(MachinekitCommand):
    '''FC command to add the HUD to the currently active 3d view.'''
    def __init__(self):
        # super(self.__class__, self) recurses infinitely if this class is
        # ever subclassed; use the py3 zero-argument form instead.
        super().__init__('Hud', ['command', 'status'])
    def IsActive(self):
        # needs a 3d view (Gui document), not just a document
        return not (ActiveMK() is None or FreeCADGui.ActiveDocument is None)
    def activate(self, mk):
        '''Toggle the HUD overlay; returns None (no dock widget involved).'''
        MachinekitHud.ToggleHud(mk)
    def GetResources(self):
        '''Icon, menu label and tooltip used by FreeCAD for this command.'''
        return {
            'Pixmap'    : machinekit.FileResource('machinekiticon-hud.svg'),
            'MenuText'  : 'Hud',
            'ToolTip'   : 'HUD DRO interface for machine setup'
            }
class MachinekitCommandCombo(MachinekitCommand):
    '''FC command to start the combo dock in the Path workbench.
    If constructed with a specific mk instance the command is bound to that
    instance, otherwise it operates on the currently active one.'''
    def __init__(self, mk=None):
        # Name the base class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MachinekitCommandCombo, self).__init__('Combo', ['command', 'status'])
        self.combo = {}  # maps mk instance -> open Combo dock
        self.mk = mk     # bound instance, or None for the generic command
    def IsActive(self):
        return (self.mk is not None) or MachinekitCommand.IsActive(self)
    def haveMK(self):
        '''A bound command brings its own MK instance.'''
        return self.mk is not None
    def activate(self, mk):
        '''Open (or re-activate) the Combo dock for the given MK instance.'''
        if self.mk:
            mk = self.mk
        dock = self.combo.get(mk)
        if dock:
            dock.activate()
            return None
        dock = MachinekitCombo.Combo(mk)
        self.combo[mk] = dock
        # Connect on the instance actually being displayed; self.mk is None
        # for the generic command registered by the command center, so
        # dereferencing self.mk here raised an AttributeError.
        mk.errorUpdate.connect(_mkerror)
        return dock
    def GetResources(self):
        '''Return FC command resources (icon, menu text, tooltip).'''
        return {
            'Pixmap'   : machinekit.FileResource('machinekiticon.svg'),
            'MenuText' : 'Combo',
            'ToolTip'  : 'Combo interface with all sub-interfaces'
            }
    def terminateDock(self, dock):
        '''Disconnect the error handler and drop the dock bookkeeping.'''
        # Use the dock's own instance - self.mk is None for the generic
        # command, which made the original self.mk.errorUpdate access fail.
        dock.mk.errorUpdate.disconnect(_mkerror)
        del self.combo[dock.mk]
        return MachinekitCommand.terminateDock(self, dock)
class MachinekitCommandPower(MachinekitCommand):
    '''FC menu command to toggle the power of the active MK instance.'''
    def __init__(self, on):
        # Name the base class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MachinekitCommandPower, self).__init__('Pwr', ['command', 'status'])
        self.on = on  # True = this command switches power ON, False = OFF
    def IsActive(self):
        #PathLog.track(self.name)
        # Enabled only when the active instance's power state differs from the
        # state this command switches to; bool() so FC always gets a boolean.
        return bool(ActiveMK() and ActiveMK().isPowered() != self.on)
    def activate(self, mk):
        '''Toggle the controller's power state.'''
        mk.power()
    def GetResources(self):
        '''Return FC command resources (menu text, tooltip).'''
        return {
            'MenuText' : "Power %s" % ('ON' if self.on else 'OFF'),
            'ToolTip'  : 'Turn machinekit controller on/off'
            }
class MachinekitCommandHome(MachinekitCommand):
    '''FC menu command to home all axes.'''
    def __init__(self):
        # Name the base class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MachinekitCommandHome, self).__init__('Home', ['command', 'status'])
    def IsActive(self):
        #PathLog.track(self.name)
        # Homing requires a powered instance that is not yet homed; bool() so
        # FC always gets a boolean.
        return bool(ActiveMK() and ActiveMK().isPowered() and not ActiveMK().isHomed())
    def activate(self, mk):
        '''Home all axes of the given MK instance.'''
        mk.home()
    def GetResources(self):
        '''Return FC command resources (menu text, tooltip).'''
        return {
            'MenuText' : 'Home',
            'ToolTip'  : 'Home all axes'
            }
class MachinekitCommandActivate(MachinekitCommand):
    '''FC menu command to activate a MK instance.'''
    MenuText = 'Activate'
    def __init__(self):
        # Name the base class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MachinekitCommandActivate, self).__init__('Activate', None)
    def activate(self, mk):
        '''Make the given MK instance the active one.'''
        SetMK(mk)
    def GetResources(self):
        '''Return FC command resources (menu text, tooltip).'''
        return {
            'MenuText' : self.MenuText,
            'ToolTip'  : 'Make Machinekit active'
            }
class MachinekitCommandActivateNone(MachinekitCommand):
    '''FC menu command used when no MK instance can be found.'''
    MenuText = '--no MK found--'
    def __init__(self):
        # Name the base class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MachinekitCommandActivateNone, self).__init__('None', None)
    def IsActive(self):
        # Pure placeholder menu entry - never enabled.
        return False
    def GetResources(self):
        '''Return FC command resources (menu text only).'''
        return { 'MenuText' : self.MenuText }
# Name of the toolbar holding the machinekit dock-widget commands.
ToolbarName = 'MachinekitTools'
# Command names (FC registration keys) shown on the toolbar.
ToolbarTools = [MachinekitCommandCombo.__name__, MachinekitCommandHud.__name__, MachinekitCommandJog.__name__, MachinekitCommandExecute.__name__]
# Title of the Machinekit main menu ('&' marks the keyboard accelerator).
MenuName = 'Machine&kit'
# Menu entries: Home first, then a separator, then the toolbar commands.
MenuList = [MachinekitCommandHome.__name__, 'Separator'] + ToolbarTools
class MachinekitCommandCenter(object):
'''This class orchestrates MK discovery and the associated enabling/disabling of commands.
If enabled it also adds Combo commands to the Path toolbar.'''
    def __init__(self):
        '''Create all machinekit FC commands and the timer driving their refresh.'''
        # PreciseTimer keeps the periodic UI refresh cadence stable.
        self.timer = PySide.QtCore.QTimer()
        self.timer.setTimerType(PySide.QtCore.Qt.PreciseTimer)
        self.timer.timeout.connect(self.tick)
        # All command objects, in registration order.
        self.commands = []
        self._addCommand(MachinekitCommandActivate.__name__, MachinekitCommandActivate())
        self._addCommand(MachinekitCommandActivateNone.__name__, MachinekitCommandActivateNone())
        self._addCommand(MachinekitCommandPower.__name__ + 'ON', MachinekitCommandPower(True))
        self._addCommand(MachinekitCommandPower.__name__ + 'OFF', MachinekitCommandPower(False))
        self._addCommand(MachinekitCommandHome.__name__, MachinekitCommandHome())
        self._addCommand(MachinekitCommandCombo.__name__, MachinekitCommandCombo())
        self._addCommand(MachinekitCommandHud.__name__, MachinekitCommandHud())
        self._addCommand(MachinekitCommandJog.__name__, MachinekitCommandJog())
        self._addCommand(MachinekitCommandExecute.__name__, MachinekitCommandExecute())
        # Last observed IsActive() state of every command; tick() compares
        # against this to detect activation changes.
        self.active = [cmd.IsActive() for cmd in self.commands]
        self.comboTB = {}  # per-MK-instance Combo commands for the Path WB toolbar
        self.comboID = 0   # running id used to uniquely name Combo commands
        self.holdoff = 0   # tick counter throttling UI refreshes
def _addCommand(self, name, cmd):
self.commands.append(cmd)
FreeCADGui.addCommand(name, cmd)
    def start(self):
        '''Start periodic MK discovery and UI refresh (period: MachinekitUpdateMS).'''
        self.timer.start(MachinekitUpdateMS)
    def stop(self):
        '''Stop the periodic refresh timer.'''
        self.timer.stop()
    def isActive(self):
        '''Return True while the periodic refresh timer is running.'''
        return self.timer.isActive()
    def tick(self):
        '''Periodically called by the timer to update menus and tool bars depending on
        discovered and lost MK instances.'''
        # holdoff throttles the (more expensive) UI refresh below to every
        # MachinekitUiHoldoff ticks; instance updates run more frequently.
        self.holdoff = self.holdoff - 1
        if machinekit.Instances() or self.holdoff < 1:
            machinekit._update()
        if self.holdoff < 1:
            active = [cmd.IsActive() for cmd in self.commands]
            def aString(activation):
                # Compact '1.0.1...' representation of activation flags for logging.
                return '.'.join(['1' if a else '0' for a in activation])
            if self.active != active:
                PathLog.info("Command activation changed from %s to %s" % (aString(self.active), aString(active)))
                # Force FC to re-query IsActive() on all registered commands.
                FreeCADGui.updateCommands()
                self.active = active
            self.refreshActivationMenu()
            if MachinekitPreferences.addToPathWB():
                self.refreshComboWB()
            self.holdoff = MachinekitUiHoldoff
    def refreshActivationMenu(self):
        '''Synchronise the Activate sub-menu with the currently discovered MK
        instances. Returns True if the menu was modified.'''
        modified = False
        menu = FreeCADGui.getMainWindow().menuBar().findChild(PySide.QtGui.QMenu, MenuName)
        if menu:
            mks = [mk for mk in machinekit.Instances() if mk.isValid()]
            ma = menu.findChild(PySide.QtGui.QMenu, MachinekitCommandActivate.MenuText)
            actions = ma.actions()
            if mks:
                mkNames = [mk.name() for mk in mks]
                # Keep actions matching a live instance, drop stale ones.
                for action in actions:
                    name = action.text()
                    if name in mkNames:
                        mkNames.remove(name)
                        mk = [mk for mk in mks if mk.name() == name][0]
                        # The currently active instance (MK) is shown disabled.
                        action.setEnabled(mk != MK)
                    else:
                        modified = True
                        ma.removeAction(action)
                # Add actions for newly discovered instances.
                for name in mkNames:
                    mk = [mk for mk in mks if mk.name() == name][0]
                    action = PySide.QtGui.QAction(name, ma)
                    action.setEnabled(mk != MK)
                    PathLog.track(mk.name(), [s for s in mk.instance.endpoint])
                    # mk=mk binds the current instance; a plain closure would
                    # late-bind and all actions would activate the last mk.
                    # NOTE(review): self.activate is presumably defined further
                    # down in this class - confirm.
                    action.triggered.connect(lambda x=False, mk=mk: self.activate(mk))
                    ma.addAction(action)
                    modified = True
            else:
                # No instances: show a single disabled placeholder entry.
                if 1 != len(actions) or actions[0].objectName() != MachinekitCommandActivateNone.__name__:
                    for action in actions:
                        ma.removeAction(action)
                    action = PySide.QtGui.QAction(MachinekitCommandActivateNone.MenuText, ma)
                    action.setEnabled(False)
                    ma.addAction(action)
                    modified = True
        return modified
def refreshComboWB(self):
if 'PathWorkbench' in FreeCADGui.listWorkbenches():
wb = FreeCADGui.getWorkbench('PathWorkbench')
if hasattr(wb, '__Workbench__'):
MachinekitPreferences.Setup()
mks = {}
for mk in [mk for mk in machinekit.Instances() if mk.isValid()]:
if self.comboTB.get(mk) is None:
name = "%s_%d" % (MachinekitCommandCombo.__name__, self.comboID)
cmd = MachinekitCommandCombo(mk)
self._addCommand(name, cmd)
mks[mk] = (name, cmd)
self.comboID = self.comboID + 1
else:
mks[mk] = self.comboTB[mk]
tb = FreeCADGui.getMainWindow().findChild(PySide.QtGui.QToolBar, 'MachinekitCombo')
if tb:
# first remove all tool buttons which are no longer valid
for mk | |
warning as this issue will be caught in _Reqs() initialization.
if not line and len(splited) < 1:
warn_msg = "[Warning] Empty line detected while filtering lines."
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, first line in requirement definition will include `[`
# in the config file (.ini). Remove it.
if splited[0] == "[":
filtered = splited[1:]
elif "[" in splited[0]:
splited = splited[0].replace("[", "")
filtered = splited
# If `[` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `[` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, last line in requirement definition will include `]`
# in the config file (.ini). Remove it.
if filtered[-1] == "]":
filtered = filtered[:-1]
elif "]" in filtered[-1]:
filtered[-1] = filtered[-1].replace("]", "")
# If `]` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `]` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
return filtered
  def in_range(self, ver, req):
    """Checks if a version satisfies a version and/or compatibility requirement.

    Args:
      ver: List whose first item is a config version that needs to be checked
        for support status and version compatibility.
        e.g. ver = [`1.0`]
      req: `_Reqs` class instance that represents a configuration version and
        compatibility specifications.

    Returns:
      Boolean output of checking if version `ver` meets the requirement
      stored in `req` (or a `_Reqs` requirements class instance).
    """
    # If `req.exclude` is not empty and `ver` is in `req.exclude`,
    # no need to proceed to next set of checks as it is explicitly
    # NOT supported.
    if req.exclude is not None:
      for v in ver:
        if v in req.exclude:
          return False
    # If `req.include` is not empty and `ver` is in `req.include`,
    # no need to proceed to next set of checks as it is supported and
    # NOT unsupported (`req.exclude`).
    include_checked = False
    if req.include is not None:
      for v in ver:
        if v in req.include:
          return True
        # Remember that an include list existed but did not match `ver`;
        # this selects the error message emitted in the final branch.
        include_checked = True
    # If `req.range` is not empty, then `ver` is defined with a `range`
    # syntax. Check whether `ver` falls under the defined supported
    # range.
    if req.range != [None, None]:
      min_v = req.range[0]  # minimum supported version
      max_v = req.range[1]  # maximum supported version
      ver = ver[0]  # version to compare
      # `ver` is in range iff it compares as the larger of (min_v, ver) and
      # the smaller of (ver, max_v); "inf" marks an unbounded maximum.
      lg = _compare_versions(min_v, ver)["larger"]  # `ver` should be larger
      sm = _compare_versions(ver, max_v)["smaller"]  # `ver` should be smaller
      if lg in [ver, "equal"] and sm in [ver, "equal", "inf"]:
        return True
      else:
        err_msg = "[Error] Version is outside of supported range. "
        err_msg += "(config = %s, " % str(req.config)
        err_msg += "version = %s, " % str(ver)
        err_msg += "supported range = %s)" % str(req.range)
        logging.warning(err_msg)
        self.warning_msg.append(err_msg)
        return False
    else:
      err_msg = ""
      if include_checked:
        # user config is not supported as per exclude, include, range
        # specification.
        err_msg = "[Error] Version is outside of supported range. "
      else:
        # user config is not defined in exclude, include or range. config file
        # error.
        err_msg = "[Error] Missing specification. "
      err_msg += "(config = %s, " % str(req.config)
      err_msg += "version = %s, " % str(ver)
      err_msg += "supported range = %s)" % str(req.range)
      logging.warning(err_msg)
      self.warning_msg.append(err_msg)
      return False
  def _print(self, *args):
    """Prints compatibility check status and failure or warning messages.

    Prints to console without using `logging`.

    Args:
      *args: String(s) that is one of:
        [`failures`,  # all failures
         `successes`,  # all successes
         `failure_msgs`,  # failure message(s) recorded upon failure(s)
         `warning_msgs`]  # warning message(s) recorded upon warning(s)

    Raises:
      Exception: If *args not in:
        [`failures`, `successes`, `failure_msgs`, `warning_msgs`]
    """
    def _format(name, arr):
      """Prints compatibility check results with a format.

      Args:
        name: String that is the title representing list `arr`.
        arr: List of items to be printed in a certain format.
      """
      title = "### All Compatibility %s ###" % str(name)
      tlen = len(title)
      print("-"*tlen)
      print(title)
      print("-"*tlen)
      print(" Total # of %s: %s\n" % (str(name), str(len(arr))))
      if arr:
        # Each item is a (label, detail) pair; detail may be a list of
        # messages or a single message.
        for item in arr:
          detail = ""
          if isinstance(item[1], list):
            for itm in item[1]:
              detail += str(itm) + ", "
            detail = detail[:-2]  # drop trailing ", "
          else:
            detail = str(item[1])
          print(" %s ('%s')\n" % (str(item[0]), detail))
      else:
        print(" No %s" % name)
      print("\n")
    for p_item in args:
      if p_item == "failures":
        _format("Failures", self.failures)
      elif p_item == "successes":
        _format("Successes", self.successes)
      elif p_item == "failure_msgs":
        _format("Failure Messages", self.error_msg)
      elif p_item == "warning_msgs":
        _format("Warning Messages", self.warning_msg)
      else:
        raise Exception(
            "[Error] Wrong input provided for %s." % _get_func_name())
def check_compatibility(self):
"""Checks version and dependency compatibility for a given configuration.
`check_compatibility` immediately returns with `False` (or failure status)
if any child process or checks fail. For error and warning messages, either
print `self.(error_msg|warning_msg)` or call `_print` function.
Returns:
Boolean that is a status of the compatibility check result.
"""
# Check if all `Required` configs are found in user configs.
usr_keys = self.usr_config.keys()
for k in six.iterkeys(self.usr_config):
if k not in usr_keys:
err_msg = "[Error] Required config not found in user config."
err_msg += "(required = %s, " % str(k)
err_msg += "user configs = %s)" % str(usr_keys)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([k, err_msg])
return False
# Parse each user config and validate its compatibility.
overall_status = True
for config_name, spec in six.iteritems(self.usr_config):
temp_status = True
# Check under which section the user config is defined.
in_required = config_name in self.required.keys()
in_optional = config_name in self.optional.keys()
in_unsupported = config_name in self.unsupported.keys()
in_dependency = config_name in self.dependency.keys()
# Add to warning if user config is not specified in the config file.
if not (in_required or in_optional or in_unsupported or in_dependency):
warn_msg = "[Error] User config not defined in config file."
warn_msg += "(user config = %s)" % str(config_name)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
self.failures.append([config_name, warn_msg])
temp_status = False
else:
if in_unsupported:
if self.in_range(spec, self.unsupported[config_name]):
err_msg = "[Error] User config is unsupported. It is "
err_msg += "defined under 'Unsupported' section in the config file."
err_msg += " (config = %s, spec = %s)" % (config_name, str(spec))
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_required:
if not self.in_range(spec, self.required[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Required' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_optional:
if not self.in_range(spec, self.optional[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Optional' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
# If user config and version has a dependency, check both user
# config + version and dependency config + version are supported.
if in_dependency:
# Get dependency information. The information gets retrieved in the
# following format:
# [`config`, `config _Reqs()`, `dependency`, `dependency _Reqs()`]
dep_list = self.dependency[config_name]
if dep_list:
for rule in dep_list:
cfg = rule[0] # config name
cfg_req = rule[1] # _Reqs() instance for config requirement
dep = rule[2] # dependency name
dep_req = rule[3] # _Reqs() instance for dependency requirement
# Check if user config has a dependency in the following sequence:
# [1] Check user config and the config that has dependency
# are the same. (This is defined as `cfg_status`.)
# [2] Check if dependency is supported.
try:
cfg_name = self.usr_config[cfg]
dep_name = self.usr_config[dep]
cfg_status = self.in_range(cfg_name, cfg_req)
dep_status = self.in_range(dep_name, dep_req)
# If both status's are `True`, then user config meets dependency
# spec.
if cfg_status:
if not dep_status:
# throw error
err_msg = "[Error] User config has a dependency that cannot"
err_msg += " | |
distance <= radius from n
name : str, optional
calculated attribute name
distance : str, optional
Use specified edge data key as distance.
For example, setting ``distance=’weight’`` will use the edge ``weight`` to
measure the distance from the node n during ego_graph generation.
weight : str, optional
Use the specified edge attribute as the edge distance in shortest
path calculations in closeness centrality algorithm
Returns
-------
Graph
networkx.Graph
Examples
--------
>>> network_graph = mm.local_closeness_centrality(network_graph, radius=400, distance='edge_length')
"""
warnings.warn(
"local_closeness_centrality() is deprecated and will be removed in momepy 0.4.0. "
"Use closeness_centrality() instead.",
FutureWarning,
)
return closeness_centrality(
graph=graph, radius=radius, name=name, distance=distance, weight=weight
)
def closeness_centrality(
    graph,
    name="closeness",
    weight="mm_len",
    radius=None,
    distance=None,
    verbose=True,
    **kwargs
):
    """
    Calculate closeness centrality for the nodes of a street network.

    Wrapper around ``networkx.closeness_centrality``. Closeness centrality
    of a node `u` is the reciprocal of the average shortest path distance
    to `u` over all `n-1` reachable nodes:

    .. math::
        C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},

    where :math:`d(v, u)` is the shortest-path distance between :math:`v`
    and :math:`u`, and :math:`n` is the number of nodes that can reach
    :math:`u`. If ``radius`` is set, centrality is evaluated locally on an
    ego subgraph around each node instead.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name
    weight : str (default 'mm_len')
        attribute holding the weight of edge (e.g. length, angle)
    radius : int
        Include all neighbors of distance <= radius from n
    distance : str, optional
        edge data key used as distance when generating the ego graph
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps
    **kwargs
        kwargs for ``networkx.closeness_centrality``

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.closeness_centrality(network_graph)
    """
    net = graph.copy()
    if not radius:
        # global closeness over the whole network
        values = nx.closeness_centrality(net, distance=weight, **kwargs)
        nx.set_node_attributes(net, values, name)
        return net
    # local closeness: evaluate every node within its ego subgraph
    node_count = len(net)
    for node in tqdm(net, total=node_count, disable=not verbose):
        ego = nx.ego_graph(net, node, radius=radius, distance=distance)
        net.nodes[node][name] = _closeness_centrality(
            ego, node, length=weight, len_graph=node_count
        )
    return net
def betweenness_centrality(
    graph,
    name="betweenness",
    mode="nodes",
    weight="mm_len",
    endpoints=True,
    radius=None,
    distance=None,
    normalized=False,
    verbose=True,
    **kwargs
):
    """
    Calculate shortest-path betweenness centrality for nodes or edges.

    Wrapper around ``networkx.betweenness_centrality`` and
    ``networkx.edge_betweenness_centrality``. Betweenness centrality of a
    node `v` (or edge `e`) is the sum over all node pairs of the fraction
    of shortest paths passing through `v` (or `e`):

    .. math::
        c_B(v) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}

    Because the networkx functions do not support MultiGraphs, parallel
    edges are first collapsed into a simple graph, keeping for each node
    pair the attributes of the edge with the smallest ``weight`` value.
    If ``radius`` is set, centrality is evaluated per node on an ego
    subgraph of that radius instead.

    Adapted from :cite:`porta2006`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name
    mode : str, default 'nodes'
        mode of betweenness calculation. 'node' for node-based, 'edges' for edge-based
    weight : str (default 'mm_len')
        attribute holding the weight of edge (e.g. length, angle)
    endpoints : bool (default True)
        include path endpoints in the counts (node mode only)
    radius : int
        Include all neighbors of distance <= radius from n
    distance : str, optional
        edge data key used as distance when generating the ego graph
    normalized : bool, optional
        If True the betweenness values are normalized by `2/((n-1)(n-2))`,
        where n is the number of nodes in subgraph.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps
    **kwargs
        kwargs for the underlying networkx functions

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.betweenness_centrality(network_graph)

    Notes
    -----
    In case of angular betweenness, implementation is based on "Tasos
    Implementation".
    """
    netx = graph.copy()
    # networkx 2.4 betweenness functions cannot handle MultiGraphs: collapse
    # parallel edges, keeping the attribute dict of the lightest edge.
    simple = nx.Graph()
    for u, v, k, data in netx.edges(data=True, keys=True):
        if not simple.has_edge(u, v):
            simple.add_edge(u, v, **data)
        elif simple[u][v][weight] > data[weight]:
            nx.set_edge_attributes(simple, {(u, v): data})
    if radius:
        # local betweenness evaluated on an ego subgraph around every node
        for node in tqdm(simple, total=len(simple), disable=not verbose):
            ego = nx.ego_graph(simple, node, radius=radius, distance=distance)
            netx.nodes[node][name] = nx.betweenness_centrality(
                ego, weight=weight, normalized=normalized, **kwargs
            )[node]
    elif mode == "nodes":
        node_vals = nx.betweenness_centrality(
            simple, weight=weight, endpoints=endpoints, **kwargs
        )
        nx.set_node_attributes(netx, node_vals, name)
    elif mode == "edges":
        edge_vals = nx.edge_betweenness_centrality(simple, weight=weight, **kwargs)
        # write values back onto every parallel edge of the MultiGraph;
        # networkx may key the result as (u, v) or (v, u)
        for u, v, k in netx.edges(keys=True):
            try:
                netx[u][v][k][name] = edge_vals[u, v]
            except KeyError:
                netx[u][v][k][name] = edge_vals[v, u]
    else:
        raise ValueError(
            "Mode {} is not supported. Use 'nodes' or 'edges'.".format(mode)
        )
    return netx
def local_betweenness_centrality(
    graph,
    radius=5,
    name="betweenness",
    distance=None,
    weight=None,
    normalized=False,
    **kwargs
):
    """
    Deprecated: calculate shortest-path betweenness centrality within subgraphs.

    A subgraph is generated around each node within the set ``radius``. If
    ``distance=None``, ``radius`` defines topological distance; otherwise it
    uses values in the ``distance`` attribute.

    .. deprecated::
        Use :func:`betweenness_centrality` with the same arguments instead;
        this wrapper only forwards to it and emits a ``FutureWarning``.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius : int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        edge data key used as distance when generating the ego graph
    weight : str, optional
        Use the specified edge attribute as the edge distance in shortest
        path calculations
    normalized : bool, optional
        If True the betweenness values are normalized by `2/((n-1)(n-2))`,
        where n is the number of nodes in subgraph.
    **kwargs
        kwargs for ``networkx.betweenness_centrality_subset``

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.local_betweenness_centrality(network_graph, radius=800, distance='edge_length')
    """
    warnings.warn(
        "local_betweenness_centrality() is deprecated and will be removed in momepy 0.4.0. "
        "Use betweenness_centrality() instead.",
        FutureWarning,
    )
    return betweenness_centrality(
        graph,
        radius=radius,
        name=name,
        distance=distance,
        weight=weight,
        normalized=normalized,
        **kwargs
    )
def _euclidean(n, m):
"""helper for straightness"""
return math.sqrt((n[0] - m[0]) ** 2 + (n[1] - m[1]) ** 2)
def _straightness_centrality(G, weight, normalized=True):
    """
    Calculate straightness centrality for every node of ``G``.

    Straightness of node ``n`` averages the ratio between the Euclidean
    distance and the network (shortest-path) distance to every reachable
    node. Node keys are used directly as coordinates by ``_euclidean``, so
    nodes are expected to be (x, y) tuples (as produced by
    ``momepy.gdf_to_nx`` — TODO confirm).
    """
    straightness_centrality = {}
    for n in G.nodes():
        straightness = 0
        # network distances from n to every reachable node (includes n itself)
        sp = nx.single_source_dijkstra_path_length(G, n, weight=weight)
        if len(sp) > 0 and len(G) > 1:
            for target in sp:
                if n != target:
                    network_dist = sp[target]
                    euclidean_dist = _euclidean(n, target)
                    straightness = straightness + (euclidean_dist / network_dist)
            straightness_centrality[n] = straightness * (1.0 / (len(G) - 1.0))
            # normalize to number of nodes-1 in connected part
            if normalized:
                if len(sp) > 1:
                    s = (len(G) - 1.0) / (len(sp) - 1.0)
                    straightness_centrality[n] *= s
                else:
                    # isolated node: nothing reachable
                    straightness_centrality[n] = 0
        else:
            straightness_centrality[n] = 0.0
    return straightness_centrality
def straightness_centrality(
graph,
weight="mm_len",
normalized=True,
name="straightness",
radius=None,
distance=None,
verbose=True,
):
"""
Calculates the straightness centrality for nodes.
.. math::
C_{S}(i)=\\frac{1}{n-1} \\sum_{j \\in V, j \\neq i} \\frac{d_{i j}^{E u}}{d_{i j}}
where :math:`\\mathrm{d}^{\\mathrm{E} \\mathrm{u}}_{\\mathrm{ij}}` is the Euclidean distance
between nodes `i` and `j` along a straight line.
Adapted from :cite:`porta2006`.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
weight : str (default 'mm_len')
attribute holding length of edge
normalized : bool
normalize to number of nodes-1 in connected part (for local straightness
is recommended to set to normalized False)
name : str, optional
calculated | |
= out_grid_vector.CreateLayer(
'grid', spat_ref, ogr.wkbPolygon)
grid_layer_defn = grid_layer.GetLayerDefn()
extent = vector_layer.GetExtent() # minx maxx miny maxy
if grid_type == 'hexagon':
# calculate the inner dimensions of the hexagons
grid_width = extent[1] - extent[0]
grid_height = extent[3] - extent[2]
delta_short_x = cell_size * 0.25
delta_long_x = cell_size * 0.5
delta_y = cell_size * 0.25 * (3 ** 0.5)
# Since the grid is hexagonal it's not obvious how many rows and
# columns there should be just based on the number of squares that
# could fit into it. The solution is to calculate the width and
# height of the largest row and column.
n_cols = int(math.floor(grid_width / (3 * delta_long_x)) + 1)
n_rows = int(math.floor(grid_height / delta_y) + 1)
def _generate_polygon(col_index, row_index):
"""Generate a points for a closed hexagon."""
if (row_index + 1) % 2:
centroid = (
extent[0] + (delta_long_x * (1 + (3 * col_index))),
extent[2] + (delta_y * (row_index + 1)))
else:
centroid = (
extent[0] + (delta_long_x * (2.5 + (3 * col_index))),
extent[2] + (delta_y * (row_index + 1)))
x_coordinate, y_coordinate = centroid
hexagon = [(x_coordinate - delta_long_x, y_coordinate),
(x_coordinate - delta_short_x, y_coordinate + delta_y),
(x_coordinate + delta_short_x, y_coordinate + delta_y),
(x_coordinate + delta_long_x, y_coordinate),
(x_coordinate + delta_short_x, y_coordinate - delta_y),
(x_coordinate - delta_short_x, y_coordinate - delta_y),
(x_coordinate - delta_long_x, y_coordinate)]
return hexagon
elif grid_type == 'square':
def _generate_polygon(col_index, row_index):
"""Generate points for a closed square."""
square = [
(extent[0] + col_index * cell_size + x,
extent[2] + row_index * cell_size + y)
for x, y in [
(0, 0), (cell_size, 0), (cell_size, cell_size),
(0, cell_size), (0, 0)]]
return square
n_rows = int((extent[3] - extent[2]) / cell_size)
n_cols = int((extent[1] - extent[0]) / cell_size)
else:
raise ValueError('Unknown polygon type: %s' % grid_type)
for row_index in xrange(n_rows):
for col_index in xrange(n_cols):
polygon_points = _generate_polygon(col_index, row_index)
ring = ogr.Geometry(ogr.wkbLinearRing)
for xoff, yoff in polygon_points:
ring.AddPoint(xoff, yoff)
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
if original_polygon.contains(
shapely.wkt.loads(poly.ExportToWkt())):
poly_feature = ogr.Feature(grid_layer_defn)
poly_feature.SetGeometry(poly)
grid_layer.CreateFeature(poly_feature)
def _build_regression_coefficients(
        response_vector_path, predictor_table_path,
        tmp_indexed_vector_path, out_coefficient_vector_path,
        out_predictor_id_list):
    """Calculate least squares fit for the polygons in the response vector.

    Build a least squares regression from the log normalized response vector,
    spatial predictor datasets in `predictor_table_path`, and a column of 1s
    for the y intercept.

    Parameters:
        response_vector_path (string): path to a single layer polygon vector.
        predictor_table_path (string): path to a CSV file with three columns
            'id', 'path' and 'type'.  'id' is the unique ID for that predictor
            and must be less than 10 characters long. 'path' indicates the
            full or relative path to the `predictor_table_path` table for the
            spatial predictor dataset. 'type' is one of:
                'point_count': count # of points per response polygon
                'point_nearest_distance': distance from nearest point to the
                    centroid of the response polygon
                'line_intersect_length': length of line segments intersecting
                    each response polygon
                'polygon_area_coverage': area of predictor polygon
                    intersecting the response polygon
                'polygon_percent_coverage': percent of predictor polygon
                    intersecting the response polygon
                'raster_sum': sum of predictor raster under the response
                    polygon
                'raster_mean': average of predictor raster under the
                    response polygon
        tmp_indexed_vector_path (string): path to temporary working file in
            case the response vector needs an index added
        out_coefficient_vector_path (string): path to a copy of
            `response_vector_path` with the modified predictor variable
            responses. Overwritten if exists.
        out_predictor_id_list (list): a list that is overwritten with the
            predictor ids that are added to the coefficient vector attribute
            table.

    Returns:
        None
    """
    response_vector = gdal.OpenEx(response_vector_path, gdal.OF_VECTOR)
    response_layer = response_vector.GetLayer()
    response_polygons_lookup = {}  # maps FID to prepared geometry
    for response_feature in response_layer:
        feature_geometry = response_feature.GetGeometryRef()
        feature_polygon = shapely.wkt.loads(feature_geometry.ExportToWkt())
        # drop the OGR geometry reference before the feature goes away
        feature_geometry = None
        response_polygons_lookup[response_feature.GetFID()] = feature_polygon
    response_layer = None
    driver = gdal.GetDriverByName('ESRI Shapefile')
    if os.path.exists(out_coefficient_vector_path):
        driver.Delete(out_coefficient_vector_path)
    out_coefficent_vector = driver.CreateCopy(
        out_coefficient_vector_path, response_vector)
    response_vector = None
    out_coefficent_layer = out_coefficent_vector.GetLayer()
    # lookup functions for response types
    # (raster_sum/raster_mean are special-cased in the loop below)
    predictor_functions = {
        'point_count': _point_count,
        'point_nearest_distance': _point_nearest_distance,
        'line_intersect_length': _line_intersect_length,
        'polygon_area_coverage': lambda x, y: _polygon_area('area', x, y),
        'polygon_percent_coverage': lambda x, y: _polygon_area(
            'percent', x, y),
        }
    predictor_table = natcap.invest.pygeoprocessing_0_3_3.get_lookup_from_csv(
        predictor_table_path, 'id')
    # overwrite in place so the caller's list object sees the ids
    out_predictor_id_list[:] = predictor_table.keys()
    for predictor_id in predictor_table:
        LOGGER.info("Building predictor %s", predictor_id)
        # Delete the field if it already exists
        field_index = out_coefficent_layer.FindFieldIndex(
            str(predictor_id), 1)
        if field_index >= 0:
            out_coefficent_layer.DeleteField(field_index)
        predictor_field = ogr.FieldDefn(str(predictor_id), ogr.OFTReal)
        predictor_field.SetWidth(24)
        predictor_field.SetPrecision(11)
        out_coefficent_layer.CreateField(predictor_field)
        predictor_path = _sanitize_path(
            predictor_table_path, predictor_table[predictor_id]['path'])
        predictor_type = predictor_table[predictor_id]['type']
        if predictor_type.startswith('raster'):
            # type must be one of raster_sum or raster_mean
            raster_type = predictor_type.split('_')[1]
            raster_sum_mean_results = _raster_sum_mean(
                response_vector_path, predictor_path,
                tmp_indexed_vector_path)
            predictor_results = raster_sum_mean_results[raster_type]
        else:
            predictor_results = predictor_functions[predictor_type](
                response_polygons_lookup, predictor_path)
        # write one value per response feature into the new field
        for feature_id, value in predictor_results.iteritems():
            feature = out_coefficent_layer.GetFeature(int(feature_id))
            feature.SetField(str(predictor_id), value)
            out_coefficent_layer.SetFeature(feature)
    # release GDAL handles and flush so edits reach disk
    out_coefficent_layer = None
    out_coefficent_vector.FlushCache()
    out_coefficent_vector = None
def _build_temporary_indexed_vector(vector_path, out_fid_index_vector_path):
    """Copy a single layer vector and add a field recording each feature's FID.

    Parameters:
        vector_path (string): path to a single layer OGR vector.
        out_fid_index_vector_path (string): desired path to the copied vector
            that has an index field added to it; any existing file at this
            path is removed first.

    Returns:
        fid_field (string): name of the FID index field added to the copy.
    """
    vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)
    driver = gdal.GetDriverByName('ESRI Shapefile')
    if os.path.exists(out_fid_index_vector_path):
        os.remove(out_fid_index_vector_path)
    fid_indexed_vector = driver.CreateCopy(
        out_fid_index_vector_path, vector)
    # Release the source dataset handle as soon as the copy is made; the
    # original code left it open for the rest of the process lifetime.
    vector = None
    fid_indexed_layer = fid_indexed_vector.GetLayer()
    # Use a random 8-character field name to avoid colliding with any field
    # already present (shapefile field names are limited to 10 characters).
    fid_field = str(uuid.uuid4())[-8:]
    fid_field_defn = ogr.FieldDefn(str(fid_field), ogr.OFTInteger)
    fid_indexed_layer.CreateField(fid_field_defn)
    # Record each feature's FID in the new field so later aggregation can
    # key results back to the original features.
    for feature in fid_indexed_layer:
        fid = feature.GetFID()
        feature.SetField(fid_field, fid)
        fid_indexed_layer.SetFeature(feature)
    fid_indexed_vector.FlushCache()
    fid_indexed_layer = None
    fid_indexed_vector = None
    return fid_field
def _raster_sum_mean(
        response_vector_path, raster_path, tmp_indexed_vector_path):
    """Aggregate raster values (sum/mean/count) under each response polygon.

    Parameters:
        response_vector_path (string): path to response polygons.
        raster_path (string): path to a raster.
        tmp_indexed_vector_path (string): desired path to a vector that will
            be used to add unique indexes to `response_vector_path`.

    Returns:
        A dictionary indexing 'sum', 'mean', and 'count' to dictionaries
        mapping each feature ID of `response_vector_path` to that statistic
        of the raster values under the feature's polygon.
    """
    # The aggregation routine keys its results by a vector field value, so
    # first build a copy of the response vector with a per-feature FID field.
    fid_field = _build_temporary_indexed_vector(
        response_vector_path, tmp_indexed_vector_path)
    aggregate_results = natcap.invest.pygeoprocessing_0_3_3.aggregate_raster_values_uri(
        raster_path, tmp_indexed_vector_path, shapefile_field=fid_field)
    # Repackage the aggregate result object as plain dictionaries keyed by
    # statistic name.
    fid_raster_values = {
        'sum': aggregate_results.total,
        'mean': aggregate_results.pixel_mean,
        'count': aggregate_results.n_pixels,
    }
    return fid_raster_values
def _polygon_area(mode, response_polygons_lookup, polygon_vector_path):
    """Calculate polygon area overlap.

    Calculates the amount of projected area overlap from
    `polygon_vector_path` with `response_polygons_lookup`.

    Parameters:
        mode (string): one of 'area' or 'percent'. How this is set affects
            the metric that's output. 'area' is the area covered in
            projected units while 'percent' is percent of the total
            response area covered.
        response_polygons_lookup (dictionary): maps feature ID to
            prepared shapely.Polygon.
        polygon_vector_path (string): path to a single layer polygon vector
            object.

    Returns:
        A dictionary mapping feature IDs from `response_polygons_lookup`
        to polygon area coverage.

    Raises:
        ValueError: if `mode` is not 'area' or 'percent'.
    """
    if mode not in ('area', 'percent'):
        # Fail fast: the previous implementation silently produced an empty
        # result when an unknown mode was passed.
        raise ValueError("Unknown mode %s" % mode)
    start_time = time.time()
    polygons = _ogr_to_geometry_list(polygon_vector_path)
    prepped_polygons = [shapely.prepared.prep(polygon) for polygon in polygons]
    # Spatial index over polygon bounding boxes for fast candidate lookup.
    polygon_spatial_index = rtree.index.Index()
    for polygon_index, polygon in enumerate(polygons):
        polygon_spatial_index.insert(polygon_index, polygon.bounds)
    polygon_coverage_lookup = {}  # map FID to area/percent coverage
    for index, (feature_id, geometry) in enumerate(
            response_polygons_lookup.iteritems()):
        # Log progress at most every 5 seconds.
        if time.time() - start_time > 5.0:
            LOGGER.info(
                "%s polygon area: %.2f%% complete",
                os.path.basename(polygon_vector_path),
                (100.0*index)/len(response_polygons_lookup))
            start_time = time.time()
        # Cheap rtree bounding-box query first, then the exact
        # prepared-geometry intersection test.
        potential_intersecting_poly_ids = polygon_spatial_index.intersection(
            geometry.bounds)
        intersecting_polygons = [
            polygons[polygon_index]
            for polygon_index in potential_intersecting_poly_ids
            if prepped_polygons[polygon_index].intersects(geometry)]
        polygon_area_coverage = sum(
            (geometry.intersection(polygon)).area
            for polygon in intersecting_polygons)
        if mode == 'area':
            polygon_coverage_lookup[feature_id] = polygon_area_coverage
        else:  # mode == 'percent'
            # NOTE(review): assumes geometry.area > 0; a zero-area response
            # geometry would raise ZeroDivisionError here -- confirm inputs.
            polygon_coverage_lookup[feature_id] = (
                polygon_area_coverage / geometry.area * 100.0)
    LOGGER.info(
        "%s polygon area: 100.00%% complete",
        os.path.basename(polygon_vector_path))
    return polygon_coverage_lookup
def _line_intersect_length(response_polygons_lookup, line_vector_path):
    """Calculate the length of the intersecting lines on the response polygon.

    Parameters:
        response_polygons_lookup (dictionary): maps feature ID to
            prepared shapely.Polygon.
        line_vector_path (string): path to a single layer point vector
            object.

    Returns:
        A dictionary mapping feature IDs from `response_polygons_lookup`
        to line intersect length.
    """
    last_time = time.time()
    lines = _ogr_to_geometry_list(line_vector_path)
    # Spatial index over line bounding boxes for fast candidate lookup.
    line_spatial_index = rtree.index.Index()
    for line_index, line in enumerate(lines):
        line_spatial_index.insert(line_index, line.bounds)
    line_length_lookup = {}  # map FID to intersecting line length
    vector_basename = os.path.basename(line_vector_path)
    n_responses = len(response_polygons_lookup)
    feature_count = None
    for feature_count, (feature_id, geometry) in enumerate(
            response_polygons_lookup.iteritems()):
        # Throttled progress logging; delay_op only invokes the lambda when
        # LOGGER_TIME_DELAY has elapsed.
        last_time = delay_op(
            last_time, LOGGER_TIME_DELAY, lambda: LOGGER.info(
                "%s line intersect length: %.2f%% complete",
                vector_basename,
                (100.0 * feature_count) / n_responses))
        candidate_ids = line_spatial_index.intersection(geometry.bounds)
        # Sum exact intersection lengths over the bounding-box candidates.
        total_length = 0
        for line_index in candidate_ids:
            candidate = lines[line_index]
            if geometry.intersects(candidate):
                total_length += candidate.intersection(geometry).length
        line_length_lookup[feature_id] = total_length
    LOGGER.info(
        "%s line intersect length: 100.00%% complete", vector_basename)
    return line_length_lookup
def _point_nearest_distance(response_polygons_lookup, point_vector_path):
"""Calculate distance to nearest point for all polygons.
Parameters:
response_polygons_lookup (dictionary): maps feature ID to
prepared shapely.Polygon.
point_vector_path (string): path to a single layer point vector
object.
| |
SPENLocalFactor=myParams.myDict['SPENLocalFactor']
F=GT.ExpandWithCopiesOn2(F,DataH,SPENLocalFactor)
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SPEN_FC':
SR=scipy.io.loadmat('/media/a/H1/SR.mat')
SR=SR['SR']
SR=np.reshape(SR,[DataH,DataH,1])
SR=np.transpose(SR, (2,0,1))
SR_TF=tf.constant(SR)
I=scipy.io.loadmat('/media/a/H1/First1kIm256x256Magint16.mat')
I=I['First1kIm256x256Magint16']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=1000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature=tf.transpose(feature, perm=[1,2,0])
feature = tf.random_crop(feature, [DataH, DataW, 1])
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
P=tf.transpose(CurIWithPhase, perm=[2,1,0])
F=tf.matmul(P,SR_TF)
F=tf.transpose(F, perm=[2,1,0])
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SMASH1DFTxyC':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
Maps=scipy.io.loadmat('/media/a/H1/maps128x128x8.mat')
Mask=Maps['Msk']
Maps=Maps['maps']
nChannels=8
Mask=np.reshape(Mask,[128, 128, 1])
Maps = tf.constant(Maps)
Mask = tf.constant(np.float32(Mask))
# Maps = tf.constant(np.float32(Maps))
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.multiply(feature,Mask)
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
WithPhaseAndMaps=tf.multiply(CurIWithPhase,Maps)
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
F=GT.gfft_TFOn3D(WithPhaseAndMaps,DataH,0)
F=GT.gfft_TFOn3D(F,DataW,1)
# now subsample 2
F = tf.reshape(F, [64,2, 128, nChannels])
F=tf.slice(F,[0,0,0,0],[-1,1,-1,-1])
F = tf.reshape(F, [64, 128, nChannels])
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '1DFTxyCMaps':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
Maps=scipy.io.loadmat('/media/a/H1/maps128x128x8.mat')
Mask=Maps['Msk']
Maps=Maps['maps']
nChannels=8
Mask=np.reshape(Mask,[128, 128, 1])
Maps = tf.constant(Maps)
Mask = tf.constant(np.float32(Mask))
# Maps = tf.constant(np.float32(Maps))
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.multiply(feature,Mask)
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
WithPhaseAndMaps=tf.multiply(CurIWithPhase,Maps)
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
F=GT.gfft_TFOn3D(WithPhaseAndMaps,DataH,0)
F=GT.gfft_TFOn3D(F,DataW,1)
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M2DFT':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.random_crop(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH*DataW,1,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTxy':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.random_crop(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTx':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTy':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
#if image_size is None:
# image_size = FLAGS.sample_size
#pdb.set_trace()
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(filenames)
key, value = reader.read(filename_queue)
AlsoLabel=True
kKick= myParams.myDict['InputMode'] == 'kKick'
if kKick or myParams.myDict['InputMode'] == '1DFTx' or myParams.myDict['InputMode'] == '1DFTy' or myParams.myDict['InputMode'] == '2DFT':
AlsoLabel=False
if myParams.myDict['InputMode'] == 'AAA':
#filename_queue = tf.Print(filename_queue,[filename_queue,],message='ZZZZZZZZZ:')
keyX=key
value = tf.Print(value,[keyX,],message='QQQ:')
featuresA = tf.parse_single_example(
value,
features={
'CurIs': tf.FixedLenFeature([], tf.string),
'Labels': tf.FixedLenFeature([], tf.string)
})
feature = tf.decode_raw(featuresA['Labels'], tf.float32)
CurIs = tf.decode_raw(featuresA['CurIs'], tf.float32)
CurIs = tf.cast(CurIs, tf.int64)
mx=CurIs
# mx='qwe'+
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
label=feature
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
#image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
#print('1')
if AlsoLabel:
featuresA = tf.parse_single_example(
value,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'LabelsH': tf.FixedLenFeature([], tf.int64),
'LabelsW': tf.FixedLenFeature([], tf.int64),
'channelsOut': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string),
'labels_raw': tf.FixedLenFeature([], tf.string)
})
labels = tf.decode_raw(featuresA['labels_raw'], tf.float32)
else:
featuresA = tf.parse_single_example(
value,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string)
})
feature = tf.decode_raw(featuresA['data_raw'], tf.float32)
print('setup_inputs')
print('Data H,W,#ch: %d,%d,%d -> Labels H,W,#ch %d,%d,%d' % (DataH,DataW,channelsIn,LabelsH,LabelsW,channelsOut))
print('------------------')
if myParams.myDict['InputMode'] == '1DFTy':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataW=DataW/2
Id=np.hstack([np.arange(HalfDataW,DataW), np.arange(0,HalfDataW)])
Id=Id.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
feature=tf.fft(IQ2)
feature = tf.gather(feature,Id,axis=1)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '1DFTx':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataH=DataH/2
Id=np.hstack([np.arange(HalfDataH,DataH), np.arange(0,HalfDataH)])
Id=Id.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2 = tf.transpose(IQ2, perm=[1, 0])
feature=tf.fft(IQ2)
feature = tf.gather(feature,Id,axis=1)
feature = tf.transpose(feature, perm=[1,0])
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '2DFT':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataH=DataH/2
HalfDataW=DataW/2
IdH=np.hstack([np.arange(HalfDataH,DataH), np.arange(0,HalfDataH)])
IdH=IdH.astype(int)
IdW=np.hstack([np.arange(HalfDataW,DataW), np.arange(0,HalfDataW)])
IdW=IdW.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=tf.fft(IQ2)
IQ2=tf.gather(IQ2,IdW,axis=1)
IQ2 = tf.transpose(IQ2, perm=[1, 0])
feature=tf.fft(IQ2)
feature = tf.gather(feature,IdH,axis=1)
feature = tf.transpose(feature, perm=[1,0])
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if kKick:
filename_queue2 = tf.train.string_input_producer(filenames)
key2, value2 = reader.read(filename_queue2)
featuresA2 = tf.parse_single_example(
value2,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string)
})
feature2 = tf.decode_raw(featuresA2['data_raw'], tf.float32)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature2 = tf.reshape(feature2, [DataH, DataW, channelsIn])
feature.set_shape([None, None, channelsIn])
feature2.set_shape([None, None, channelsIn])
feature = tf.cast(feature, tf.float32)/tf.reduce_max(feature)
feature2 = tf.cast(feature2, tf.float32)/tf.reduce_max(feature)
feature= tf.concat([feature,feature*0,feature2,feature2*0], 2)
label=feature
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry3':
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry2':
FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
NMapCR=FullData['NMapCR']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [128, 128, channelsOut])
# scipy.misc.imresize(arr, size, interp='bilinear', mode=None)
labels = tf.image.resize_images(labels,[LabelsH, LabelsW]) #,method=tf.ResizeMethod.BICUBIC,align_corners=False) # or BILINEAR
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry1':
# FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
NMapCR=FullData['NMapCR']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SMASHTry1':
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
"""if myParams.myDict['Mode'] == 'RegridTry1C2':
FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndC.mat')
NMapCR=FullData['NMapCRC']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn,2])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels"""
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
#print('44')
#example.ParseFromString(serialized_example)
#x_1 = np.array(example.features.feature['X'].float_list.value)
# Convert from [depth, height, width] to [height, width, depth].
#result.uint8image = tf.transpose(depth_major, [1, 2, 0])
feature.set_shape([None, None, channelsIn])
labels.set_shape([None, None, channelsOut])
# Crop and other random augmentations
#image = tf.image.random_flip_left_right(image)
#image = tf.image.random_saturation(image, .95, 1.05)
#image = tf.image.random_brightness(image, .05)
#image = tf.image.random_contrast(image, .95, 1.05)
#print('55')
#wiggle = 8
#off_x, off_y = 25-wiggle, 60-wiggle
#crop_size = 128
#crop_size_plus = crop_size + 2*wiggle
#print('56')
#image = tf.image.crop_to_bounding_box(image, off_y, | |
"")
te.pushval("a")
te.pushval("b")
te.pushval("c")
te.swap()
self.assertEqual(te.state(), TES_IDLE)
self.assertIsNone(te.match())
self.assertEqual(te.nvals(), 3)
self.assertEqual(te.last_error_detail(), "")
self.assertEqual(te.popval(), "b")
self.assertEqual(te.state(), TES_IDLE)
self.assertIsNone(te.match())
self.assertEqual(te.nvals(), 2)
self.assertEqual(te.last_error_detail(), "")
self.assertEqual(te.popval(), "c")
self.assertEqual(te.state(), TES_IDLE)
self.assertIsNone(te.match())
self.assertEqual(te.nvals(), 1)
self.assertEqual(te.last_error_detail(), "")
self.assertEqual(te.popval(), "a")
self.assertEqual(te.state(), TES_IDLE)
self.assertIsNone(te.match())
self.assertEqual(te.nvals(), 0)
self.assertEqual(te.last_error_detail(), "")
#-def
def test_stack(self):
    """Exercise the address stack: push, top, pop, and empty-stack errors."""
    te = TagEngine()

    def check_idle(expected_naddrs):
        # Engine is healthy and holds the expected number of addresses.
        self.assertEqual(te.state(), TES_IDLE)
        self.assertEqual(te.naddrs(), expected_naddrs)
        self.assertEqual(te.last_error_detail(), "")

    def check_error(detail):
        # A stack misuse puts the engine into the error state; the stack
        # stays empty.
        self.assertEqual(te.state(), TES_ERROR)
        self.assertEqual(te.naddrs(), 0)
        self.assertEqual(te.last_error_detail(), detail)

    # Pop on an empty stack is an error.
    te.reset()
    check_idle(0)
    te.popaddr()
    check_error("Pop applied on empty stack")
    # Top on an empty stack is an error.
    te.reset()
    check_idle(0)
    te.topaddr()
    check_error("Top applied on empty stack")
    # Push/top/pop behave like a LIFO stack.
    te.reset()
    check_idle(0)
    te.pushaddr(1)
    check_idle(1)
    self.assertEqual(te.topaddr(), 1)
    check_idle(1)
    te.pushaddr(7)
    check_idle(2)
    self.assertEqual(te.topaddr(), 7)
    check_idle(2)
    self.assertEqual(te.popaddr(), 7)
    check_idle(1)
    self.assertEqual(te.popaddr(), 1)
    check_idle(0)
    # Popping past the bottom fails and returns None.
    self.assertIsNone(te.popaddr())
    check_error("Pop applied on empty stack")
#-def
def test_set_match_flag(self):
    """set_match_flag stores the flag; match_flag reads it back."""
    te = TagEngine()
    te.reset()
    # A freshly reset engine has the flag cleared.
    self.assertFalse(te.match_flag())
    for flag, check in ((True, self.assertTrue), (False, self.assertFalse)):
        te.set_match_flag(flag)
        check(te.match_flag())
#-def
def test_set_error_detail(self):
    """set_error_detail overwrites the stored detail on each call."""
    te = TagEngine()
    te.reset()
    # A freshly reset engine reports no error detail.
    self.assertEqual(te.last_error_detail(), "")
    for detail in ("error detail", "detailed error detail", ""):
        te.set_error_detail(detail)
        self.assertEqual(te.last_error_detail(), detail)
#-def
#-class
class TestInstructionsCase(unittest.TestCase):
def test_matcher_initialization(self):
    """A freshly constructed TagMatcher has no match and no error."""
    text_input = TagTextInput()
    text_input.load_data_from_string("Xoxoxo!")
    matcher = TagMatcher(text_input)
    self.assertIsNone(matcher.last_match())
    self.assertEqual(matcher.last_error_detail(), "")
#-def
def test_tag_command_interface(self):
    """A base TagCommand instance is callable with an engine argument."""
    tc = TagCommand()
    tc(TagEngine())
#-def
def create_and_run_tag_engine(self, s, c):
    """Run instruction list `c` over input string `s`.

    Returns the (engine, input) pair so tests can inspect both the final
    engine state and the input position.
    """
    engine = TagEngine()
    text_input = TagTextInput()
    text_input.load_data_from_string(s)
    program = TagProgram('itester', code = c)
    engine.run(text_input, program)
    return engine, text_input
#-def
def check_match_ok(self, te, ti, m, n):
    """Assert the engine halted with match `m`, next input symbol `n`."""
    self.assertEqual(te.state(), TES_HALTED)
    self.assertEqual(te.last_error_detail(), "")
    self.assertEqual(te.ip(), 2)
    self.assertEqual(te.match(), m)
    self.assertEqual(ti.peek(), (n, n))
    self.assertTrue(te.match_flag())
#-def

def check_match_ko(self, te, ti, u):
    """Assert the engine errored on unexpected symbol `u`, unconsumed."""
    self.assertEqual(te.state(), TES_ERROR)
    self.assertEqual(te.last_error_detail(),
        "Unexpected input symbol %r" % u
    )
    self.assertEqual(te.ip(), 1)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (u, u))
    self.assertFalse(te.match_flag())
#-def

def check_match_word_ko(self, te, ti, m, e):
    """Assert a word match failed: got `m` while expecting word `e`."""
    self.assertEqual(te.state(), TES_ERROR)
    self.assertEqual(te.last_error_detail(),
        "Unexpected word %r (need %r)" % (m, e)
    )
    self.assertEqual(te.ip(), 1)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (m[0], m[0]))
    self.assertFalse(te.match_flag())
#-def

def check_match_eof(self, te, ti):
    """Assert the engine errored with an unexpected end of input."""
    self.assertEqual(te.state(), TES_ERROR)
    self.assertEqual(te.last_error_detail(), "Unexpected end of input")
    self.assertEqual(te.ip(), 1)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (None, None))
    self.assertFalse(te.match_flag())
#-def

def check_match_word_eof(self, te, ti, m, e):
    """Assert end of input was hit mid-word: matched `m`, needed `e`."""
    self.assertEqual(te.state(), TES_ERROR)
    self.assertEqual(te.last_error_detail(),
        "Unexpected end of input (%r matched, %r needed)" % (m, e)
    )
    self.assertEqual(te.ip(), 1)
    self.assertIsNone(te.match())
    # When nothing was matched, the input was already exhausted.
    if m:
        self.assertEqual(ti.peek(), (m[0], m[0]))
    else:
        self.assertEqual(ti.peek(), (None, None))
    self.assertFalse(te.match_flag())
#-def

def check_test_result(self, te, ti, r, p):
    """Assert a test instruction halted with match flag `r` at symbol `p`."""
    self.assertEqual(te.state(), TES_HALTED)
    self.assertEqual(te.last_error_detail(), "")
    self.assertEqual(te.ip(), 2)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (p, p))
    if r:
        self.assertTrue(te.match_flag())
    else:
        self.assertFalse(te.match_flag())
#-def

def check_branch(self, te, ti, ip, p):
    """Assert a branch left the engine halted just past target `ip`."""
    self.assertEqual(te.state(), TES_HALTED)
    self.assertEqual(te.last_error_detail(), "")
    self.assertEqual(te.ip(), ip + 1)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (p, p))
    self.assertFalse(te.match_flag())
#-def

def check_skip_ok(self, te, ti, p):
    """Assert a skip succeeded: flag set but no match value recorded."""
    self.assertEqual(te.state(), TES_HALTED)
    self.assertEqual(te.last_error_detail(), "")
    self.assertEqual(te.ip(), 2)
    self.assertIsNone(te.match())
    self.assertEqual(ti.peek(), (p, p))
    self.assertTrue(te.match_flag())
#-def

# Skip failure modes are identical to the corresponding match failures.
check_skip_ko = check_match_ko
check_skip_eof = check_match_eof
def test_Fail(self):
    """Fail stops the engine immediately with the given error detail."""
    te, ti = self.create_and_run_tag_engine("uvw", [
        Fail("Bang!")
    ])
    self.assertEqual(te.state(), TES_ERROR)
    self.assertEqual(te.last_error_detail(), "Bang!")
    self.assertEqual(te.ip(), 1)
    self.assertIsNone(te.match())
    # Input is untouched: still positioned at the first symbol.
    self.assertEqual(ti.peek(), ('u', 'u'))
    self.assertFalse(te.match_flag())
#-def

def test_MatchSymbol_ok(self):
    """MatchSymbol consumes the expected symbol."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchSymbol('a'), Halt()
    ])
    self.check_match_ok(te, ti, "a", 'b')
#-def

def test_MatchSymbol_ko(self):
    """MatchSymbol fails on a different symbol."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchSymbol('_'), Halt()
    ])
    self.check_match_ko(te, ti, 'a')
#-def

def test_MatchSymbol_eof(self):
    """MatchSymbol fails on empty input."""
    te, ti = self.create_and_run_tag_engine("", [
        MatchSymbol('_'), Halt()
    ])
    self.check_match_eof(te, ti)
#-def

def test_MatchAny_ok(self):
    """MatchAny consumes whatever symbol is next."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchAny(), Halt()
    ])
    self.check_match_ok(te, ti, "a", 'b')
#-def

def test_MatchAny_eof(self):
    """MatchAny still fails on empty input."""
    te, ti = self.create_and_run_tag_engine("", [
        MatchAny(), Halt()
    ])
    self.check_match_eof(te, ti)
#-def

def test_MatchWord_ok(self):
    """MatchWord consumes the full expected word."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchWord("ab"), Halt()
    ])
    self.check_match_ok(te, ti, "ab", 'c')
#-def

def test_MatchWord_ko(self):
    """MatchWord fails when the input diverges mid-word."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchWord("ac"), Halt()
    ])
    self.check_match_word_ko(te, ti, "ab", "ac")
#-def

def test_MatchWord_eof_1(self):
    """MatchWord fails when input ends before the word does."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchWord("abcd"), Halt()
    ])
    self.check_match_word_eof(te, ti, "abc", "abcd")
#-def

def test_MatchWord_eof_2(self):
    """Word longer than input with a mid-word mismatch."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchWord("azcd"), Halt()
    ])
    self.check_match_word_eof(te, ti, "abc", "azcd")
#-def

def test_MatchWord_eof_3(self):
    """Word longer than input with a first-symbol mismatch."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchWord("zbcd"), Halt()
    ])
    self.check_match_word_eof(te, ti, "abc", "zbcd")
#-def

def test_MatchWord_eof_4(self):
    """MatchWord on empty input reports nothing matched."""
    te, ti = self.create_and_run_tag_engine("", [
        MatchWord("zbcd"), Halt()
    ])
    self.check_match_word_eof(te, ti, "", "zbcd")
#-def

def test_MatchSet_ok_1(self):
    """MatchSet accepts a symbol that is a member of the set."""
    te, ti = self.create_and_run_tag_engine("abc", [
        MatchSet([ 'a', 'b', 'c', 'd' ]), Halt()
    ])
    self.check_match_ok(te, ti, "a", 'b')
#-def

def test_MatchSet_ok_2(self):
    """MatchSet accepts any member of the set, not only the first."""
    te, ti = self.create_and_run_tag_engine("cbc", [
        MatchSet([ 'a', 'b', 'c', 'd' ]), Halt()
    ])
    self.check_match_ok(te, ti, "c", 'b')
#-def

def test_MatchSet_ko(self):
    """MatchSet rejects a symbol outside the set."""
    te, ti = self.create_and_run_tag_engine("xbc", [
        MatchSet([ 'a', 'b', 'c', 'd' ]), Halt()
    ])
    self.check_match_ko(te, ti, 'x')
#-def

def test_MatchSet_eof(self):
    """MatchSet fails on empty input."""
    te, ti = self.create_and_run_tag_engine("", [
        MatchSet([ 'a', 'b', 'c', 'd' ]), Halt()
    ])
    self.check_match_eof(te, ti)
#-def

def test_MatchRange_ok_1(self):
    """MatchRange accepts the lower bound of the range."""
    te, ti = self.create_and_run_tag_engine("017", [
        MatchRange('0', '9'), Halt()
    ])
    self.check_match_ok(te, ti, "0", '1')
#-def
def test_MatchRange_ok_2(self):
te, ti = self.create_and_run_tag_engine("317", [
MatchRange('0', '9'), Halt()
])
self.check_match_ok(te, ti, "3", '1')
#-def
def test_MatchRange_ok_3(self):
te, ti = self.create_and_run_tag_engine("917", [
MatchRange('0', '9'), Halt()
])
self.check_match_ok(te, ti, "9", '1')
#-def
def test_MatchRange_ko_1(self):
te, ti = self.create_and_run_tag_engine("0917", [
MatchRange('1', '9'), Halt()
])
self.check_match_ko(te, ti, '0')
#-def
def test_MatchRange_ko_2(self):
te, ti = self.create_and_run_tag_engine("917", [
MatchRange('0', '7'), Halt()
])
self.check_match_ko(te, ti, '9')
#-def
def test_MatchRange_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchRange('0', '7'), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchIf_ok(self):
te, ti = self.create_and_run_tag_engine("917", [
MatchIf(lambda c: c == '9'), Halt()
])
self.check_match_ok(te, ti, "9", '1')
#-def
def test_MatchIf_ko(self):
te, ti = self.create_and_run_tag_engine("917", [
MatchIf(lambda c: c == '8'), Halt()
])
self.check_match_ko(te, ti, '9')
#-def
def test_MatchIf_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchIf(lambda c: c == 'q'), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchAtLeastOneSymbol_ok_1(self):
te, ti = self.create_and_run_tag_engine("abc", [
MatchAtLeastOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [ 'a' ], 'b')
#-def
def test_MatchAtLeastOneSymbol_ok_2(self):
te, ti = self.create_and_run_tag_engine("aabc", [
MatchAtLeastOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [ 'a', 'a' ], 'b')
#-def
def test_MatchAtLeastOneSymbol_ok_3(self):
te, ti = self.create_and_run_tag_engine("aaabc", [
MatchAtLeastOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [ 'a', 'a', 'a' ], 'b')
#-def
def test_MatchAtLeastOneSymbol_ko(self):
te, ti = self.create_and_run_tag_engine("xaabc", [
MatchAtLeastOneSymbol('a'), Halt()
])
self.check_match_ko(te, ti, 'x')
#-def
def test_MatchAtLeastOneSymbol_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchAtLeastOneSymbol('a'), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchAtLeastOneFromSet_ok_1(self):
te, ti = self.create_and_run_tag_engine("bxs", [
MatchAtLeastOneFromSet([ 'a', 'b', 'c' ]), Halt()
])
self.check_match_ok(te, ti, [ 'b' ], 'x')
#-def
def test_MatchAtLeastOneFromSet_ok_2(self):
te, ti = self.create_and_run_tag_engine("bcxs", [
MatchAtLeastOneFromSet([ 'a', 'b', 'c' ]), Halt()
])
self.check_match_ok(te, ti, [ 'b', 'c' ], 'x')
#-def
def test_MatchAtLeastOneFromSet_ok_3(self):
te, ti = self.create_and_run_tag_engine("bcaxs", [
MatchAtLeastOneFromSet([ 'a', 'b', 'c' ]), Halt()
])
self.check_match_ok(te, ti, [ 'b', 'c', 'a' ], 'x')
#-def
def test_MatchAtLeastOneFromSet_ko(self):
te, ti = self.create_and_run_tag_engine("xbcaxs", [
MatchAtLeastOneFromSet([ 'a', 'b', 'c' ]), Halt()
])
self.check_match_ko(te, ti, 'x')
#-def
def test_MatchAtLeastOneFromSet_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchAtLeastOneFromSet([ 'a', 'b', 'c' ]), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchAtLeastOneFromRange_ok_1(self):
te, ti = self.create_and_run_tag_engine("1_", [
MatchAtLeastOneFromRange('1', '9'), Halt()
])
self.check_match_ok(te, ti, [ '1' ], '_')
#-def
def test_MatchAtLeastOneFromRange_ok_2(self):
te, ti = self.create_and_run_tag_engine("91_", [
MatchAtLeastOneFromRange('1', '9'), Halt()
])
self.check_match_ok(te, ti, [ '9', '1' ], '_')
#-def
def test_MatchAtLeastOneFromRange_ok_3(self):
te, ti = self.create_and_run_tag_engine("971_", [
MatchAtLeastOneFromRange('1', '9'), Halt()
])
self.check_match_ok(te, ti, [ '9', '7', '1' ], '_')
#-def
def test_MatchAtLeastOneFromRange_ko(self):
te, ti = self.create_and_run_tag_engine("0_", [
MatchAtLeastOneFromRange('1', '9'), Halt()
])
self.check_match_ko(te, ti, '0')
#-def
def test_MatchAtLeastOneFromRange_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchAtLeastOneFromRange('1', '9'), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchAtLeastOne_ok_1(self):
te, ti = self.create_and_run_tag_engine("abc", [
MatchAtLeastOne(lambda c: c == 'a'), Halt()
])
self.check_match_ok(te, ti, [ 'a' ], 'b')
#-def
def test_MatchAtLeastOne_ok_2(self):
te, ti = self.create_and_run_tag_engine("aabc", [
MatchAtLeastOne(lambda c: c == 'a'), Halt()
])
self.check_match_ok(te, ti, [ 'a', 'a' ], 'b')
#-def
def test_MatchAtLeastOne_ok_3(self):
te, ti = self.create_and_run_tag_engine("aaabc", [
MatchAtLeastOne(lambda c: c == 'a'), Halt()
])
self.check_match_ok(te, ti, [ 'a', 'a', 'a' ], 'b')
#-def
def test_MatchAtLeastOne_ko(self):
te, ti = self.create_and_run_tag_engine("xaabc", [
MatchAtLeastOne(lambda c: c == 'a'), Halt()
])
self.check_match_ko(te, ti, 'x')
#-def
def test_MatchAtLeastOne_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchAtLeastOne(lambda c: c == 'a'), Halt()
])
self.check_match_eof(te, ti)
#-def
def test_MatchAtMostOneSymbol_ok_1(self):
te, ti = self.create_and_run_tag_engine("_", [
MatchAtMostOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [], '_')
#-def
def test_MatchAtMostOneSymbol_ok_2(self):
te, ti = self.create_and_run_tag_engine("a_", [
MatchAtMostOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [ 'a' ], '_')
#-def
def test_MatchAtMostOneSymbol_ok_3(self):
te, ti = self.create_and_run_tag_engine("aa_", [
MatchAtMostOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [ 'a' ], 'a')
#-def
def test_MatchAtMostOneSymbol_eof(self):
te, ti = self.create_and_run_tag_engine("", [
MatchAtMostOneSymbol('a'), Halt()
])
self.check_match_ok(te, ti, [], None)
#-def
def test_MatchAtMostOneFromSet_ok_1(self):
te, ti = self.create_and_run_tag_engine("_", [
MatchAtMostOneFromSet([ 'a', 'b' ]), Halt()
])
self.check_match_ok(te, ti, [], '_')
#-def
def test_MatchAtMostOneFromSet_ok_2(self):
te, ti = self.create_and_run_tag_engine("b_", [
MatchAtMostOneFromSet([ 'a', 'b' ]), Halt()
])
self.check_match_ok(te, | |
import random, time
from testcases.testcases_base import TestcasesBase
import unittest
class TestGatewayAPICreation(TestcasesBase):
def setUp(self):
super().setUp()
self.core0_client.create_ovs_container()
self.core0_client.timeout = 30
self.flist = 'https://hub.gig.tech/gig-official-apps/ubuntu1604.flist'
self.container_body = {"name": self.rand_str(),
"hostname": self.rand_str(),
"flist": self.flist}
def tearDown(self):
self.lg.info(' [*] Delete all created {} gateways'.format(self.nodeid))
attributes = self.__dict__.keys()
if 'data' in attributes:
if self.data:
self.gateways_api.delete_nodes_gateway(self.nodeid, self.data['name'])
self.lg.info(' [*] TearDown:delete all created container ')
if 'container_data' in attributes:
if self.container_data:
self.containers_api.delete_containers_containerid(self.nodeid,
self.container_data['name'])
self.lg.info(' [*] TearDown:delete all created bridges ')
if 'bridge_data' in attributes:
if self.bridge_data:
self.bridges_api.delete_nodes_bridges_bridgeid(self.nodeid,
self.bridge_data['name'])
super().tearDown()
def create_vm(self, nics):
response = self.storageclusters_api.get_storageclusters()
self.assertEqual(response.status_code, 200)
storageclusters = response.json()
if storageclusters:
storagecluster = storageclusters[-1]
else:
free_disks = self.core0_client.getFreeDisks()
if free_disks == []:
self.skipTest(' [*] no free disks to create storagecluster.')
self.lg.info(' [*] Deploy new storage cluster (SC0).')
response, data = self.storageclusters_api.post_storageclusters(node_id=self.nodeid)
self.assertEqual(response.status_code, 201)
storagecluster = data['label']
self.lg.info(' [*] Create new vdisk.')
response, data = self.vdisks_api.post_vdisks(storagecluster=storagecluster, size=15, blocksize=4096, type='boot')
boot_disk = data['id']
self.lg.info(' [*] Create virtual machine (VM0) on node (N0)')
disks = [{"vdiskid": boot_disk, "maxIOps": 2000}]
response, data = self.vms_api.post_nodes_vms(node_id=self.nodeid, memory=1024, cpu=1, nics=nics, disks=disks)
self.assertEqual(response.status_code, 201)
return data
    def test001_create_gateway_with_xlan_xlan_container(self):
        """ GAT-123
        **Test Scenario:**
        #. Get random node (N0), should succeed.
        #. Create gateway with Xlan and Xlan as nics on node (N0), should succeed.
        #. Bind a new container to Xlan(1).
        #. Bind a new container to Xlan(2).
        #. Make sure that those two containers can ping each others.
        """
        self.lg.info(' [*] Create gateway with xlan as nics on node (N0), should succeed')
        # Nic 0 is the gateway-side nic; nics 1 and 2 are the two private
        # vlan/vxlan networks the test containers will join (type is chosen
        # at random each run).
        nics_type = [{
            'type': random.choice(['vlan', 'vxlan']),
            'gateway': True,
            'dhcp': False,
            'bridge_name': '',
            'zerotierbridge': False
        },
            {
                'type': random.choice(['vlan', 'vxlan']),
                'gateway': False,
                'dhcp': False,
                'bridge_name': '',
                'zerotierbridge': ''
            },
            {
                'type': random.choice(['vlan', 'vxlan']),
                'gateway': False,
                'dhcp': False,
                'bridge_name': '',
                'zerotierbridge': ''
            }
        ]
        nics = self.get_gateway_nic(nics_types=nics_type)
        self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
        self.assertEqual(self.response.status_code, 201)
        self.lg.info(' [*] Bind a new container to vlan(1)')
        # Static addressing: cidr[:-3] strips the trailing '/24' to get the
        # gateway address; cidr[:-4] + '10/24' replaces the last octet with
        # 10 (assumes the cidr ends in '<one-digit-octet>/24' -- TODO confirm).
        nics_container = [{'type': nics[1]['type'],
                           'id': nics[1]['id'],
                           'config': {'dhcp': False,
                                      'gateway': nics[1]['config']['cidr'][:-3],
                                      'cidr': nics[1]['config']['cidr'][:-4] + '10/24'}
                           }]
        uid_1 = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
        container_1 = self.core0_client.client.container.client(int(uid_1))
        self.lg.info(' [*] Bind a new container to vlan(2)')
        nics_container = [{'type': nics[2]['type'],
                           'id': nics[2]['id'],
                           'config': {'dhcp': False,
                                      'gateway': nics[2]['config']['cidr'][:-3],
                                      'cidr': nics[2]['config']['cidr'][:-4] + '10/24'}
                           }]
        uid = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
        container_2 = self.core0_client.client.container.client(int(uid))
        time.sleep(5)
        self.lg.info(' [*] Make sure that those two containers can ping each others')
        # NOTE: '%' binds tighter than '+', so '10' is appended to the
        # formatted string (yielding the peer's .10 address) -- works, but
        # fragile; keep in mind if editing these lines.
        response = container_1.bash('ping -w5 %s' % nics[2]['config']['cidr'][:-4] + '10').get()
        self.assertEqual(response.state, 'SUCCESS')
        response = container_2.bash('ping -w5 %s' % nics[1]['config']['cidr'][:-4] + '10').get()
        self.assertEqual(response.state, 'SUCCESS')
        self.core0_client.client.container.terminate(int(uid_1))
        self.core0_client.client.container.terminate(int(uid))
@unittest.skip('ssh to vm issue')
def test003_create_gateway_with_xlan_xlan_vm(self):
""" GAT-125
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with vlan and vlan as nics on node (N0), should succeed.
#. Bind a new vm to vlan(1).
#. Bind a new vm to vlan(2).
#. Make sure that those two containers can ping each others.
"""
nics_type = [{
'type':'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
vm1_mac_addr = nics[1]['dhcpserver']['hosts'][1]['macaddress']
vm1_ip_addr = nics[1]['dhcpserver']['hosts'][1]['ipaddress']
vm2_mac_addr = nics[2]['dhcpserver']['hosts'][1]['macaddress']
vm2_ip_addr = nics[2]['dhcpserver']['hosts'][1]['ipaddress']
test_container_mac_addr = nics[1]['dhcpserver']['hosts'][0]['macaddress']
nics[2]['dhcpserver']['hosts'][0]['macaddress'] = test_container_mac_addr
## set cloudinit
cloudinit = {"chpasswd": {"expire": false},
"ssh_pwauth":true, "users":
[{"plain_text_passwd": "<PASSWORD>", "lock-passwd": false,"name": "gig", "shell": "/bin/bash", "sudo": "ALL=(ALL) ALL"}]}
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
nics = [{'id': nics[1]['id'], 'type': nics[1]['type'], 'macaddress': vm1_mac_addr}]
self.create_vm(nics=nics)
nics = [{'id': nics[2]['id'], 'type': nics[2]['type'], 'macaddress': vm2_mac_addr}]
self.create_vm(nics=nics)
self.lg.info(' [*] create test container')
nics = [{'type': nics[1]['type'], 'id': nics[1]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr},
{'type': nics[2]['type'], 'id': nics[2]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr}]
uid = self.core0_client.client.container.create(self.flist, nics=nics).get()
test_container = self.core0_client.client.container.client(uid)
time.sleep(60)
response = test_container.bash(
'sshpass -p rooter ssh gig@%s -oStrictHostKeyChecking=no ping %s' % (vm1_ip_addr, vm2_ip_addr)).get()
self.assertEqual(response.state, 'SUCCESS', response.stderr)
response = test_container.bash(
'sshpass -p rooter ssh gig@%s -oStrictHostKeyChecking=no ping %s' % (vm2_ip_addr, vm1_ip_addr)).get()
self.assertEqual(response.state, 'SUCCESS', response.stderr)
    def test005_create_gateway_with_bridge_xlan_container(self):
        """ GAT-127
        **Test Scenario:**
        #. Get random node (N0), should succeed.
        #. Create gateway with bridge and vxlan as nics on node (N0), should succeed.
        #. Bind a new container to vlan(1).
        #. Verify that this container has public access.
        """
        # NOTE(review): other tests in this class use self.rand_str();
        # confirm random_string() also exists on the base class.
        bridge_name = 'b' + self.random_string()
        # Nic 0 is the named public bridge; nic 1 is a dhcp-enabled private
        # vlan/vxlan network for the container.
        nics_type = [{
            'type': 'bridge',
            'gateway': True,
            'dhcp': False,
            'bridge_name': bridge_name,
            'zerotierbridge': ''
        },
            {
                'type': random.choice(['vlan', 'vxlan']),
                'gateway': False,
                'dhcp': True,
                'bridge_name': '',
                'zerotierbridge': ''
            }
        ]
        nics = self.get_gateway_nic(nics_types=nics_type)
        self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
        # The bridge must exist (NAT-enabled, static addressing) before the
        # gateway that references it by name is created; its cidr reuses the
        # gateway address chosen by get_gateway_nic.
        setting = {"cidr": nics[0]['config']['gateway'] + '/24'}
        response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, name=bridge_name, networkMode='static', nat=True, setting=setting)
        self.assertEqual(response.status_code, 201, response.content)
        self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
        self.assertEqual(self.response.status_code, 201)
        self.lg.info(' [*] Create container')
        # The container uses the MAC reserved for the first dhcp host entry
        # so the gateway's dhcp server hands it the matching lease.
        nics_container = [{"type": nics[1]['type'],
                           "id": nics[1]['id'],
                           "hwaddr": nics[1]['dhcpserver']['hosts'][0]['macaddress'],
                           "config": {"dhcp": True}}]
        response, self.container_data = self.containers_api.post_containers(self.nodeid, nics=nics_container)
        self.assertEqual(response.status_code, 201, " [*] Can't create container.")
        container = self.core0_client.get_container_client(self.container_data['name'])
        self.assertTrue(container)
        time.sleep(5)
        # Public-access check: ping an external address through the gateway.
        response = container.bash('ping -w3 8.8.8.8').get()
        self.assertEqual(response.state, 'SUCCESS')
        self.assertNotIn("unreachable", response.stdout)
@unittest.skip('ssh to vm issue')
def test007_create_gateway_with_bridge_xlan_vm(self):
""" GAT-129
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and vlan as nics on node (N0), should succeed.
#. Bind a new vm to vlan(1).
#. Verify that this vm has public access.
"""
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
self.assertEqual(response.status_code, 201, response.content)
time.sleep(3)
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': self.bridge_data['name'],
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
vm1_mac_addr = nics_type[1]['dhcpserver']['hosts'][0]['macaddress']
vm1_ip_addr = nics_type[1]['dhcpserver']['hosts'][0]['ipaddress']
test_container_mac_addr = nics_type[1]['dhcpserver']['hosts'][1]['macaddress']
nics = [{'id': nics[1]['id'], 'type': nics[1]['type'], 'macaddress': vm1_mac_addr}]
self.create_vm(nics=nics)
self.lg.info(' [*] create test container')
nics = [{'type': nics[1]['type'], 'id': nics[1]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr}]
uid = self.core0_client.client.container.create(self.flist, nics=nics).get()
test_container = self.core0_client.client.container.client(uid)
test_container.bash('apt install ssh -y; apt install sshpass -y')
time.sleep(60)
response = test_container.bash('ssh gig@%s -oStrictHostKeyChecking=no ping -w3 8.8.8.8' % vm1_ip_addr).get()
self.assertEqual(response.state, 'SUCCESS')
self.core0_client.client.container.terminate(int(uid))
    def test009_create_gateway_dhcpserver(self):
        """ GAT-131
        **Test Scenario:**
        #. Get random node (N0), should succeed.
        #. Create gateway with bridge and xlan as nics on node (N0), should succeed.
        #. Specify a dhcpserver for container and vm in this GW
        #. Create a container and vm to match the dhcpserver specs
        #. Verify that container and vm ips are matching with the dhcpserver specs.
        """
        self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
        response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
        self.assertEqual(response.status_code, 201, response.content)
        time.sleep(3)
        # Nic 0 is the NAT-enabled public bridge created above; nic 1 is a
        # dhcp-enabled private vlan/vxlan network.
        nics_type = [{
            'type': 'bridge',
            'gateway': True,
            'dhcp': False,
            'bridge_name': self.bridge_data['name'],
            'zerotierbridge': ''
        },
            {
                'type': random.choice(['vlan', 'vxlan']),
                'gateway': False,
                'dhcp': True,
                'bridge_name': '',
                'zerotierbridge': ''
            }
        ]
        nics = self.get_gateway_nic(nics_types=nics_type)
        self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics)
        # NOTE(review): the failure message reuses the bridge-creation
        # `response`; consider self.response.content instead.
        self.assertEqual(self.response.status_code, 201, response.content)
        # The container claims the MAC of the first reserved dhcp host so it
        # should receive exactly that entry's ip address.
        nics_container = [{
            'type': nics[1]['type'],
            'name': 'test',
            'id': nics[1]['id'],
            'hwaddr': nics[1]['dhcpserver']['hosts'][0]['macaddress'],
            'config': {'dhcp': True}
        }]
        uid = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
        time.sleep(5)
        container_1 = self.core0_client.client.container.client(int(uid))
        container_1_nics = container_1.info.nic()
        # Find the interface named 'test' and check both its ip (addr minus
        # the '/24' suffix) and MAC against the dhcpserver reservation.
        interface = [x for x in container_1_nics if x['name'] == 'test']
        self.assertNotEqual(interface, [])
        self.assertIn(nics[1]['dhcpserver']['hosts'][0]['ipaddress'], [x['addr'][:-3] for x in interface[0]['addrs']])
        self.assertEqual(nics[1]['dhcpserver']['hosts'][0]['macaddress'], interface[0]['hardwareaddr'])
        self.core0_client.client.container.terminate(int(uid))
@unittest.skip('https://github.com/zero-os/0-orchestrator/issues/1102')
def test010_create_gateway_httpproxy(self):
""" GAT-132
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and vlan as nics and httpproxy with two containers on node (N0), should succeed.
#. Create two containers to for test the httpproxy's configuration
#. Verify that the httprxoy's configuration is working right
"""
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
self.assertEqual(response.status_code, 201, response.content)
time.sleep(3)
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': self.bridge_data['name'],
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics_data = self.get_gateway_nic(nics_types=nics_type)
httpproxies = [
{
"host": "container1",
"destinations": ['http://{}:1000'.format(nics_data[1]['config']['cidr'][:-4] + '10/24')],
"types": ['http', 'https']
},
{
"host": "container2",
"destinations": ['http://{}:2000'.format(nics_data[1]['config']['cidr'][:-4] + '20/24')],
"types": ['http', 'https']
}
]
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics_data, httpproxies=httpproxies)
self.assertEqual(response.status_code, 201, response.content)
nics = [{'type': nics_type[1]['type'],
'id': nics_data[1]['id'],
'config': {'dhcp': False,
'gateway': nics_data[1]['config']['cidr'][:-3],
'cidr': nics_data[1]['config']['cidr'][:-4] + '10/24'}}]
uid_1 = self.core0_client.client.container.create(self.flist, nics=nics).get()
container_1 = self.core0_client.client.container.client(int(uid_1))
| |
workspace:
gs_styles = [x for x in cat.get_styles(names=[f"{settings.DEFAULT_WORKSPACE}_{resource_name}"])]
styles = styles + gs_styles
cat.delete(lyr)
for s in styles:
if s is not None and s.name not in _default_style_names:
try:
logger.debug(f"Trying to delete Style [{s.name}]")
cat.delete(s, purge='true')
except Exception as e:
# Trying to delete a shared style will fail
# We'll catch the exception and log it.
logger.debug(e)
# Due to a possible bug of geoserver, we need this trick for now
# TODO: inspect the issue reported by this hack. Should be solved
# with GS 2.7+
try:
cat.delete(resource, recurse=True) # This may fail
except Exception:
cat._cache.clear()
cat.reset()
if store.resource_type == 'dataStore' and 'dbtype' in store.connection_parameters and \
store.connection_parameters['dbtype'] == 'postgis':
delete_from_postgis(resource_name, store)
else:
if store.resource_type == 'coverageStore':
try:
logger.debug(f" - Going to purge the {store.resource_type} : {store.href}")
cat.reset() # this resets the coverage readers and unlocks the files
cat.delete(store, purge='all', recurse=True)
# cat.reload() # this preservers the integrity of geoserver
except Exception as e:
# Trying to recursively purge a store may fail
# We'll catch the exception and log it.
logger.debug(e)
else:
try:
if not store.get_resources():
cat.delete(store, recurse=True)
except Exception as e:
# Catch the exception and log it.
logger.debug(e)
def delete_from_postgis(layer_name, store):
    """
    Delete a table from PostGIS (because Geoserver won't do it yet);
    to be used after deleting a layer from the system.

    :param layer_name: name of the PostGIS table backing the deleted layer.
    :param store: GeoServer datastore whose connection parameters supply
        the database/host/port; credentials come from the DATASTORE_URL
        settings. Errors are logged, never raised.
    """
    import psycopg2
    # we will assume that store/database may change (when using shard for example)
    # but user and password are the ones from settings (DATASTORE_URL)
    db = ogc_server_settings.datastore_db
    db_name = store.connection_parameters['database']
    user = db['USER']
    password = db['PASSWORD']
    host = store.connection_parameters['host']
    port = store.connection_parameters['port']
    conn = None
    try:
        conn = psycopg2.connect(dbname=db_name, user=user, host=host, port=port, password=password)
        cur = conn.cursor()
        # SECURITY FIX: bind the table name as a query parameter instead of
        # interpolating it into the SQL string (avoids injection/quoting
        # bugs for unusual layer names).
        cur.execute("SELECT DropGeometryTable (%s)", (layer_name,))
        conn.commit()
    except Exception as e:
        logger.error(
            "Error deleting PostGIS table %s:%s",
            layer_name,
            str(e))
    finally:
        try:
            if conn:
                conn.close()
        except Exception as e:
            logger.error("Error closing PostGIS conn %s:%s", layer_name, str(e))
def gs_slurp(
ignore_errors=True,
verbosity=1,
console=None,
owner=None,
workspace=None,
store=None,
filter=None,
skip_unadvertised=False,
skip_geonode_registered=False,
remove_deleted=False,
permissions=None,
execute_signals=False):
"""Configure the layers available in GeoServer in GeoNode.
It returns a list of dictionaries with the name of the layer,
the result of the operation and the errors and traceback if it failed.
"""
if console is None:
console = open(os.devnull, 'w')
if verbosity > 0:
print("Inspecting the available layers in GeoServer ...", file=console)
cat = gs_catalog
if workspace is not None and workspace:
workspace = cat.get_workspace(workspace)
if workspace is None:
resources = []
else:
# obtain the store from within the workspace. if it exists, obtain resources
# directly from store, otherwise return an empty list:
if store is not None:
store = get_store(cat, store, workspace=workspace)
if store is None:
resources = []
else:
resources = cat.get_resources(stores=[store])
else:
resources = cat.get_resources(workspaces=[workspace])
elif store is not None:
store = get_store(cat, store)
resources = cat.get_resources(stores=[store])
else:
resources = cat.get_resources()
if remove_deleted:
resources_for_delete_compare = resources[:]
workspace_for_delete_compare = workspace
# filter out layers for delete comparison with GeoNode layers by following criteria:
# enabled = true, if --skip-unadvertised: advertised = true, but
# disregard the filter parameter in the case of deleting layers
try:
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.enabled in {"true", True}]
if skip_unadvertised:
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.advertised in {"true", True}]
except Exception:
if ignore_errors:
pass
else:
raise
if filter:
resources = [k for k in resources if filter in k.name]
# filter out layers depending on enabled, advertised status:
_resources = []
for k in resources:
try:
if k.enabled in {"true", True}:
_resources.append(k)
except Exception:
if ignore_errors:
continue
else:
raise
# resources = [k for k in resources if k.enabled in {"true", True}]
resources = _resources
if skip_unadvertised:
try:
resources = [k for k in resources if k.advertised in {"true", True}]
except Exception:
if ignore_errors:
pass
else:
raise
# filter out layers already registered in geonode
layer_names = Layer.objects.all().values_list('alternate', flat=True)
if skip_geonode_registered:
try:
resources = [k for k in resources
if f'{k.workspace.name}:{k.name}' not in layer_names]
except Exception:
if ignore_errors:
pass
else:
raise
# TODO: Should we do something with these?
# i.e. look for matching layers in GeoNode and also disable?
# disabled_resources = [k for k in resources if k.enabled == "false"]
number = len(resources)
if verbosity > 0:
msg = "Found %d layers, starting processing" % number
print(msg, file=console)
output = {
'stats': {
'failed': 0,
'updated': 0,
'created': 0,
'deleted': 0,
},
'layers': [],
'deleted_layers': []
}
start = datetime.datetime.now(timezone.get_current_timezone())
for i, resource in enumerate(resources):
name = resource.name
the_store = resource.store
workspace = the_store.workspace
try:
created = False
layer = Layer.objects.filter(name=name, workspace=workspace.name).first()
if not layer:
layer = Layer.objects.create(
name=name,
workspace=workspace.name,
store=the_store.name,
storeType=the_store.resource_type,
alternate=f"{workspace.name}:{resource.name}",
title=resource.title or 'No title provided',
abstract=resource.abstract or "{}".format(_('No abstract provided')),
owner=owner,
uuid=str(uuid.uuid4())
)
created = True
layer.bbox_x0 = Decimal(resource.native_bbox[0])
layer.bbox_x1 = Decimal(resource.native_bbox[1])
layer.bbox_y0 = Decimal(resource.native_bbox[2])
layer.bbox_y1 = Decimal(resource.native_bbox[3])
layer.srid = resource.projection
# sync permissions in GeoFence
perm_spec = json.loads(_perms_info_json(layer))
layer.set_permissions(perm_spec)
# recalculate the layer statistics
set_attributes_from_geoserver(layer, overwrite=True)
# in some cases we need to explicitily save the resource to execute the signals
# (for sure when running updatelayers)
if execute_signals:
layer.save(notify=True)
# Fix metadata links if the ip has changed
if layer.link_set.metadata().count() > 0:
if not created and settings.SITEURL not in layer.link_set.metadata()[0].url:
layer.link_set.metadata().delete()
layer.save()
metadata_links = []
for link in layer.link_set.metadata():
metadata_links.append((link.mime, link.name, link.url))
resource.metadata_links = metadata_links
cat.save(resource)
except Exception as e:
if ignore_errors:
status = 'failed'
exception_type, error, traceback = sys.exc_info()
else:
if verbosity > 0:
msg = "Stopping process because --ignore-errors was not set and an error was found."
print(msg, file=sys.stderr)
raise Exception(f"Failed to process {resource.name}") from e
else:
if created:
if not permissions:
layer.set_default_permissions()
else:
layer.set_permissions(permissions)
status = 'created'
output['stats']['created'] += 1
else:
status = 'updated'
output['stats']['updated'] += 1
msg = "[%s] Layer %s (%d/%d)" % (status, name, i + 1, number)
info = {'name': name, 'status': status}
if status == 'failed':
output['stats']['failed'] += 1
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['layers'].append(info)
if verbosity > 0:
print(msg, file=console)
if remove_deleted:
q = Layer.objects.filter()
if workspace_for_delete_compare is not None:
if isinstance(workspace_for_delete_compare, Workspace):
q = q.filter(
workspace__exact=workspace_for_delete_compare.name)
else:
q = q.filter(workspace__exact=workspace_for_delete_compare)
if store is not None:
if isinstance(
store,
CoverageStore) or isinstance(
store,
DataStore):
q = q.filter(store__exact=store.name)
else:
q = q.filter(store__exact=store)
logger.debug("Executing 'remove_deleted' logic")
logger.debug("GeoNode Layers Found:")
# compare the list of GeoNode layers obtained via query/filter with valid resources found in GeoServer
# filtered per options passed to updatelayers: --workspace, --store, --skip-unadvertised
# add any layers not found in GeoServer to deleted_layers (must match
# workspace and store as well):
deleted_layers = []
for layer in q:
logger.debug(
"GeoNode Layer info: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
layer_found_in_geoserver = False
for resource in resources_for_delete_compare:
# if layer.name matches a GeoServer resource, check also that
# workspace and store match, mark valid:
if layer.name == resource.name:
if layer.workspace == resource.workspace.name and layer.store == resource.store.name:
logger.debug(
"Matches GeoServer layer: name: %s, workspace: %s, store: %s",
resource.name,
resource.workspace.name,
resource.store.name)
layer_found_in_geoserver = True
if not layer_found_in_geoserver:
logger.debug(
"----- Layer %s not matched, marked for deletion ---------------",
layer.name)
deleted_layers.append(layer)
number_deleted = len(deleted_layers)
if verbosity > 0:
msg = "\nFound %d layers to delete, starting processing" % number_deleted if number_deleted > 0 else \
"\nFound %d layers to delete" % number_deleted
print(msg, file=console)
for i, layer in enumerate(deleted_layers):
logger.debug(
"GeoNode Layer to delete: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
try:
# delete ratings, comments, and taggit tags:
ct = ContentType.objects.get_for_model(layer)
OverallRating.objects.filter(
content_type=ct,
object_id=layer.id).delete()
Comment.objects.filter(
content_type=ct,
object_id=layer.id).delete()
layer.keywords.clear()
layer.delete()
output['stats']['deleted'] += 1
status = "delete_succeeded"
except Exception:
status = "delete_failed"
finally:
from .signals import geoserver_pre_delete
pre_delete.connect(geoserver_pre_delete, sender=Layer)
msg = "[%s] Layer %s (%d/%d)" % (status,
layer.name,
i + 1,
number_deleted)
info = {'name': layer.name, 'status': status}
if status == "delete_failed":
exception_type, error, traceback = sys.exc_info()
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['deleted_layers'].append(info)
if verbosity > 0:
print(msg, | |
in leaks:
if type(obj) in ignore_types:
break
else:
real_leaks.append(obj)
leaks = real_leaks
else:
return leaks or None
logger.warn('{} {}s leaked. {}', (len(leaks)), type_name, ('' if _trace_all_leaks else 'Enable _trace_all_leaks in zone.py for potential callstacks. '), owner='mduke')
for item in leaks:
logger.warn(' {} "{}"', type(item).__name__, item)
if _trace_all_leaks:
time_stamp = time.time()
for item in leaks:
find_object_refs(item, valid_refs={id(weak_set)})
if time.time() - time_stamp > 25:
logger.warn('{} leak detection terminated after 25ms.', type_name, owner='mduke')
break
weak_set.clear()
self.manager_reference_tracker.remove(self)
check_for_leaks(self.all_interactions, 'interaction')
ignore_managed_object_types = None
if services.game_services.service_manager.is_traveling:
import sims
ignore_managed_object_types = set((sims.sim_info.SimInfo,
sims.household.Household))
check_for_leaks((self.manager_reference_tracker), 'managed object', ignore_types=ignore_managed_object_types)
    def on_objects_loaded(self):
        """Advance the zone state machine once all zone objects are created."""
        self._set_zone_state(zone_types.ZoneState.OBJECTS_LOADED)
    def on_client_connect(self, client):
        """Record the connecting client and advance to CLIENT_CONNECTED."""
        self._client = client
        self._set_zone_state(zone_types.ZoneState.CLIENT_CONNECTED)
def on_households_and_sim_infos_loaded(self):
self._set_zone_state(zone_types.ZoneState.HOUSEHOLDS_AND_SIM_INFOS_LOADED)
object_preference_tracker = services.object_preference_tracker()
if object_preference_tracker is not None:
object_preference_tracker.convert_existing_preferences()
def on_loading_screen_animation_finished(self):
logger.debug('on_loading_screen_animation_finished')
services.game_clock_service().restore_saved_clock_speed()
services.sim_info_manager().on_loading_screen_animation_finished()
services.get_event_manager().process_events_for_household((test_events.TestEvent.SimTravel), (services.active_household()),
zone_id=(self.id))
business_manager = services.business_service().get_business_manager_for_zone()
if business_manager is not None:
business_manager.on_loading_screen_animation_finished()
services.venue_service().on_loading_screen_animation_finished()
landlord_service = services.get_landlord_service()
if landlord_service is not None:
landlord_service.on_loading_screen_animation_finished()
laundry_service = services.get_laundry_service()
if laundry_service is not None:
laundry_service.on_loading_screen_animation_finished()
self.zone_spin_up_service.on_loading_screen_animation_finished()
def _set_zone_state(self, state):
logger.assert_raise((self._zone_state + 1 == state or state == zone_types.ZoneState.SHUTDOWN_STARTED), 'Illegal zone state change: {} to {}',
(self._zone_state), state, owner='sscholl')
self._zone_state = state
if state in self._zone_state_callbacks:
self._zone_state_callbacks[state]()
del self._zone_state_callbacks[state]
def register_callback(self, callback_type, callback):
if self._zone_state == zone_types.ZoneState.SHUTDOWN_STARTED:
return
if callback_type <= self._zone_state:
callback()
return
self._zone_state_callbacks[callback_type].append(callback)
def unregister_callback(self, callback_type, callback):
if callback in self._zone_state_callbacks[callback_type]:
self._zone_state_callbacks[callback_type].remove(callback)
def find_object(self, obj_id, include_props=False, include_household=False):
obj = self.object_manager.get(obj_id)
if obj is None:
obj = self.inventory_manager.get(obj_id)
if obj is None:
if include_props:
obj = self.prop_manager.get(obj_id)
if obj is None:
if include_household:
household_id = self.lot.owner_household_id
if household_id != 0:
obj = get_object_in_household_inventory(obj_id, household_id)
return obj
def spawn_points_gen(self):
yield from self._world_spawn_points.values()
yield from self._dynamic_spawn_points.values()
if False:
yield None
def _get_spawn_point_by_id(self, spawn_point_id):
if spawn_point_id in self._world_spawn_points:
return self._world_spawn_points[spawn_point_id]
if spawn_point_id in self._dynamic_spawn_points:
return self._dynamic_spawn_points[spawn_point_id]
    def set_up_world_spawn_points(self, locator_array):
        """Stash the world spawn point locators; the actual WorldSpawnPoint
        objects are built from them later, during zone load."""
        self._world_spawn_point_locators = locator_array
    def add_dynamic_spawn_point(self, spawn_point):
        """Register a runtime-created spawn point and notify listeners."""
        self._dynamic_spawn_points[spawn_point.spawn_point_id] = spawn_point
        spawn_point.on_add()
        self._on_spawn_points_changed()
    def remove_dynamic_spawn_point(self, spawn_point):
        """Unregister a runtime-created spawn point and notify listeners."""
        spawn_point.on_remove()
        self._dynamic_spawn_points.pop(spawn_point.spawn_point_id)
        self._on_spawn_points_changed()
def get_spawn_point_ignore_constraint(self):
objects_to_ignore = set()
for spawn_point in self._world_spawn_points.values():
objects_to_ignore.add(spawn_point.spawn_point_id)
return Constraint(objects_to_ignore=objects_to_ignore, debug_name='IgnoreSpawnPointConstraint')
def _get_spawn_points_with_lot_id_and_tags(self, sim_info=None, lot_id=None, sim_spawner_tags=None, except_lot_id=None, spawn_point_request_reason=SpawnPointRequestReason.DEFAULT):
spawn_points = []
if not sim_spawner_tags:
return
for spawn_point in self.spawn_points_gen():
if lot_id is not None:
if spawn_point.lot_id != lot_id:
continue
if except_lot_id is not None:
if spawn_point.lot_id == except_lot_id:
continue
tags = spawn_point.get_tags()
if not tags.intersection(sim_spawner_tags):
continue
if not spawn_point.is_valid(sim_info=sim_info, spawn_point_request_reason=spawn_point_request_reason):
continue
spawn_points.append(spawn_point)
if spawn_points:
max_priority = max((p.spawn_point_priority for p in spawn_points))
spawn_points = [p for p in spawn_points if p.spawn_point_priority == max_priority]
return spawn_points
def get_spawn_point(self, lot_id=None, sim_spawner_tags=None, must_have_tags=False, spawning_sim_info=None, spawn_point_request_reason=SpawnPointRequestReason.DEFAULT):
spawn_points = list(self.spawn_points_gen())
if not spawn_points:
return
spawn_points_with_tags = self._get_spawn_points_with_lot_id_and_tags(sim_info=spawning_sim_info,
lot_id=lot_id,
sim_spawner_tags=sim_spawner_tags,
spawn_point_request_reason=spawn_point_request_reason)
if not spawn_points_with_tags:
if not must_have_tags:
spawn_points_with_tags = self._get_spawn_points_with_lot_id_and_tags(sim_info=spawning_sim_info,
sim_spawner_tags=sim_spawner_tags,
spawn_point_request_reason=spawn_point_request_reason)
else:
if spawn_points_with_tags:
return random.choice(spawn_points_with_tags)
return must_have_tags or random.choice(spawn_points)
return
    def get_spawn_points_constraint(self, sim_info=None, lot_id=None, sim_spawner_tags=None, except_lot_id=None, spawn_point_request_reason=SpawnPointRequestReason.DEFAULT, generalize=False, backup_sim_spawner_tags=None, backup_lot_id=None):
        """Build a constraint set of routable spawn positions.

        Preference order: positions from spawn points matching the search
        tags (explicit tags, or the sim's saved tags), then the sim's saved
        spawn point (SPAWN_SAME_POINT), then every spawn point in the zone,
        and finally the lot corners as a last resort.
        """
        # Defaults when no sim preference applies: any tagged point is fine.
        spawn_point_option = SpawnPointOption.SPAWN_ANY_POINT_WITH_CONSTRAINT_TAGS
        search_tags = sim_spawner_tags
        spawn_point_id = None
        original_spawn_point = None
        if sim_info is not None:
            if sim_spawner_tags is None:
                # No explicit tags: honor the sim's saved spawn preference.
                spawn_point_option = sim_info.spawn_point_option if sim_info.spawn_point_option is not None else SpawnPointOption.SPAWN_SAME_POINT
                spawn_point_id = sim_info.spawn_point_id
                original_spawn_point = self._get_spawn_point_by_id(spawn_point_id)
                if spawn_point_option == SpawnPointOption.SPAWN_ANY_POINT_WITH_SAVED_TAGS or spawn_point_option == SpawnPointOption.SPAWN_DIFFERENT_POINT_WITH_SAVED_TAGS:
                    search_tags = sim_info.spawner_tags
        points = []
        if search_tags is not None:
            spawn_points_with_tags = self._get_spawn_points_with_lot_id_and_tags(sim_info=sim_info,
                                                                                 lot_id=lot_id,
                                                                                 sim_spawner_tags=search_tags,
                                                                                 except_lot_id=except_lot_id,
                                                                                 spawn_point_request_reason=spawn_point_request_reason)
            if not spawn_points_with_tags:
                if backup_sim_spawner_tags is not None:
                    # Primary search failed: retry with the backup tags/lot.
                    spawn_points_with_tags = self._get_spawn_points_with_lot_id_and_tags(sim_info=sim_info,
                                                                                         lot_id=backup_lot_id,
                                                                                         sim_spawner_tags=backup_sim_spawner_tags,
                                                                                         except_lot_id=except_lot_id,
                                                                                         spawn_point_request_reason=spawn_point_request_reason)
            if spawn_points_with_tags:
                for spawn_point in spawn_points_with_tags:
                    if spawn_point_option == SpawnPointOption.SPAWN_DIFFERENT_POINT_WITH_SAVED_TAGS:
                        if original_spawn_point is not None:
                            # Must spawn somewhere new: skip the saved point.
                            if spawn_point.spawn_point_id == original_spawn_point.spawn_point_id:
                                continue
                    position_constraints = spawn_point.get_position_constraints(generalize=generalize)
                    if position_constraints:
                        points.extend(position_constraints)
                if spawn_point_option == SpawnPointOption.SPAWN_DIFFERENT_POINT_WITH_SAVED_TAGS:
                    if original_spawn_point:
                        if points:
                            # Weight candidate points by horizontal distance
                            # from the saved point (y ignored).
                            approximate_center = original_spawn_point.get_approximate_center()
                            comparable_spawn_point_center = sims4.math.Vector3(approximate_center.x, 0.0, approximate_center.z)
                            weighted_points = [((comparable_spawn_point_center - point.single_point()[0]).magnitude(), point) for point in points]
                            selected_spawn_point = sims4.random.weighted_random_item(weighted_points)
                            # NOTE(review): `set(selected_spawn_point)` iterates the
                            # selected constraint -- looks like a decompiler artifact;
                            # confirm against create_constraint_set's expected input.
                            return interactions.constraints.create_constraint_set(set(selected_spawn_point))
                if points:
                    return interactions.constraints.create_constraint_set(points)
        if spawn_point_option == SpawnPointOption.SPAWN_SAME_POINT:
            if original_spawn_point:
                points = original_spawn_point.get_position_constraints(generalize=generalize)
                if points:
                    return interactions.constraints.create_constraint_set(points)
        # Fallback: gather positions from every spawn point in the zone.
        for spawn_point in self.spawn_points_gen():
            position_constraints = spawn_point.get_position_constraints(generalize=generalize)
            if position_constraints:
                points.extend(position_constraints)
        if points:
            return interactions.constraints.create_constraint_set(points)
        logger.warn('There are no spawn locations on this lot. The corners of the lot are being used instead: {}', (services.current_zone().lot), owner='rmccord')
        return self.get_lot_corners_constraint_set()
def get_lot_corners_constraint_set(self):
lot_center = self.lot.center
lot_corners = services.current_zone().lot.corners
routing_surface = routing.SurfaceIdentifier(services.current_zone().id, 0, routing.SurfaceType.SURFACETYPE_WORLD)
constraint_list = []
for corner in lot_corners:
diff = lot_center - corner
if diff.magnitude_squared() != 0:
towards_center_vec = sims4.math.vector_normalize(lot_center - corner) * 0.1
else:
towards_center_vec = sims4.math.Vector3.ZERO()
new_corner = corner + towards_center_vec
constraint_list.append(interactions.constraints.Position(new_corner, routing_surface=routing_surface))
return create_constraint_set(constraint_list)
def validate_spawn_points(self):
if not self._world_spawn_points:
if not self._dynamic_spawn_points:
return
dest_handles = set()
lot_center = self.lot.center
lot_corners = self.lot.corners
routing_surface = routing.SurfaceIdentifier(self.id, 0, routing.SurfaceType.SURFACETYPE_WORLD)
for corner in lot_corners:
diff = lot_center - corner
if diff.magnitude_squared() != 0:
towards_center_vec = sims4.math.vector_normalize(lot_center - corner) * 0.1
else:
towards_center_vec = sims4.math.Vector3.ZERO()
new_corner = corner + towards_center_vec
location = routing.Location(new_corner, sims4.math.Quaternion.IDENTITY(), routing_surface)
dest_handles.add(routing.connectivity.Handle(location))
for spawn_point in self.spawn_points_gen():
spawn_point.validate_connectivity(dest_handles)
self._on_spawn_points_changed()
    def register_spawn_points_changed_callback(self, callback):
        """Subscribe to notifications fired when spawn points are added,
        removed or revalidated."""
        self._spawn_points_changed_callbacks.append(callback)
    def unregister_spawn_points_changed_callback(self, callback):
        """Remove a spawn-points-changed subscription."""
        self._spawn_points_changed_callbacks.remove(callback)
    def _on_spawn_points_changed(self):
        """Invoke every registered spawn-points-changed listener."""
        # _spawn_points_changed_callbacks supports append/remove and is
        # invoked directly -- presumably a CallableList; TODO confirm
        # against its initialization (not visible here).
        self._spawn_points_changed_callbacks()
def _update_navmesh_id_if_neccessary(self):
new_navmesh_id = interactions.utils.routing.routing.planner_build_id()
if self.navmesh_id != new_navmesh_id:
self.navmesh_id = new_navmesh_id
self.check_perform_deferred_front_door_check()
self.navmesh_change_callbacks()
self._handle_live_drag_objects()
if gsi_handlers.routing_handlers.build_archiver.enabled:
gsi_handlers.routing_handlers.archive_build(new_navmesh_id)
def _handle_live_drag_objects(self):
client = services.client_manager().get_first_client()
if client is not None:
sim_info_manager = services.sim_info_manager()
for live_drag_object in client.objects_moved_via_live_drag:
footprint_polygon = live_drag_object.footprint_polygon
if footprint_polygon is not None:
for sim in sim_info_manager.instanced_sims_gen():
if footprint_polygon.contains(sim.position):
sim.reset((ResetReason.RESET_EXPECTED), cause='Live Drag object with footprint dropped on Sim.')
client.objects_moved_via_live_drag.clear()
def check_perform_deferred_front_door_check(self):
if self._should_perform_deferred_front_door_check:
logger.info('Attempting to fix up doors, searching...')
services.get_door_service().fix_up_doors()
if not services.get_door_service().has_front_door():
logger.info('No front door found.')
self._should_perform_deferred_front_door_check = False
def _check_navmesh_updated_alarm_callback(self, *_):
try:
self._update_navmesh_id_if_neccessary()
except:
logger.exception('Exception thrown while processing navmesh update callbacks. Eating this exception to prevent the alarm from self-destructing.', owner='tastle')
def on_build_buy_enter(self):
self.is_in_build_buy = True
laundry_service = services.get_laundry_service()
if laundry_service is not None:
laundry_service.on_build_buy_enter()
def on_build_buy_exit(self):
self._update_navmesh_id_if_neccessary()
self.is_in_build_buy = False
self._add_expenditures_and_do_post_bb_fixup()
services.active_lot().flag_as_premade(False)
household = services.owning_household_of_active_lot()
if household:
services.get_event_manager().process_events_for_household(test_events.TestEvent.OnExitBuildBuy, household)
else:
services.get_event_manager().process_event(test_events.TestEvent.OnExitBuildBuy)
self._should_perform_deferred_front_door_check = True
laundry_service = services.get_laundry_service()
if laundry_service is not None:
laundry_service.on_build_buy_exit()
    def on_active_lot_clearing_begin(self):
        """Mark that the active lot is being cleared."""
        self.is_active_lot_clearing = True
    def on_active_lot_clearing_end(self):
        """Mark that the active lot clearing has finished."""
        self.is_active_lot_clearing = False
    def set_to_fixup_on_build_buy_exit(self, obj):
        """Queue obj for post-build/buy fixup.

        The WeakSet is created lazily and does not keep queued objects alive.
        """
        if self.objects_to_fixup_post_bb is None:
            self.objects_to_fixup_post_bb = weakref.WeakSet()
        self.objects_to_fixup_post_bb.add(obj)
def revert_zone_architectural_stat_effects(self):
statistic_manager = services.statistic_manager()
for stat_id, stat_value in self.zone_architectural_stat_effects.items():
stat = statistic_manager.get(stat_id)
if stat is None:
continue
tracker = self.lot.get_tracker(stat)
if tracker is None:
continue
tracker.add_value(stat, -stat_value)
self.zone_architectural_stat_effects.clear()
def _add_expenditures_and_do_post_bb_fixup(self):
if self.objects_to_fixup_post_bb is not None:
household = self.lot.get_household()
rebate_manager = household.rebate_manager if household is not None else None
active_household_id = services.active_household_id()
for obj in self.objects_to_fixup_post_bb:
if rebate_manager is not None:
rebate_manager.add_rebate_for_object(obj.id, RebateCategoryEnum.BUILD_BUY)
obj.try_post_bb_fixup(active_household_id=active_household_id)
self.objects_to_fixup_post_bb = None
    @property
    def save_slot_data_id(self):
        """Return the id of the save slot associated with this zone."""
        return self._save_slot_data_id
    def save_zone(self, save_slot_data=None):
        """Serialize this zone into its persistence protos.

        Rebuilds the gameplay zone data, the world spawn point id list and
        the open street data, then lets both the game-level and zone-level
        service managers write their state, and finally kicks off the async
        zone-objects save operation.
        """
        zone_data_msg = self._get_zone_proto()
        # Start from a clean sub-message; everything below repopulates it.
        zone_data_msg.ClearField('gameplay_zone_data')
        gameplay_zone_data = zone_data_msg.gameplay_zone_data
        gameplay_zone_data.lot_owner_household_id_on_save = self.lot.owner_household_id
        gameplay_zone_data.venue_type_id_on_save = self.venue_service.active_venue.guid64 if self.venue_service.active_venue is not None else 0
        gameplay_zone_data.active_household_id_on_save = services.active_household_id()
        travel_group = self.travel_group_manager.get_travel_group_by_zone_id(self.id)
        gameplay_zone_data.active_travel_group_id_on_save = travel_group.id if travel_group is not None else 0
        save_ticks = services.time_service().sim_now.absolute_ticks()
        game_clock = services.game_clock_service()
        gameplay_zone_data.game_time = save_ticks
        # Only PAUSED is preserved; any other speed is normalized to NORMAL.
        if game_clock.persistable_clock_speed == ClockSpeedMode.PAUSED:
            gameplay_zone_data.clock_speed_mode = ClockSpeedMode.PAUSED
        else:
            gameplay_zone_data.clock_speed_mode = ClockSpeedMode.NORMAL
        self.lot.save(gameplay_zone_data)
        for stat_id, value in self.zone_architectural_stat_effects.items():
            with ProtocolBufferRollback(gameplay_zone_data.architectural_statistics) as (entry):
                entry.name_hash = stat_id
                entry.value = value
        # Persist world spawn point ids ordered by their spawn point index.
        num_spawn_points = len(self._world_spawn_points)
        spawn_point_ids = [0] * num_spawn_points
        for spawn_point_id, spawn_point in self._world_spawn_points.items():
            spawn_point_ids[spawn_point.spawn_point_index] = spawn_point_id
        zone_data_msg.ClearField('spawn_point_ids')
        zone_data_msg.spawn_point_ids.extend(spawn_point_ids)
        zone_objects_message = serialization.ZoneObjectData()
        object_list = serialization.ObjectList()
        zone_objects_message.zone_id = self.id
        persistence_service = services.get_persistence_service()
        # Reuse the existing open street proto when present; otherwise create
        # one and remember to register it with the persistence service below.
        open_street_data = persistence_service.get_open_street_proto_buff(self.open_street_id)
        if open_street_data is not None:
            open_street_data.Clear()
            add_proto_to_persistence = False
        else:
            open_street_data = serialization.OpenStreetsData()
            add_proto_to_persistence = True
        open_street_data.world_id = self.open_street_id
        open_street_data.nbh_id = self.neighborhood_id
        open_street_data.sim_time_on_save = save_ticks
        open_street_data.active_household_id_on_save = services.active_household_id()
        open_street_data.active_zone_id_on_save = self.id
        # Let every registered service (global, then zone-level) write its
        # state into the shared protos.
        game_service_manager = game_services.service_manager
        game_service_manager.save_all_services(persistence_service, object_list=object_list,
            zone_data=zone_data_msg,
            open_street_data=open_street_data,
            save_slot_data=save_slot_data)
        self.service_manager.save_all_services(persistence_service, object_list=object_list,
            zone_data=zone_data_msg,
            open_street_data=open_street_data,
            save_slot_data=save_slot_data)
        zone_objects_message.objects.append(object_list)
        if add_proto_to_persistence:
            services.get_persistence_service().add_open_street_proto_buff(open_street_data)
        persistence_module.run_persistence_operation(persistence_module.PersistenceOpType.kPersistenceOpSaveZoneObjects, zone_objects_message, 0, None)
def load_zone(self):
zone_data_proto = self._get_zone_proto()
gameplay_zone_data = zone_data_proto.gameplay_zone_data
self.neighborhood_id = zone_data_proto.neighborhood_id
self.open_street_id = zone_data_proto.world_id
game_service_manager = game_services.service_manager
game_service_manager.load_all_services(zone_data=zone_data_proto)
self.service_manager.load_all_services(zone_data=zone_data_proto)
self._first_visit_to_zone = not protocol_buffer_utils.has_field(gameplay_zone_data, 'venue_type_id_on_save')
if gameplay_zone_data.HasField('game_time'):
self._time_of_last_save = DateAndTime(gameplay_zone_data.game_time)
if gameplay_zone_data.HasField('clock_speed_mode'):
self._client_connect_speed = ClockSpeedMode(gameplay_zone_data.clock_speed_mode)
open_street_data = services.get_persistence_service().get_open_street_proto_buff(self.open_street_id)
if open_street_data is not None:
self._time_of_last_open_street_save = DateAndTime(open_street_data.sim_time_on_save)
if zone_data_proto.spawn_point_ids:
if len(zone_data_proto.spawn_point_ids) != len(self._world_spawn_point_locators):
logger.error('Number of world spawn points {} does not match persisted count of {}. This is possible if world builder has added or removed spawn points since the last time this zone was visited. Resetting spawn point ids to recover from this error case. This might temporarily cause Sims to leave to random spawn points.', (len(self._world_spawn_point_locators)),
(len(zone_data_proto.spawn_point_ids)), owner='tingyul')
zone_data_proto.ClearField('spawn_point_ids')
for index, locator in enumerate(self._world_spawn_point_locators):
spawn_point_id = zone_data_proto.spawn_point_ids[index] if zone_data_proto.spawn_point_ids else None
spawn_point = WorldSpawnPoint(index, locator, (self.id), spawn_point_id=spawn_point_id)
self._world_spawn_points[spawn_point.spawn_point_id] = spawn_point
spawn_point.on_add()
self._world_spawn_point_locators = None
self.lot.load(gameplay_zone_data)
for | |
= self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
out = self.drop2(out)
out = self.relu1(out)
out = self.fc2(out)
out = torch.sigmoid(out)
return out
def predict_proba(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
with torch.no_grad():
x = Variable(x)
if torch.cuda.is_available():
x = x.cuda()
y = self.forward(x)
temp = y.data.cpu().numpy()
return temp
class GRU(nn.Module):
"""
It is a deep network with one GRU layer, which are further fed into one fully connected layers.
"""
def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
super(GRU, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first = True, dropout = dropout)
self.linear = nn.Linear(hidden_size, num_classes)
def forward(self, x):
if torch.cuda.is_available():
x = x.cuda()
h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).cuda() # 2 for bidirection
else:
h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)) # 2 for bidirection
self.gru.flatten_parameters()
out, hn = self.gru(x, h0)
rearranged = hn[-1]
out = self.linear(rearranged)
out = torch.sigmoid(out)
return out
def initHidden(self, N):
return Variable(torch.randn(1, N, self.hidden_size))
def predict_proba(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
with torch.no_grad():
x = Variable(x)
if torch.cuda.is_available():
x = x.cuda()
y = self.forward(x)
temp = y.data.cpu().numpy()
return temp
class RNN(nn.Module):
"""
It is a deep network with one LSTM layer, which are further fed into one fully connected layer.
"""
def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first = True, dropout = dropout)
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
if torch.cuda.is_available():
x = x.cuda()
h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).cuda()
c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).cuda()
else:
h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))
c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))
self.lstm.flatten_parameters()
out, hn = self.lstm(x, (h0, c0))
rearranged = hn[0][-1]
# Decode hidden state of last time step
out = self.fc(rearranged)
out = torch.sigmoid(out)
return out
def predict_proba(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
with torch.no_grad():
x = Variable(x)
if torch.cuda.is_available():
x = x.cuda()
y = self.forward(x)
temp = y.data.cpu().numpy()
return temp
class BiRNN(nn.Module):
"""
It is a deep network with one bidirectional LSTM layer, which are further fed into one fully connected layer.
"""
def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
super(BiRNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
batch_first = True, dropout = dropout, bidirectional=True)
self.fc = nn.Linear(hidden_size*2, num_classes) # 2 for bidirection
def forward(self, x):
if torch.cuda.is_available():
x = x.cuda()
h0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size)).cuda() # 2 for bidirection
c0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size)).cuda()
else:
h0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size)) # 2 for bidirection
c0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size))
self.lstm.flatten_parameters()
out, hn = self.lstm(x, (h0, c0))
hn = hn[0]
rearranged = hn[-2:].view(x.size(0), -1)
# Decode hidden state of last time step
out = self.fc(rearranged)
out = torch.sigmoid(out)
return out
def predict_proba(self, x):
if type(x) is np.ndarray:
x = torch.from_numpy(x.astype(np.float32))
with torch.no_grad():
x = Variable(x)
if torch.cuda.is_available():
x = x.cuda()
y = self.forward(x)
temp = y.data.cpu().numpy()
return temp
# select model
if __name__ == "__main__":
if model_type in ['LogisticRegression', 'MLP', 'SNN']:
y = population[:, 1]
X = plpData[population[:, 0], :]
trainInds = population[:, population.shape[1] - 1] > 0
if class_weight == -1:
loss = tu.FocalLoss(gamma = 5)
else:
if class_weight == 0:
weights = float(np.count_nonzero(y))/y.shape[0]
class_weight = [1 - weights, weights]
else:
class_weight = [class_weight, 1]
class_weight = 1/torch.Tensor(class_weight)
if torch.cuda.is_available():
class_weight = class_weight.cuda()
loss=nn.CrossEntropyLoss(weight = class_weight)
print("Dataset has %s rows and %s columns" % (X.shape[0], X.shape[1]))
print("population loaded- %s rows and %s columns" % (np.shape(population)[0], np.shape(population)[1]))
###########################################################################
l1regularization = False
if train:
pred_size = int(np.sum(population[:, population.shape[1] - 1] > 0))
print("Calculating prediction for train set of size %s" % (pred_size))
test_pred = np.zeros(pred_size) # zeros length sum(population[:,population.size[1]] ==i)
for i in range(1, int(np.max(population[:, population.shape[1] - 1]) + 1), 1):
testInd = population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] == i
trainInd = (population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] != i)
train_x = X[trainInds, :][trainInd, :]
train_y = y[trainInds][trainInd]
test_x = X[trainInds, :][testInd, :]
print("Fold %s split %s in train set and %s in test set" % (i, train_x.shape[0], test_x.shape[0]))
print("Train set contains %s outcomes " % (np.sum(train_y)))
train_x = train_x.toarray()
test_x = test_x.toarray()
if autoencoder:
print('first train stakced autoencoder')
encoding_size = 256
if vae:
auto_model = VAE(input_size=train_x.shape[1], encoding_size=encoding_size)
else:
auto_model = AutoEncoder(input_size=train_x.shape[1], encoding_size=encoding_size)
if torch.cuda.is_available():
auto_model = auto_model.cuda()
clf = tu.Estimator(auto_model)
clf.compile(optimizer=torch.optim.Adam(auto_model.parameters(), lr=1e-3, weight_decay = w_decay),
loss=nn.MSELoss())
clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, autoencoder = autoencoder, vae = vae)
#split to batch for large dataset
train_batch = tu.batch(train_x, batch_size=32)
train_x = np.array([]).reshape(0, encoding_size)
for train in train_batch:
encode_train = auto_model.get_encode_features(train)
train_x = np.concatenate((train_x, encode_train), axis=0)
#train_x = auto_model.get_encode_features(train_x.toarray())
#test_x = auto_model.get_encode_features(test_x.toarray())
test_batch = tu.batch(test_x, batch_size=32)
test_x = np.array([]).reshape(0, encoding_size)
for test in test_batch:
encode_Test = auto_model.get_encode_features(test)
test_x = np.concatenate((test_x, encode_Test), axis=0)
del auto_model
del clf
# train on fold
print("Training fold %s" % (i))
start_time = timeit.default_timer()
if model_type == 'LogisticRegression':
model = LogisticRegression(train_x.shape[1])
l1regularization = True
elif model_type == 'SNN':
model = SNN(train_x.shape[1], size)
else:
model = MLP(train_x.shape[1], size)
if torch.cuda.is_available():
model = model.cuda()
clf = tu.Estimator(model)
clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay = w_decay),
loss=loss)
#if not autoencoder:
# train_x = train_x.toarray()
# test_x = test_x.toarray()
clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, l1regularization = l1regularization)
ind = (population[:, population.shape[1] - 1] > 0)
ind = population[ind, population.shape[1] - 1] == i
test_input_var = torch.from_numpy(test_x.astype(np.float32))
test_batch = tu.batch(test_x, batch_size = 32)
temp = []
for test in test_batch:
pred_test1 = model.predict_proba(test)[:, 1]
temp = np.concatenate((temp, pred_test1), axis = 0)
#temp = model.predict_proba(test_input_var)[:, 1]
#temp = preds.data.cpu().numpy().flatten()
#print temp
test_pred[ind] = temp
print("Prediction complete: %s rows " % (np.shape(test_pred[ind])[0]))
print("Mean: %s prediction value" % (np.mean(test_pred[ind])))
# merge pred with indexes[testInd,:]
test_pred.shape = (population[population[:, population.shape[1] - 1] > 0, :].shape[0], 1)
prediction = np.append(population[population[:, population.shape[1] - 1] > 0, :], test_pred, axis=1)
# train final:
else:
print("Training final neural network model on all train data...")
print("X- %s rows and Y %s length" % (X[trainInds, :].shape[0], y[trainInds].shape[0]))
start_time = timeit.default_timer()
train_x = X[trainInds, :]
train_x = train_x.toarray()
train_y = y[trainInds]
if not os.path.exists(modelOutput):
os.makedirs(modelOutput)
if autoencoder:
encoding_size = 256
if vae:
auto_model = VAE(input_size=train_x.shape[1], encoding_size=encoding_size)
else:
auto_model = AutoEncoder(input_size=train_x.shape[1], encoding_size=encoding_size)
if torch.cuda.is_available():
auto_model = auto_model.cuda()
clf = tu.Estimator(auto_model)
clf.compile(optimizer=torch.optim.Adam(auto_model.parameters(), lr=1e-3, weight_decay=w_decay),
loss=nn.MSELoss())
clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, autoencoder=autoencoder, vae = vae)
#train_x = auto_model.get_encode_features(train_x.toarray())
train_batch = tu.batch(train_x, batch_size=32)
train_x = np.array([]).reshape(0, encoding_size)
for train in train_batch:
encode_train = auto_model.get_encode_features(train)
train_x = np.concatenate((train_x, encode_train), axis=0)
joblib.dump(auto_model, os.path.join(modelOutput, 'autoencoder_model.pkl'))
del auto_model
del clf
print('the final parameter epochs %.2f weight_decay %.2f' %(epochs,w_decay))
if model_type == 'LogisticRegression':
model = LogisticRegression(train_x.shape[1])
l1regularization = True
elif model_type == 'SNN':
model = SNN(train_x.shape[1], size)
else:
model = MLP(train_x.shape[1], size)
#if not autoencoder:
# train_x = train_x.toarray()
if torch.cuda.is_available():
model = model.cuda()
clf = tu.Estimator(model)
clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay = w_decay),
loss=loss)
clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, l1regularization = l1regularization)
end_time = timeit.default_timer()
print("Training final took: %.2f s" % (end_time - start_time))
# save the model:
print("Model saved to: %s" % (modelOutput))
joblib.dump(model, os.path.join(modelOutput,'model.pkl'))
elif model_type in ['CNN', 'RNN', 'CNN_LSTM', 'CNN_MLF', 'CNN_MIX', 'GRU', 'BiRNN', 'CNN_MULTI', 'ResNet']:
#print 'running model', model_type
y = population[:, 1]
#plpData = plpData[population[:, 0], :]
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#with tf.Session(config=config) as sess:
# X = tf.sparse_reorder(plpData)
# X = tf.sparse_tensor_to_dense(X)
# X = sess.run(X)
#tu.forward_impute_missing_value(X)
X = plpData.to_dense().numpy()
X = X[np.int64(population[:, 0]), :]
'''
p_ids_in_cov = set(covariates[:, 0])
full_covariates = np.array([]).reshape(0,4)
default_covid = covariates[0, 1]
timeid_len = len(set(covariates[:, -2]))
for p_id in population[:, 0]:
if p_id not in p_ids_in_cov:
tmp_x = np.array([p_id, default_covid, 1, 0]).reshape(1,4) #default cov id, timeid=1
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
else:
tmp_x = covariates[covariates[:, 0] == p_id, :]
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
trainInds = population[:, population.shape[1] - 1] > 0
X, patient_keys = tu.convert_to_temporal_format(full_covariates, timeid_len= timeid_len)
full_covariates = []
'''
if class_weight == | |
# anvil_mods.py
import pandas as pd
import numpy as np
import shapely
import geopandas as gpd
import quandl
from fred import Fred
# demo api key
# NOTE(review): "<KEY>" is a placeholder; a real QUANDL key must be set
# before any quandl call is made.
quandl.ApiConfig.api_key = "<KEY>"
def formatIndicatorLikeQuandl(indicator, **kwargs):
    """
    Fetch a FRED series and shape it like a QUANDL one.

    Pulls the series named by *indicator* from FRED, keeps only the
    date/value fields, drops missing rows, optionally restricts rows to
    the kwargs['start_date']..kwargs['end_date'] window (both keys are
    required if any kwargs are given), indexes by date and renames the
    value column to 'Value' for downstream consistency.

    Returns a pandas DataFrame on success, or the raised exception
    object on failure (module convention).
    """
    try:
        # demo API key; response_type='df' makes the client return DataFrames
        client = Fred(api_key='<KEY>', response_type='df')
        # fetch the series and keep only the date/value fields
        frame = client.series.observations(indicator).loc[:, ('date', 'value')]
        frame = frame.dropna()
        # string dates -> datetime for masking and indexing
        frame['date'] = pd.to_datetime(frame['date'])
        if kwargs:
            # keep only rows inside [start_date, end_date] (inclusive)
            inside = frame['date'].between(kwargs['start_date'], kwargs['end_date'])
            frame = frame[inside]
        # index by date for downstream index processing
        frame = frame.set_index('date')
        # rename the value col for consistency with QUANDL data
        frame = frame.rename({'value': 'Value'}, axis=1)
    except Exception as e:
        return e
    return frame
def convertGeoJsonGeometry(data):
    """
    Convert a GeoJSON feature collection into a GeoDataFrame.

    Each feature's 'geometry' mapping is converted in place to a shapely
    geometry, the feature list is flattened and wrapped in a geopandas
    GeoDataFrame, and property columns lose their 'properties.' prefix.

    :param data: parsed GeoJSON object with a 'features' list.
    :return: geopandas.GeoDataFrame on success, or the raised exception
        object on failure (module convention).
    """
    try:
        # convert features to shapes so it can be converted to GDF
        for d in data['features']:
            d['geometry'] = shapely.geometry.shape(d['geometry'])
        # comes as a geojson feature collection.
        # BUGFIX: pd.io.json.json_normalize was removed in pandas 2.0;
        # the top-level pd.json_normalize is the supported spelling.
        geoframe = gpd.GeoDataFrame(pd.json_normalize(data['features']))
        # strip the 'properties.' prefix from flattened column names.
        # BUGFIX: regex=False — with the old regex=True default the '.'
        # matched any character, so unrelated columns could be mangled.
        geoframe.columns = geoframe.columns.str.replace('properties.', '', regex=False)
    except Exception as e:
        return e
    return geoframe
def convertSpecGeoJsonGeometry(data, cols):
    """
    Build a GeoDataFrame from a GeoJSON feature collection, keeping only
    selected property columns.

    :param data: parsed GeoJSON object with a 'features' list.
    :param cols: property names to copy out of each feature's
        'properties' mapping (missing names raise, caught below).
    :return: geopandas.GeoDataFrame on success, or the raised exception
        object on failure (module convention).
    """
    try:
        # one dict per feature, accumulated then converted in one shot
        rows = []
        for feature in data['features']:
            row = {}
            for key in feature.keys():
                if key == 'geometry':
                    # convert the raw mapping into a shapely geometry so
                    # it can live in a GeoDataFrame
                    row[key] = shapely.geometry.shape(feature[key])
                elif key == 'properties':
                    # keep only the requested property columns
                    for col in cols:
                        row[col] = feature[key][col]
                # any other keys are intentionally dropped
            rows.append(row)
        geoframe = gpd.GeoDataFrame(rows)
    except Exception as e:
        return e
    return geoframe
def getPeriodicIndexMovement(indicator):
    """
    Sum the movement of an economic index within each calendar year.

    For every year present in the (datetime) index, sums the first
    differences of the 'Value' column within that year.

    BUGFIX: the original wrote a helper 'Year' column into the caller's
    DataFrame; this version groups on the index directly and leaves the
    input untouched.

    :param indicator: pd.DataFrame with a DatetimeIndex and a 'Value'
        column (QUANDL convention).
    :return: pd.Series of per-year movement indexed by year, or the
        raised exception object on failure (module convention).
    """
    try:
        # trip wire in case col values change in QUANDL
        assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
        # group by calendar year without mutating the caller's frame;
        # rename keeps the original 'Year' label on the result index
        years = indicator.index.year.rename('Year')
        indicator_mvt = indicator.groupby(years)['Value'].apply(lambda x: x.diff().sum())
    except Exception as e:
        return e
    return indicator_mvt
def getAnnualIndexMovement(indicator):
    """
    Return the year-over-year movement of an economic index.

    Each entry is the row's value minus the following row's value (a
    backward difference), so the final entry is always NaN.

    :param indicator: pd.DataFrame with a DatetimeIndex and a 'Value'
        column (QUANDL convention).
    :return: pd.Series indexed by calendar year, or the raised
        exception object on failure (module convention).
    """
    try:
        # trip wire in case col values change in QUANDL
        assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
        # current row minus the next row
        yearly_delta = indicator.diff(periods=-1)
        # re-key by calendar year so callers can look rows up by year
        yearly_delta.index = yearly_delta.index.year
    except Exception as e:
        return e
    # collapse the single-column frame into a Series
    return yearly_delta.squeeze()
def spatialJoinFeatures(parcels, features):
    """
    Spatially join each parcel with the feature dataset by intersecting
    geometries, chunking the work to bound memory usage.

    :param parcels: gpd.GeoDataFrame; if a 'buff_dist' column is present
        it is used as the active geometry (buffered parcels).
    :param features: gpd.GeoDataFrame with geometry and feature-name
        columns only.
    :return: gpd.GeoDataFrame of joined rows without the sjoin
        'index_right' helper column, or the raised exception object on
        failure (module convention).
    """
    try:
        assert isinstance(parcels, gpd.GeoDataFrame), 'spatialJoinAmmenities first argument must be a geodataframe. You passed an %r' % type(parcels)
        assert isinstance(features, gpd.GeoDataFrame), 'spatialJoinAmmenities second argument must be a geodataframe. You passed an %r' % type(features)
        # container for the accumulated join results
        parcels_w_features = gpd.GeoDataFrame()
        # BUGFIX: np.array_split needs an int section count >= 1; the
        # original np.round(size/100) was a float (TypeError on py3) and
        # rounded to 0 sections for fewer than ~50 parcels.
        n_chunks = max(1, int(round(parcels.index.size / 100)))
        # chunk the data to make memory usage more efficient
        for chunk in np.array_split(parcels, n_chunks):
            increment = 500
            iter1 = 0
            iter2 = increment
            size = chunk.index.size
            # convert chunk back to GeoDataFrame for sjoin operation
            chunk = gpd.GeoDataFrame(chunk)
            if 'buff_dist' in chunk.columns:
                # set the right geometry in case of buffer distance
                chunk = chunk.set_geometry('buff_dist')
            # sweep the chunk in windows of `increment` rows
            while iter1 < size:
                if iter2 > size:
                    # final (partial) window
                    temp_df = gpd.tools.sjoin(chunk.iloc[iter1:], features)
                else:
                    temp_df = gpd.tools.sjoin(chunk.iloc[iter1:iter2], features)
                if not temp_df.empty:
                    # accumulate non-empty join results
                    parcels_w_features = pd.concat([parcels_w_features, temp_df])
                # free up memory
                del temp_df
                # advance the window
                iter1 = iter2
                iter2 += increment
    except Exception as e:
        return e
    # return the result w/o the index_right column added by sjoin
    return parcels_w_features.drop('index_right', axis=1)
def getCountForSpatialJoin(search_parcels, record_parcels, search_col1, search_col2):
    """
    Count how many joined features each parcel has.

    For every unique parcel in *search_parcels*, its rows in
    *record_parcels* are de-duplicated on *search_col2* (the address)
    and the *search_col1* entries are counted; a semicolon-delimited
    entry counts once per delimited item.

    :param search_parcels: pd.Series of parcel identifiers.
    :param record_parcels: DataFrame/GeoDataFrame with at least
        'parcel', *search_col1* and *search_col2* columns.
    :param search_col1: name of the column whose items are counted.
    :param search_col2: address column used to drop duplicate records.
    :return: pd.Series of counts indexed by parcel, or the raised
        exception object on failure (module convention).
    """
    try:
        assert isinstance(search_col1, str), 'Param search_col1 should be type str. Got %r instead.' % type(search_col1)
        assert isinstance(search_col2, str), 'Param search_col2 should be type str. Got %r instead.' % type(search_col2)
        counts = {}
        for parcel in search_parcels.unique():
            # rows for this parcel, de-duplicated on the address column.
            # BUGFIX: the original selected columns with a tuple,
            # .loc[:, (col1, col2)], which modern pandas rejects
            # (tuples are reserved for MultiIndex keys); use a list.
            items = record_parcels[record_parcels['parcel'] == parcel][
                [search_col1, search_col2]].drop_duplicates(search_col2)[search_col1]
            # each record contributes one count per ';'-delimited item
            count = 0
            for item in items:
                count += len(item.split(';'))
            counts[parcel] = count
    except Exception as e:
        return e
    # series of per-parcel counts, indexed by parcel
    return pd.Series(counts)
def getDateIntvlByParcel(parcels):
"""
Calculate days between sales of each | |
not in ['_BPM', 'OEMK'] and (Comp.Offset & 0xFFF > 0):
raise Exception("Component '%s' %x is not aligned at 4KB boundary, " \
"please adjust padding size for IPAD/OPAD in BoardConfig.py and rebuild !" % (CompBpdtDict[Desc.Sig], Comp.Offset))
Desc.Offset = Comp.Offset - Bp0.Offset
# Last 4k in bios region is reserved for bootloader, throw exception if any component falls in that range
if (Bp1.Offset + Bp1.Length - 0x1000) <= (Desc.Offset + Desc.Size) <= (Bp1.Offset + Bp1.Length):
raise Exception("Component '%s' offset is in bootloader reserved region, please try to reduce compoent size !" % CompBpdtDict[Desc.Sig])
print ("Flash map was patched successfully!")
return 0
def AddComponent (Root, Path, Before = '$', FilePath = ''):
    """
    Insert a FILE component at Path inside an existing DIR component.

    Before selects the insertion point inside the directory: '$'
    appends, '^' prepends, and any other value inserts before the child
    of that name (the last child with that name, as before).  FilePath,
    when given, supplies the file contents and length; otherwise a
    0x1000-byte placeholder is used.
    Returns 0 on success, a negative code on failure.
    """
    Nodes = Path.split('/')
    ParentPath = '/'.join(Nodes[:-1])
    DirComp = LocateComponent (Root, ParentPath)
    if not DirComp:
        print ('Cannot find DIR %s !' % '/'.join(Nodes[:-1]))
        return -1
    if DirComp.Type != COMPONENT.TYPE_DIR:
        print ('Can only add FILE type !')
        return -2
    if Before == '$':
        # append after the last child
        Index = len(DirComp.Child)
    elif Before == '^':
        # insert at the top
        Index = 0
    else:
        # insert before the named child; last match wins, as in the
        # original scan
        Matches = [Idx for Idx, Entry in enumerate(DirComp.Child)
                   if Entry.Name == Before]
        Index = Matches[-1] if Matches else None
    if Index is None:
        print ('Cannot find FILE %s !' % Before)
        return -3
    # placeholder length when no backing file is supplied
    Length = os.path.getsize(FilePath) if FilePath else 0x1000
    Comp = COMPONENT (Nodes[-1], COMPONENT.TYPE_FILE, 0, Length)
    Comp.SetData (FilePath)
    DirComp.AddChild (Comp, Index)
    return 0
def RemoveComponent (Root, Path):
    """
    Remove the FILE component at Path from its parent DIR component.

    Returns 0 on success, a negative code on failure.
    """
    Nodes = Path.split('/')
    ParentPath = '/'.join(Nodes[:-1])
    DirComp = LocateComponent (Root, ParentPath)
    if not DirComp:
        print ('Cannot find DIR %s !' % '/'.join(Nodes[:-1]))
        return -1
    if DirComp.Type != COMPONENT.TYPE_DIR:
        # BUGFIX: message said 'replace' (copy/paste from ReplaceComponent)
        print ('Can only remove FILE type !')
        return -2
    # locate the child by name (first match)
    Index = None
    for Idx, File in enumerate(DirComp.Child):
        if File.Name == Nodes[-1]:
            Index = Idx
            break
    if Index is None:
        print ('Cannot find FILE %s !' % Path)
        return -3
    del DirComp.Child[Index]
    return 0
def ReplaceComponent (Root, Path, FilePath):
    """
    Replace the contents of the FILE component at Path.

    FilePath supplies the new data and length; when empty, only the
    length is reset to the 0x1000 placeholder.
    Returns 0 on success, a negative code on failure.
    """
    Comp = LocateComponent (Root, Path)
    if not Comp:
        print ('Cannot find FILE %s !' % Path)
        return -1
    if Comp.Type != COMPONENT.TYPE_FILE:
        # BUGFIX: original did "'... !' % Path" with no format
        # specifier, raising TypeError whenever this branch was hit
        print ('Can only replace FILE type !')
        return -2
    Comp.Length = os.path.getsize(FilePath) if FilePath else 0x1000
    if FilePath:
        Comp.SetData (FilePath)
    return 0
def CopyComponent (Root, Path, IfwiData):
    """
    Duplicate the BP0 BPDT into BP1 and keep BP1's original SBPDT
    behind it, patching IfwiData in place.

    Path is accepted for signature compatibility with the other
    component operations but is not used.
    Returns 0 on success, -1 when BP1 cannot hold the combined data.
    """
    print ("COPY BP0 BPDT to BP1 BPDT ...")
    # Backup BP0 BPDT and BP1 SBPDT
    Bp1      = LocateComponent (Root, 'ROOT/IFWI/BP1')
    Bp0Bpdt  = LocateComponent (Root, 'ROOT/IFWI/BP0/BPDT')
    Bp1Bpdt  = LocateComponent (Root, 'ROOT/IFWI/BP1/BPDT')
    Bp1SBpdt = LocateComponent (Root, 'ROOT/IFWI/BP1/SBPDT')
    Bp0BpdtData  = bytearray(IfwiData[Bp0Bpdt.Offset :Bp0Bpdt.Offset + Bp0Bpdt.Length])
    Bp1SBpdtData = bytearray(IfwiData[Bp1SBpdt.Offset:Bp1SBpdt.Offset + Bp1SBpdt.Length])
    # Copy to BP0 BPDT to BP1 BPDT
    Bp1SBpdtOffset = Bp1Bpdt.Offset + Bp0Bpdt.Length
    IfwiData[Bp1Bpdt.Offset:Bp1SBpdtOffset] = Bp0BpdtData
    # Append original BP1 SBPDT
    Bp1SBpdtEndOffset = Bp1SBpdtOffset + Bp1SBpdt.Length
    IfwiData[Bp1SBpdtOffset:Bp1SBpdtEndOffset] = Bp1SBpdtData
    Padding = Bp1.Offset + Bp1.Length - Bp1SBpdtEndOffset
    if Padding < 0:
        print ('Insufficiant space in BP1 partition !')
        return -1
    # BUGFIX: bytes literal (b'\xff') so the bytearray slice assignment
    # also works on Python 3; on Python 2 bytes is str, so no change.
    IfwiData[Bp1SBpdtEndOffset:Bp1SBpdtEndOffset+Padding] = b'\xff' * Padding
    # Fix Sbpdt length in BP1 BPDT
    Offset  = Bp1Bpdt.Offset
    BpdtHdr = BPDT_HEADER.from_buffer(IfwiData, Offset)
    Offset += sizeof(BPDT_HEADER)
    for Idx in range(BpdtHdr.DescCnt):
        BpdtEntry = BPDT_ENTRY.from_buffer(IfwiData, Offset)
        if 'BpdtSbpdt' == str(BpdtEntry.Type):
            BpdtEntry.SubPartSize = Bp1SBpdt.Length
        Offset += sizeof(BPDT_ENTRY)
    # Fix Sbpdt headers: shift entries by the size difference between
    # the copied BP0 BPDT and the old BP1 BPDT
    Offset  = Bp1SBpdtOffset
    BpdtHdr = BPDT_HEADER.from_buffer(IfwiData, Offset)
    Offset += sizeof(BPDT_HEADER)
    for Idx in range(BpdtHdr.DescCnt):
        BpdtEntry = BPDT_ENTRY.from_buffer(IfwiData, Offset)
        BpdtEntry.SubPartOffset += (Bp0Bpdt.Length - Bp1Bpdt.Length)
        Offset += sizeof(BPDT_ENTRY)
    print ("Done!")
    return 0
def CreateDirData (Dir, IfwiData):
    """
    Rebuild the raw directory (SUBPART_DIR) data for a BpdtIbb/BpdtObb
    directory component, honouring per-file alignment rules, and return
    it as a bytearray padded out to FILE_ALIGN.

    BUGFIX: all filler literals are now bytes (b'\\xff' / b'\\x00') so
    bytearray concatenation/extension also works on Python 3; on
    Python 2 bytes is str, so behavior is unchanged.
    """
    # Calculate new DIR length and create new DIR data
    SupportList = ['BpdtIbb', 'BpdtObb']
    if Dir.Name not in SupportList:
        raise Exception ('Only %s are supported !' % ' '.join(SupportList))
    Adjust  = True
    # header + entry table precede the file data
    Offset  = len(Dir.Child) * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
    SubDirHdr = SUBPART_DIR_HEADER.from_buffer(IfwiData, Dir.Offset)
    DirData = bytearray(SubDirHdr) + b'\xff' * (Offset - sizeof(SUBPART_DIR_HEADER))
    SubDirHdr = SUBPART_DIR_HEADER.from_buffer(DirData)
    SubDirHdr.NumOfEntries = len(Dir.Child)
    for Idx, Comp in enumerate(Dir.Child):
        Delta = 0
        Parts = os.path.splitext(Comp.Name)
        if len(Parts) > 1 and Parts[1] in ['.man', '.met']:
            # manifests/metadata need no alignment
            Align = 1
        elif Comp.Name in ['IPAD', 'OPAD']:
            Align = 0x40
        else:
            Align = FILE_ALIGN
            # compensate for the DIR's own misalignment within the page
            Delta = Dir.Offset & (FILE_ALIGN - 1)
        NextOffset = ((Offset + Delta + Align - 1) & ~(Align - 1))
        Count = NextOffset - Offset
        if Adjust:
            # only the first aligned file absorbs the DIR misalignment
            Adjust = False
            Count -= Delta
        DirData.extend(b'\xff' * Count)
        # prefer freshly-set data, else copy from the existing image
        CompData = Comp.GetData()
        if CompData:
            DirData.extend(CompData)
        else:
            DirData.extend(IfwiData[Comp.Offset : Comp.Offset + Comp.Length])
        # (re)write this file's directory entry
        EntryOffset = Idx * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
        DirData[EntryOffset:EntryOffset+sizeof(SUBPART_DIR_ENTRY)] = b'\x00' * sizeof(SUBPART_DIR_ENTRY)
        SubDir = SUBPART_DIR_ENTRY.from_buffer(DirData, EntryOffset)
        SubDir.EntryName   = Comp.Name
        SubDir.EntryOffset = NextOffset - Delta
        SubDir.EntrySize   = Comp.Length
        SubDir.Reserved1   = 0
        SubDir.Reserved2   = 0
        NextOffset += Comp.Length
        Offset = NextOffset
    # pad the tail out to FILE_ALIGN
    Align      = FILE_ALIGN
    NextOffset = ((Offset + Align - 1) & ~(Align - 1))
    DirData.extend(b'\xff' * (NextOffset - Offset))
    # Update checksum over header + entry table
    SubDirHdr = SUBPART_DIR_HEADER.from_buffer(DirData)
    SubDirHdr.Checksum = 0
    Length = SubDirHdr.NumOfEntries * sizeof(SUBPART_DIR_ENTRY) + sizeof(SUBPART_DIR_HEADER)
    SumBuf = (c_uint8 * Length).from_buffer(DirData)
    SubDirHdr.Checksum = (~sum(SumBuf) + 1) & 0xFF
    Remaining = (Dir.Offset + len(DirData)) & (FILE_ALIGN - 1)
    if Remaining:
        # Not page aligned, add padding
        DirData.extend(b'\xff' * (FILE_ALIGN - Remaining))
    return DirData
def RefreshIfwiForDir (Dir, IfwiData):
    """
    Regenerate a directory's raw data and splice it back into IfwiData,
    fixing up sizes and offsets of every affected BPDT/SBPDT structure
    and re-padding the boot partition.

    Returns 0 on success; raises on duplicated/missing DIRs or when the
    new layout no longer fits the boot partition.
    """
    # Calculate new DIR length and create new DIR data
    DirData = CreateDirData (Dir, IfwiData)
    Length = len (DirData)
    AdjustLength = Length - Dir.Length
    if (Dir.Offset + Length) & (FILE_ALIGN - 1):
        # BUGFIX: was a Python 2 `print hex(..), hex(..)` statement,
        # a syntax error on Python 3; this form works on both.
        print ('%s %s' % (hex(Dir.Offset), hex(Length)))
        print ('Dir total size needs to be 4KB aligned !')
    # Remember original SBPDT offset
    OrgBpdtOffset  = Dir.Parent.Parent.Child[0].Offset
    OrgSbpdtOffset = Dir.Parent.Parent.Child[1].Offset
    # Adjust offset and size for peer and up level in tree
    OldDir = Dir
    while Dir.Type != COMPONENT.TYPE_BP:
        for Each in Dir.Parent.Child:
            if Each.Offset > Dir.Offset:
                Each.Offset += AdjustLength
        Dir.Length += AdjustLength
        Dir = Dir.Parent
    Dir = OldDir
    # Update parent BPDT header info in IFWI data
    Parent  = Dir.Parent
    BpdtHdr = BPDT_HEADER.from_buffer(IfwiData, Parent.Offset)
    Base    = Parent.Offset + sizeof(BPDT_HEADER)
    Found   = False
    for Idx in range(BpdtHdr.DescCnt):
        BpdtEntry = BPDT_ENTRY.from_buffer(IfwiData, Base + Idx * sizeof(BPDT_ENTRY))
        Comps = [x for x in Parent.Child if x.Name == str(BpdtEntry.Type)]
        if len(Comps) > 1:
            # BUGFIX: message is now actually formatted (was
            # Exception('...%s !', x), which never interpolated)
            raise Exception ('Found duplicated DIR %s !' % BpdtEntry.Type)
        BpdtEntry.SubPartOffset = Comps[0].Offset - Parent.Parent.Offset
        if Dir.Name == str(BpdtEntry.Type):
            BpdtEntry.SubPartSize = Length
            Found = True
    if not Found:
        # BUGFIX: same formatting fix as above
        raise Exception ('Could not find DIR %s !' % Dir.Name)
    # Update SBPDT DIR header in IFWI data
    BpComp = Parent.Parent
    if Parent.Name == 'BPDT':
        # the resized DIR lives in BPDT: shift every SBPDT entry
        BpdtHdr = BPDT_HEADER.from_buffer (IfwiData, OrgSbpdtOffset)
        BpdtHdr.XorSum = 0
        BaseOffset = OrgSbpdtOffset + sizeof(BPDT_HEADER)
        for Idx in range(BpdtHdr.DescCnt):
            BpdtEntry = BPDT_ENTRY.from_buffer(IfwiData, BaseOffset + Idx * sizeof(BPDT_ENTRY))
            BpdtEntry.SubPartOffset += AdjustLength
            if (BpdtEntry.SubPartOffset + BpdtEntry.SubPartSize) > BpComp.Length:
                raise Exception ('Insufficiant space in layout !')
    else:
        # 'SBPDT', update length in BPDT
        BpdtHdr = BPDT_HEADER.from_buffer (IfwiData, OrgBpdtOffset)
        BpdtHdr.XorSum = 0
        BaseOffset = OrgBpdtOffset + sizeof(BPDT_HEADER)
        for Idx in range(BpdtHdr.DescCnt):
            BpdtEntry = BPDT_ENTRY.from_buffer(IfwiData, BaseOffset + Idx * sizeof(BPDT_ENTRY))
            if str(BpdtEntry.Type) == 'BpdtSbpdt':
                BpdtEntry.SubPartSize += AdjustLength
                if (BpdtEntry.SubPartOffset + BpdtEntry.SubPartSize) > BpComp.Length:
                    raise Exception ('Insufficiant space in layout !')
    # Generate actual final IFWI Data
    if AdjustLength > 0:
        # DIR grew: drop AdjustLength of padding from the partition tail
        IfwiData[:] = IfwiData[:OldDir.Offset] + DirData + \
                      IfwiData[OldDir.Offset + OldDir.Length - AdjustLength : BpComp.Offset + BpComp.Length - AdjustLength] + \
                      IfwiData[BpComp.Offset + BpComp.Length:]
    else:
        # DIR shrank: re-pad the partition tail with 0xFF.
        # BUGFIX: bytes literal (b'\xff') so the concatenation also
        # works on Python 3; identical on Python 2.
        AdjustLength = -AdjustLength
        IfwiData[:] = IfwiData[:OldDir.Offset] + DirData + \
                      IfwiData[OldDir.Offset + OldDir.Length + AdjustLength: BpComp.Offset + BpComp.Length] + \
                      b'\xff' * AdjustLength + IfwiData[BpComp.Offset + BpComp.Length:]
    return 0
def ParseIfwiLayout (IfwiImgData):
    """
    Build a COMPONENT tree describing the flash regions of an IFWI
    image.

    Returns the ROOT component, or None when the SPI descriptor
    signature is missing.
    """
    SpiDescriptor = SPI_DESCRIPTOR.from_buffer(IfwiImgData, 0)
    if SpiDescriptor.FlValSig != SpiDescriptor.DESC_SIGNATURE:
        return None
    # SPI region names paired with the component names used in the tree
    Regions = [("descriptor", "DESC"), ("ifwi" , "IFWI"), ("pdr", "PDR"), ("dev_expansion", "DEVE")]
    RgnList = []
    for RgnName, CompName in Regions:
        Start, Limit = FindSpiRegion (SpiDescriptor, RgnName)
        if Start is None:
            # region not present in this descriptor
            continue
        RgnList.append((CompName, Start, Limit - Start + 1))
    # order regions by their start offset
    RgnList.sort (key = lambda Rgn : Rgn[1])
    Root = COMPONENT ('ROOT', COMPONENT.TYPE_IMG, 0, len(IfwiImgData))
    for Name, Start, Size in RgnList:
        Comp = COMPONENT (Name, COMPONENT.TYPE_RGN, Start, Size)
        if Name == 'IFWI':
            # descend into the IFWI region and parse its boot partitions
            ParseIfwiRegion (Comp, IfwiImgData)
        Root.AddChild (Comp)
    return Root
def ManipulateIfwi (Action, Path, IfwiData, FileName = '', Before = '$'):
print ('%s %s ...' % (Action, Path))
Root = ParseIfwiLayout (IfwiData)
if Action == "REMOVE":
Ret = RemoveComponent (Root, Path)
elif Action == "ADD":
Ret = AddComponent (Root, Path, Before, FileName)
elif Action == "REPLACE":
Ret = ReplaceComponent (Root, Path, FileName)
elif Action == "COPY":
Ret = CopyComponent (Root, 'ROOT/IFWI/BP0/BPDT', IfwiData)
else:
Ret = -100
if Ret == | |
<gh_stars>0
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rarithmetic import r_uint
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rtyper import rclass
from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt,
INT, FLOAT, REF, VOID)
from rpython.jit.backend.aarch64 import registers as r
from rpython.jit.backend.aarch64.codebuilder import OverwritingBuilder
from rpython.jit.backend.aarch64.callbuilder import Aarch64CallBuilder
from rpython.jit.backend.arm import conditions as c, shift
from rpython.jit.backend.aarch64.regalloc import check_imm_arg
from rpython.jit.backend.aarch64.arch import JITFRAME_FIXED_SIZE, WORD
from rpython.jit.backend.aarch64 import locations
from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.backend.llsupport.regalloc import get_scale
from rpython.jit.metainterp.history import TargetToken
from rpython.jit.metainterp.resoperation import rop
def gen_comp_op(name, flag):
    """Build an emitter for an integer comparison that materializes a
    0/1 result via CSET (CSET_r_flag takes the inverse condition)."""
    def _emit(self, op, arglocs):
        lhs, rhs, dest = arglocs
        self.emit_int_comp_op(op, lhs, rhs)
        self.mc.CSET_r_flag(dest.value, c.get_opposite_of(flag))
    _emit.__name__ = name
    return _emit
def gen_float_comp_op(name, flag):
    """Build an emitter for a float comparison that materializes a
    0/1 result via CSET (CSET_r_flag takes the inverse condition)."""
    def _emit(self, op, arglocs):
        lhs, rhs, dest = arglocs
        self.emit_float_comp_op(op, lhs, rhs)
        self.mc.CSET_r_flag(dest.value, c.get_opposite_of(flag))
    _emit.__name__ = name
    return _emit
def gen_float_comp_op_cc(name, flag):
    """Build an emitter for a float comparison that leaves its result
    in the condition flags and returns the condition code to test."""
    def _emit(self, op, arglocs):
        lhs, rhs = arglocs
        self.emit_float_comp_op(op, lhs, rhs)
        return flag
    _emit.__name__ = name
    return _emit
class ResOpAssembler(BaseAssembler):
    def imm(self, v):
        # Wrap an integer in an immediate location object.
        return locations.imm(v)
    def int_sub_impl(self, op, arglocs, flags=0):
        # Emit an integer SUB; a truthy `flags` selects the flag-setting
        # (S) variant, used by the overflow-checking comparison ops.
        l0, l1, res = arglocs
        if flags:
            s = 1
        else:
            s = 0
        if l1.is_imm():
            value = l1.getint()
            # immediate encoding only handles non-negative values here
            assert value >= 0
            self.mc.SUB_ri(res.value, l0.value, value, s)
        else:
            self.mc.SUB_rr(res.value, l0.value, l1.value, s)
    def emit_op_int_sub(self, op, arglocs):
        # plain subtraction, no flag update
        self.int_sub_impl(op, arglocs)
    def int_add_impl(self, op, arglocs, ovfcheck=False):
        # Emit an integer ADD; `ovfcheck` selects the flag-setting (S)
        # variant, used by the overflow-checking comparison ops.
        l0, l1, res = arglocs
        # regalloc is expected to place any immediate operand in l1
        assert not l0.is_imm()
        if ovfcheck:
            s = 1
        else:
            s = 0
        if l1.is_imm():
            self.mc.ADD_ri(res.value, l0.value, l1.value, s)
        else:
            self.mc.ADD_rr(res.value, l0.value, l1.value, s)
    def emit_op_int_add(self, op, arglocs):
        # plain addition, no flag update
        self.int_add_impl(op, arglocs)

    # bumping the nursery pointer is just an integer add
    emit_op_nursery_ptr_increment = emit_op_int_add
    def emit_comp_op_int_add_ovf(self, op, arglocs):
        # ADD with the S bit set; the overflow outcome is carried in the
        # CPU flags, so the returned condition code (0) is a placeholder
        self.int_add_impl(op, arglocs, True)
        return 0
    def emit_comp_op_int_sub_ovf(self, op, arglocs):
        # SUB with the S bit set; overflow outcome carried in CPU flags
        self.int_sub_impl(op, arglocs, True)
        return 0
    def emit_op_int_mul(self, op, arglocs):
        # res = reg1 * reg2 (low 64 bits)
        reg1, reg2, res = arglocs
        self.mc.MUL_rr(res.value, reg1.value, reg2.value)
    def emit_comp_op_int_mul_ovf(self, op, arglocs):
        # Overflow-checked signed multiply: SMULH computes the high 64
        # bits; comparing them against the low result shifted right by
        # 63 (i.e. its sign bits) detects signed overflow.
        reg1, reg2, res = arglocs
        self.mc.SMULH_rr(r.ip0.value, reg1.value, reg2.value)
        self.mc.MUL_rr(res.value, reg1.value, reg2.value)
        self.mc.CMP_rr_shifted(r.ip0.value, res.value, 63)
        return 0
    def emit_op_int_and(self, op, arglocs):
        # res = l0 & l1
        l0, l1, res = arglocs
        self.mc.AND_rr(res.value, l0.value, l1.value)
    def emit_op_int_or(self, op, arglocs):
        # res = l0 | l1
        l0, l1, res = arglocs
        self.mc.ORR_rr(res.value, l0.value, l1.value)
    def emit_op_int_xor(self, op, arglocs):
        # res = l0 ^ l1
        l0, l1, res = arglocs
        self.mc.EOR_rr(res.value, l0.value, l1.value)
    def emit_op_int_lshift(self, op, arglocs):
        # res = l0 << l1 (logical shift left)
        l0, l1, res = arglocs
        self.mc.LSL_rr(res.value, l0.value, l1.value)
    def emit_op_int_rshift(self, op, arglocs):
        # res = l0 >> l1 (arithmetic shift: sign-propagating)
        l0, l1, res = arglocs
        self.mc.ASR_rr(res.value, l0.value, l1.value)
    def emit_op_uint_rshift(self, op, arglocs):
        # res = l0 >> l1 (logical shift: zero-filling)
        l0, l1, res = arglocs
        self.mc.LSR_rr(res.value, l0.value, l1.value)
    def emit_op_uint_mul_high(self, op, arglocs):
        # res = high 64 bits of the unsigned product l0 * l1
        l0, l1, res = arglocs
        self.mc.UMULH_rr(res.value, l0.value, l1.value)
    def emit_int_comp_op(self, op, l0, l1):
        # CMP with immediate or register second operand; sets NZCV
        if l1.is_imm():
            self.mc.CMP_ri(l0.value, l1.getint())
        else:
            self.mc.CMP_rr(l0.value, l1.value)
    def emit_float_comp_op(self, op, l0, l1):
        # FCMP on double registers; sets the float condition flags
        self.mc.FCMP_dd(l0.value, l1.value)
emit_comp_op_float_lt = gen_float_comp_op_cc('float_lt', c.VFP_LT)
emit_comp_op_float_le = gen_float_comp_op_cc('float_le', c.VFP_LE)
emit_comp_op_float_eq = gen_float_comp_op_cc('float_eq', c.EQ)
emit_comp_op_float_ne = gen_float_comp_op_cc('float_ne', c.NE)
emit_comp_op_float_gt = gen_float_comp_op_cc('float_gt', c.GT)
emit_comp_op_float_ge = gen_float_comp_op_cc('float_ge', c.GE)
    def emit_comp_op_int_lt(self, op, arglocs):
        # signed <; result in flags, condition code returned
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.LT
    def emit_comp_op_int_le(self, op, arglocs):
        # signed <=
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.LE
    def emit_comp_op_int_gt(self, op, arglocs):
        # signed >
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.GT
    def emit_comp_op_int_ge(self, op, arglocs):
        # signed >=
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.GE
    def emit_comp_op_int_eq(self, op, arglocs):
        # equality; pointers compare as integers, hence the aliases
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.EQ
    emit_comp_op_ptr_eq = emit_comp_op_instance_ptr_eq = emit_comp_op_int_eq
    def emit_comp_op_int_ne(self, op, arglocs):
        # inequality; pointers compare as integers, hence the aliases
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.NE
    emit_comp_op_ptr_ne = emit_comp_op_instance_ptr_ne = emit_comp_op_int_ne
    def emit_comp_op_uint_lt(self, op, arglocs):
        # unsigned < (LO condition)
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.LO
    def emit_comp_op_uint_le(self, op, arglocs):
        # unsigned <= (LS condition)
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.LS
    def emit_comp_op_uint_gt(self, op, arglocs):
        # unsigned > (HI condition)
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.HI
    def emit_comp_op_uint_ge(self, op, arglocs):
        # unsigned >= (HS condition)
        self.emit_int_comp_op(op, arglocs[0], arglocs[1])
        return c.HS
emit_op_int_lt = gen_comp_op('emit_op_int_lt', c.LT)
emit_op_int_le = gen_comp_op('emit_op_int_le', c.LE)
emit_op_int_gt = gen_comp_op('emit_op_int_gt', c.GT)
emit_op_int_ge = gen_comp_op('emit_op_int_ge', c.GE)
emit_op_int_eq = gen_comp_op('emit_op_int_eq', c.EQ)
emit_op_int_ne = gen_comp_op('emit_op_int_ne', c.NE)
emit_op_uint_lt = gen_comp_op('emit_op_uint_lt', c.LO)
emit_op_uint_gt = gen_comp_op('emit_op_uint_gt', c.HI)
emit_op_uint_le = gen_comp_op('emit_op_uint_le', c.LS)
emit_op_uint_ge = gen_comp_op('emit_op_uint_ge', c.HS)
emit_op_ptr_eq = emit_op_instance_ptr_eq = emit_op_int_eq
emit_op_ptr_ne = emit_op_instance_ptr_ne = emit_op_int_ne
    def emit_op_int_is_true(self, op, arglocs):
        # res = (reg != 0).  NOTE: CSET_r_flag takes the *inverse*
        # condition (cf. gen_comp_op above), so EQ here yields nonzero-ness.
        reg, res = arglocs
        self.mc.CMP_ri(reg.value, 0)
        self.mc.CSET_r_flag(res.value, c.EQ)
    def emit_comp_op_int_is_true(self, op, arglocs):
        # flags-only form: "reg != 0" is the NE condition after CMP #0
        self.mc.CMP_ri(arglocs[0].value, 0)
        return c.NE
    def emit_op_int_is_zero(self, op, arglocs):
        # res = (reg == 0); NE is inverted by CSET_r_flag (cf. above)
        reg, res = arglocs
        self.mc.CMP_ri(reg.value, 0)
        self.mc.CSET_r_flag(res.value, c.NE)
    def emit_comp_op_int_is_zero(self, op, arglocs):
        # flags-only form: "reg == 0" is the EQ condition after CMP #0
        self.mc.CMP_ri(arglocs[0].value, 0)
        return c.EQ
    def emit_op_int_neg(self, op, arglocs):
        # res = 0 - reg (subtract from the zero register)
        reg, res = arglocs
        self.mc.SUB_rr_shifted(res.value, r.xzr.value, reg.value)
    def emit_op_int_invert(self, op, arglocs):
        # res = ~reg (bitwise NOT via MVN)
        reg, res = arglocs
        self.mc.MVN_rr(res.value, reg.value)
    def emit_op_int_force_ge_zero(self, op, arglocs):
        # res = arg if arg >= 0 else 0
        arg, res = arglocs
        self.mc.MOVZ_r_u16(res.value, 0, 0)    # res = 0
        self.mc.CMP_ri(arg.value, 0)
        self.mc.B_ofs_cond(8, c.LT)  # jump over the next instruction
        self.mc.MOV_rr(res.value, arg.value)
        # jump here
    def emit_op_int_signext(self, op, arglocs):
        # Sign-extend the low 1/2/4 bytes of `arg` into `res`.
        arg, numbytes, res = arglocs
        assert numbytes.is_imm()
        if numbytes.value == 1:
            self.mc.SXTB_rr(res.value, arg.value)   # byte
        elif numbytes.value == 2:
            self.mc.SXTH_rr(res.value, arg.value)   # halfword
        elif numbytes.value == 4:
            self.mc.SXTW_rr(res.value, arg.value)   # word
        else:
            raise AssertionError("bad number of bytes")
    def emit_op_increment_debug_counter(self, op, arglocs):
        # load-increment-store of the counter word at [base]
        base_loc, value_loc = arglocs
        self.mc.LDR_ri(value_loc.value, base_loc.value, 0)
        self.mc.ADD_ri(value_loc.value, value_loc.value, 1)
        self.mc.STR_ri(value_loc.value, base_loc.value, 0)
    def emit_op_check_memory_error(self, op, arglocs):
        # propagate MemoryError when the allocation result is NULL
        self.propagate_memoryerror_if_reg_is_null(arglocs[0])
    def _genop_same_as(self, op, arglocs):
        # location-to-location move, skipped when src and dst coincide
        argloc, resloc = arglocs
        if argloc is not resloc:
            self.mov_loc_loc(argloc, resloc)
    emit_op_same_as_i = _genop_same_as
    emit_op_same_as_r = _genop_same_as
    emit_op_same_as_f = _genop_same_as
    # pointer<->int casts are pure moves at the machine level
    emit_op_cast_ptr_to_int = _genop_same_as
    emit_op_cast_int_to_ptr = _genop_same_as
    def emit_op_float_add(self, op, arglocs):
        # res = arg1 + arg2 (double precision)
        arg1, arg2, res = arglocs
        self.mc.FADD_dd(res.value, arg1.value, arg2.value)
    def emit_op_float_sub(self, op, arglocs):
        # res = arg1 - arg2 (double precision)
        arg1, arg2, res = arglocs
        self.mc.FSUB_dd(res.value, arg1.value, arg2.value)
    def emit_op_float_mul(self, op, arglocs):
        # res = arg1 * arg2 (double precision)
        arg1, arg2, res = arglocs
        self.mc.FMUL_dd(res.value, arg1.value, arg2.value)
    def emit_op_float_truediv(self, op, arglocs):
        # res = arg1 / arg2 (double precision)
        arg1, arg2, res = arglocs
        self.mc.FDIV_dd(res.value, arg1.value, arg2.value)
    def emit_op_convert_float_bytes_to_longlong(self, op, arglocs):
        # bitwise move: FP register -> general register (UMOV)
        arg, res = arglocs
        self.mc.UMOV_rd(res.value, arg.value)
    def emit_op_convert_longlong_bytes_to_float(self, op, arglocs):
        # bitwise move: general register -> FP register (INS)
        arg, res = arglocs
        self.mc.INS_dr(res.value, arg.value)
    def math_sqrt(self, op, arglocs):
        # res = sqrt(arg), double precision (FSQRT)
        arg, res = arglocs
        self.mc.FSQRT_dd(res.value, arg.value)
    def threadlocalref_get(self, op, arglocs):
        # Read a field out of the thread-local struct: first load the
        # saved threadlocal base address from the stack, then load the
        # field at the offset given by op arg 1, with size/signedness
        # taken from the call descriptor.
        res_loc, = arglocs
        ofs_loc = self.imm(op.getarg(1).getint())
        calldescr = op.getdescr()
        ofs = self.saved_threadlocal_addr
        self.load_reg(self.mc, res_loc, r.sp, ofs)
        scale = get_scale(calldescr.get_result_size())
        signed = (calldescr.is_result_signed() != 0)
        self._load_from_mem(res_loc, res_loc, ofs_loc, scale, signed)
emit_op_float_lt = gen_float_comp_op('float_lt', c.VFP_LT)
emit_op_float_le = gen_float_comp_op('float_le', c.VFP_LE)
emit_op_float_eq = gen_float_comp_op('float_eq', c.EQ)
emit_op_float_ne = gen_float_comp_op('float_ne', c.NE)
emit_op_float_gt = gen_float_comp_op('float_gt', c.GT)
emit_op_float_ge = gen_float_comp_op('float_ge', c.GE)
    def emit_op_float_neg(self, op, arglocs):
        # res = -arg (double precision)
        arg, res = arglocs
        self.mc.FNEG_d(res.value, arg.value)
    def emit_op_float_abs(self, op, arglocs):
        # res = abs(arg) (double precision)
        arg, res = arglocs
        self.mc.FABS_d(res.value, arg.value)
    def emit_op_cast_float_to_int(self, op, arglocs):
        # double -> signed int, truncating toward zero (FCVTZS)
        arg, res = arglocs
        self.mc.FCVTZS_d(res.value, arg.value)
    def emit_op_cast_int_to_float(self, op, arglocs):
        # signed int -> double (SCVTF)
        arg, res = arglocs
        self.mc.SCVTF_r(res.value, arg.value)
    def emit_op_load_from_gc_table(self, op, arglocs):
        # load the GC-table entry selected by the constant arg 0
        res_loc, = arglocs
        index = op.getarg(0).getint()
        self.load_from_gc_table(res_loc.value, index)
    def emit_op_load_effective_address(self, op, arglocs):
        # arglocs = [base, scaled, static_offset, scale, result]
        self._gen_address(arglocs[4], arglocs[0], arglocs[1], arglocs[3].value,
                          arglocs[2].value)
    # result = base_loc + (scaled_loc << scale) + static_offset
    def _gen_address(self, result, base_loc, scaled_loc, scale=0, static_offset=0):
        assert scaled_loc.is_core_reg()
        assert base_loc.is_core_reg()
        if scale > 0:
            # shift the index through the scratch register ip0
            self.mc.LSL_ri(r.ip0.value, scaled_loc.value, scale)
            scaled_loc = r.ip0
        else:
            scaled_loc = scaled_loc
        self.mc.ADD_rr(result.value, base_loc.value, scaled_loc.value)
        self.mc.ADD_ri(result.value, result.value, static_offset)
    def emit_op_debug_merge_point(self, op, arglocs):
        # bookkeeping-only operations: no machine code is emitted
        pass
    emit_op_jit_debug = emit_op_debug_merge_point
    emit_op_keepalive = emit_op_debug_merge_point
    emit_op_enter_portal_frame = emit_op_debug_merge_point
    emit_op_leave_portal_frame = emit_op_debug_merge_point
# -------------------------------- fields -------------------------------
    def emit_op_gc_store(self, op, arglocs):
        # store `value` of `size_loc` bytes at base + ofs
        value_loc, base_loc, ofs_loc, size_loc = arglocs
        scale = get_scale(size_loc.value)
        self._write_to_mem(value_loc, base_loc, ofs_loc, scale)
    def _emit_op_gc_load(self, op, arglocs):
        # load from base + ofs; a negative nsize encodes a signed load
        base_loc, ofs_loc, res_loc, nsize_loc = arglocs
        nsize = nsize_loc.value
        signed = (nsize < 0)
        scale = get_scale(abs(nsize))
        self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed)
    emit_op_gc_load_i = _emit_op_gc_load
    emit_op_gc_load_r = _emit_op_gc_load
    emit_op_gc_load_f = _emit_op_gc_load
    def emit_op_gc_store_indexed(self, op, arglocs):
        # store `value` at base + index + static ofs (index NOT scaled)
        value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs
        assert index_loc.is_core_reg()
        # add the base offset
        if ofs_loc.value != 0:
            if check_imm_arg(ofs_loc.value):
                self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
            else:
                # ofs_loc.value is too large for an ADD_ri
                self.load(r.ip0, ofs_loc)
                self.mc.ADD_rr(r.ip0.value, r.ip0.value, index_loc.value)
            index_loc = r.ip0
        scale = get_scale(size_loc.value)
        self._write_to_mem(value_loc, base_loc, index_loc, scale)
    def _emit_op_gc_load_indexed(self, op, arglocs):
        # load from base + index + static ofs; negative nsize = signed
        res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs
        assert index_loc.is_core_reg()
        nsize = nsize_loc.value
        signed = (nsize < 0)
        # add the base offset
        if ofs_loc.value != 0:
            if check_imm_arg(ofs_loc.value):
                self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
            else:
                # ofs_loc.value is too large for an ADD_ri
                self.load(r.ip0, ofs_loc)
                self.mc.ADD_rr(r.ip0.value, r.ip0.value, index_loc.value)
            index_loc = r.ip0
        #
        scale = get_scale(abs(nsize))
        self._load_from_mem(res_loc, base_loc, index_loc, scale, signed)
    emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed
    emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed
    emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed
    def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale):
        # Write a value of size '1 << scale' at the address
        # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale
        # the offset!
        assert base_loc.is_core_reg()
        if scale == 3:
            # WORD size
            if value_loc.is_float():
                # FP store, immediate vs register offset
                if ofs_loc.is_imm():
                    self.mc.STR_di(value_loc.value, base_loc.value,
                                   ofs_loc.value)
                else:
                    self.mc.STR_dd(value_loc.value, base_loc.value,
                                   ofs_loc.value)
                return
            # integer word store, immediate vs register offset
            if ofs_loc.is_imm():
                self.mc.STR_ri(value_loc.value, base_loc.value,
                               ofs_loc.value)
            else:
                self.mc.STR_size_rr(3, value_loc.value, base_loc.value,
                                    ofs_loc.value)
        else:
            # sub-word store of 1 << scale bytes
            if ofs_loc.is_imm():
                self.mc.STR_size_ri(scale, value_loc.value, base_loc.value,
                                    ofs_loc.value)
            else:
                self.mc.STR_size_rr(scale, value_loc.value, base_loc.value,
                                    ofs_loc.value)
def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale,
signed=False):
# Load a value of '1 << scale' bytes, from the memory location
# 'base_loc + ofs_loc'. Note that 'scale' is not used to scale
# the offset!
#
if scale == 3:
# WORD
if res_loc.is_float():
if ofs_loc.is_imm():
self.mc.LDR_di(res_loc.value, base_loc.value, ofs_loc.value)
else:
self.mc.LDR_dr(res_loc.value, base_loc.value, ofs_loc.value)
return
| |
an instance of datastore_query.Cursor.')
self._cursor = value
cursor = property(fget=_GetCursor, fset=_SetCursor)
  def _GetLimit(self):
    """Getter to be used for public limit property on query info.

    Returns:
      The currently set limit, or None if no limit has been set yet.
    """
    return self._limit
def _SetLimit(self, value):
"""Setter to be used for public limit property on query info.
Args:
value: A potential value for a limit.
Raises:
AttributeError: if query on the object is already final.
AttributeError: if the limit has already been set.
TypeError: if the value to be set is not a positive integer.
"""
if self._query_final is not None:
raise AttributeError('Can\'t set limit. Query info is final.')
if self._limit is not None:
raise AttributeError('Limit can\'t be set twice.')
if not isinstance(value, (int, long)) or value < 1:
raise TypeError('Limit must be a positive integer.')
self._limit = value
limit = property(fget=_GetLimit, fset=_SetLimit)
  def _GetOrder(self):
    """Getter to be used for public order property on query info.

    Returns:
      The currently set order string, or None if it has not been set.
    """
    return self._order
def _SetOrderAttrs(self):
"""Helper method to set _order_attrs using the value of _order.
If _order is not set, simply returns, else splits _order by commas and then
looks up each value (or its negation) in the _properties of the entity on
the query info object.
We look up directly in _properties rather than using the attribute names
on the object since only NDB property names will be used for field names.
Raises:
AttributeError: if one of the attributes in the order is not a property
on the entity.
"""
if self._order is None:
return
unclean_attr_names = self._order.strip().split(',')
result = []
for attr_name in unclean_attr_names:
ascending = True
if attr_name.startswith('-'):
ascending = False
attr_name = attr_name[1:]
attr = self._entity._properties.get(attr_name)
if attr is None:
raise AttributeError('Order attribute %s not defined.' % (attr_name,))
if ascending:
result.append(+attr)
else:
result.append(-attr)
self._order_attrs = tuple(result)
def _SetOrder(self, value):
"""Setter to be used for public order property on query info.
Sets the value of _order and attempts to set _order_attrs as well
by valling _SetOrderAttrs, which uses the value of _order.
If the passed in value is None, but the query is not final and the
order has not already been set, the method will return without any
errors or data changed.
Args:
value: A potential value for an order.
Raises:
AttributeError: if query on the object is already final.
AttributeError: if the order has already been set.
TypeError: if the order to be set is not a string.
"""
if self._query_final is not None:
raise AttributeError('Can\'t set order. Query info is final.')
if self._order is not None:
raise AttributeError('Order can\'t be set twice.')
if value is None:
return
elif not isinstance(value, basestring):
raise TypeError('Order must be a string.')
self._order = value
self._SetOrderAttrs()
order = property(fget=_GetOrder, fset=_SetOrder)
class EndpointsMetaModel(ndb.MetaModel):
  """Metaclass for EndpointsModel.
  This exists to create new instances of the mutable class attributes for
  subclasses and to verify ProtoRPC specific properties.
  """
  def __init__(cls, name, bases, classdict):
    """Verifies additional ProtoRPC properties on an NDB model."""
    super(EndpointsMetaModel, cls).__init__(name, bases, classdict)
    # Reset the `_message_fields_schema` to `None` unless it was explicitly
    # mentioned in the class definition. It's possible for this value to be
    # set if a superclass had this value set by `_VerifyMessageFieldsSchema`
    # then this subclass would keep that value, even if that was not the
    # intended behavior.
    if '_message_fields_schema' not in classdict:
      cls._message_fields_schema = None
    # Fresh (non-shared) mutable containers for each concrete model class,
    # so subclasses never mutate state stored on a parent class.
    cls._alias_properties = {}
    cls._proto_models = {}
    cls._proto_collections = {}
    cls._resource_containers = {}
    # Start from the default NDB-property -> proto-field mapping; it may be
    # extended below via custom mappings, message_field attrs or inference.
    cls._property_to_proto = ndb_utils.NDB_PROPERTY_TO_PROTO.copy()
    cls._FixUpAliasProperties()
    cls._VerifyMessageFieldsSchema()
    cls._VerifyProtoMapping()
  def _FixUpAliasProperties(cls):
    """Updates the alias properties map and verifies each alias property.
    Raises:
      AttributeError: if an alias property is defined beginning with
        an underscore.
      AttributeError: if an alias property is defined that conflicts with
        an NDB property.
    """
    for attr_name in dir(cls):
      prop = getattr(cls, attr_name, None)
      if isinstance(prop, EndpointsAliasProperty):
        if attr_name.startswith('_'):
          raise AttributeError('EndpointsAliasProperty %s cannot begin with an '
                               'underscore character.' % (attr_name,))
        if attr_name in cls._properties:
          raise AttributeError(PROPERTY_COLLISION_TEMPLATE % (attr_name,))
        # Bind the property to its attribute name and index it by its
        # stored name (which may differ from the attribute name).
        prop._FixUp(attr_name)
        cls._alias_properties[prop._name] = prop
  def _VerifyMessageFieldsSchema(cls):
    """Verifies that the preset message fields correspond to actual properties.
    If no message fields schema was set on the class, sets the schema using the
    default fields determined by the NDB properties and alias properties
    defined. In either case, converts the passed in fields to an instance of
    MessageFieldsSchema and sets that as the value of _message_fields_schema
    on the class.
    Raises:
      TypeError: if a message fields schema was set on the class that is not a
        list, tuple, dictionary, or MessageFieldsSchema instance.
    """
    message_fields_schema = getattr(cls, '_message_fields_schema', None)
    # Also need to check we aren't re-using from EndpointsModel
    base_schema = getattr(BASE_MODEL_CLASS, '_message_fields_schema', None)
    if message_fields_schema is None or message_fields_schema == base_schema:
      message_fields_schema = cls._DefaultFields()
    elif not isinstance(message_fields_schema,
                        (list, tuple, dict, MessageFieldsSchema)):
      raise TypeError(BAD_FIELDS_SCHEMA_TEMPLATE %
                      (cls.__name__, message_fields_schema.__class__.__name__))
    else:
      # Explicitly listed fields must each resolve to an NDB or alias
      # property on this class.
      for attr in message_fields_schema:
        _VerifyProperty(cls, attr)
    cls._message_fields_schema = MessageFieldsSchema(message_fields_schema,
                                                     name=cls.__name__)
  def _VerifyProtoMapping(cls):
    """Verifies that each property on the class has an associated proto mapping.
    First checks if there is a _custom_property_to_proto dictionary present and
    then overrides the class to proto mapping found in _property_to_proto.
    Then, for each property (NDB or alias), tries to add a mapping first by
    checking for a message field attribute, and then by trying to infer based
    on property subclass.
    Raises:
      TypeError: if a key from _custom_property_to_proto is not a valid NDB
        property. (We don't allow EndpointsAliasProperty here because it
        is not meant to be subclassed and defines a message_field).
      TypeError: if after checking _custom_property_to_proto, message_field and
        inference from a superclass, no appropriate mapping is found in
        _property_to_proto.
    """
    custom_property_to_proto = getattr(cls, '_custom_property_to_proto', None)
    if isinstance(custom_property_to_proto, dict):
      for key, value in custom_property_to_proto.iteritems():
        if not utils.IsSubclass(key, ndb.Property):
          raise TypeError('Invalid property class: %s.' % (key,))
        cls._property_to_proto[key] = value
    for prop in cls._EndpointsPropertyItervalues():
      property_class = prop.__class__
      # Two fallback strategies, tried in order; each is a no-op when a
      # mapping already exists.
      cls._TryAddMessageField(property_class)
      cls._TryInferSuperclass(property_class)
      if property_class not in cls._property_to_proto:
        raise TypeError('No converter present for property %s' %
                        (property_class.__name__,))
  # TODO(dhermes): Consider renaming this optional property attr from
  #                "message_field" to something more generic. It can either be
  #                a field or it can be a method with the signature
  #                (property instance, integer index)
  def _TryAddMessageField(cls, property_class):
    """Tries to add a proto mapping for a property class using a message field.
    If the property class is already in the proto mapping, does nothing.
    Args:
      property_class: The class of a property from a model.
    """
    if property_class in cls._property_to_proto:
      return
    message_field = getattr(property_class, 'message_field', None)
    if message_field is not None:
      cls._property_to_proto[property_class] = message_field
  def _TryInferSuperclass(cls, property_class):
    """Tries to add a proto mapping for a property class by using a base class.
    If the property class is already in the proto mapping, does nothing.
    Descends up the class hierarchy until an ancestor class has more than one
    base class or until ndb.Property is reached. If any class up the hierarchy
    is already in the proto mapping, the method/field for the superclass is also
    set for the property class in question.
    Args:
      property_class: The class of a property from a model.
    """
    if (property_class in cls._property_to_proto or
        utils.IsSubclass(property_class, EndpointsAliasProperty)):
      return
    bases = property_class.__bases__
    # Walk single-inheritance chains only; stop at ndb.Property or at any
    # multiple-inheritance junction.
    while len(bases) == 1 and bases[0] != ndb.Property:
      base = bases[0]
      if base in cls._property_to_proto:
        cls._property_to_proto[property_class] = cls._property_to_proto[base]
        return
      else:
        bases = base.__bases__
class EndpointsModel(ndb.Model):
"""Subclass of NDB model that enables translation to ProtoRPC message classes.
Also uses a subclass of ndb.MetaModel as the metaclass, to allow for custom
behavior (particularly property verification) on class creation. Two types of
properties are allowed, the standard NDB property, which ends up in a
_properties dictionary and {EndpointsAliasProperty}s, which end up in an
_alias_properties dictionary. They can be accessed simultaneously through
_GetEndpointsProperty.
As with NDB, you cannot use the same property object to describe multiple
properties -- you must create separate property objects for each property.
In addition to _alias_properties, there are several other class variables that
can be used to augment the default NDB model behavior:
_property_to_proto: This is a mapping from properties to ProtoRPC message
fields or methods which can take a property and an index and convert
them to a message field. It starts | |
# Wait until process is complete and return stdout/stderr
self.stdout_, self.stderr_ = self.process_.communicate() # Use this .communicate instead of .wait to avoid zombie process that hangs due to defunct. Removed timeout b/c it's not available in Python 2
# Set stdout and stderr to the contents of the file object created...or just the path then do a get_stdout() function
if hasattr(f_stdout, "close"):
f_stdout.close()
if hasattr(f_stderr, "close"):
f_stderr.close()
# Return code
self.returncode_ = self.process_.returncode
self.duration_ = format_duration(start_time)
# # stdout
# if isinstance(self.stdout_, bytes):
# self.stdout_ = self.stdout_.decode("utf-8")
# self._write_output(data=self.stdout_, filepath=write_stdout)
# # stderr
# if isinstance(self.stderr_, bytes):
# self.stderr_ = self.stderr_.decode("utf-8")
# self._write_output(data=self.stderr_, filepath=write_stderr)
# Return code
self._write_output(data=self.returncode_, filepath=write_returncode)
# Check
if not errors_ok:
if self.returncode_ not in acceptable_returncodes:
if error_message is not None:
print(error_message, file=f_verbose)
sys.exit(self.returncode_)
if self.returncode_ in acceptable_returncodes:
if completed_message is not None:
print(completed_message, file=f_verbose)
# Create checkpoint
if checkpoint is not None:
if self.returncode_ in acceptable_returncodes:
duration = format_duration(start_time)
with open(checkpoint, "w") as f_checkpoint:
print(get_timestamp(), duration, file=f_checkpoint)
# Close file object
if self.f_cmds not in {None, sys.stdout, sys.stderr}:
if close_file:
self.close()
return self
# Executable
class ExecutablePipeline(object):
    """Ordered pipeline of `Command` steps with checkpointing and logging.

    Typical usage: construct, register steps with `add_step`, call
    `compile()` to assign log/checkpoint paths and validate step numbering,
    then `execute()` to run the steps in order.

    Fixes relative to the previous revision:
      * `set_order_of_executables` now actually stores the new step numbers
        (it was a no-op: it looked up each id and discarded `step`).
      * `execute` accepts restart_from_checkpoint="preprocessing"; the string
        was previously compared only *after* an int() conversion that raised
        ValueError, making the branch unreachable.
      * The checkpoint-based I/O-validation bypass no longer calls
        os.path.exists(None) when no checkpoint_directory is configured.
      * `add_step` no longer uses a shared mutable default for
        `acceptable_returncodes`.
    """
    def __init__(self,
        name=None,
        description=None,
        checkpoint_directory=None,
        log_directory=None,
        checkpoint_message_notexists="Running. .. ... .....",
        checkpoint_message_exists="Loading. .. ... .....",
        f_cmds=None,
        f_verbose=None,
        bypass_io_validation_if_checkpoints_exist=False,
        **metadata
        ):
        """Initialize the pipeline; directories are created when provided.

        Args:
            name/description: labels used in the banner printed by execute().
            checkpoint_directory: where per-step checkpoint files are written.
            log_directory: where per-step stdout/stderr/returncode files go.
            f_cmds/f_verbose: file objects for command echoing and progress.
            bypass_io_validation_if_checkpoints_exist: skip I/O validation
                for steps whose checkpoint file already exists.
            **metadata: arbitrary extra metadata stored on the pipeline.
        """
        self.name = name
        self.description = description
        self.metadata = metadata
        self.executables = dict()
        self.f_cmds = f_cmds
        self.f_verbose = f_verbose
        self.checkpoint_message_notexists = checkpoint_message_notexists
        self.checkpoint_message_exists = checkpoint_message_exists
        # Log directory
        self.log_directory = log_directory
        if log_directory is not None:
            assert is_path_like(log_directory, path_must_exist=False), "`{}` is not path-like".format(log_directory)
            log_directory = format_path(log_directory)
            self.log_directory = create_directory(log_directory)
        # Checkpoint directory
        self.checkpoint_directory = checkpoint_directory
        if checkpoint_directory is not None:
            assert is_path_like(checkpoint_directory, path_must_exist=False), "`{}` is not path-like".format(checkpoint_directory)
            checkpoint_directory = format_path(checkpoint_directory)
            self.checkpoint_directory = create_directory(checkpoint_directory)
        self.bypass_io_validation_if_checkpoints_exist = bypass_io_validation_if_checkpoints_exist
        # Compiled
        self.compiled = False

    # Add step in pipeline
    def add_step(self,
        # Required
        id,
        cmd,
        step="infer",
        log_prefix="infer",
        # Descriptions
        description=None,
        # I/O
        input_filepaths=None,
        output_filepaths=None,
        # Utility
        dry="infer",
        errors_ok=False,
        validate_inputs=True,
        validate_outputs=True,
        acceptable_returncodes=None,
        ):
        """Register one command as a pipeline step; returns self.

        `step` defaults to the next free step number.  `acceptable_returncodes`
        defaults to [0].
        """
        # FIX: [0] was previously a mutable default argument shared across
        # calls; keep the default behavior without the shared object.
        if acceptable_returncodes is None:
            acceptable_returncodes = [0]
        # Step
        if step == "infer":
            step = len(self.executables) + 1
        assert isinstance(step, int), "Please specify an integer step."
        attrs = dict()
        attrs["step"] = step
        # Command
        attrs["executable"] = Command(cmd,
            name=id,
            description=description,
            f_cmds=self.f_cmds,
        )
        attrs["description"] = description
        attrs["log_prefix"] = log_prefix
        # I/O: normalize to lists (a lone string becomes a 1-element list)
        if isinstance(input_filepaths, str):
            input_filepaths = [input_filepaths]
        if input_filepaths is None:
            input_filepaths = list()
        attrs["input_filepaths"] = input_filepaths
        if isinstance(output_filepaths, str):
            output_filepaths = [output_filepaths]
        if output_filepaths is None:
            output_filepaths = list()
        attrs["output_filepaths"] = output_filepaths
        # Checkpoint/log paths are filled in by compile()
        attrs["checkpoint"] = None
        attrs["write_stdout"] = None
        attrs["write_stderr"] = None
        attrs["write_returncode"] = None
        attrs["error_message"] = None
        attrs["completed_message"] = None
        attrs["acceptable_returncodes"] = acceptable_returncodes
        # Checkpoint
        attrs["dry"] = dry
        # Validation
        attrs["errors_ok"] = errors_ok
        attrs["validate_inputs"] = validate_inputs
        attrs["validate_outputs"] = validate_outputs
        self.executables[id] = attrs
        return self

    # Set attribute
    def set_attribute(self, id, attribute, value):
        """Set a single attribute on a registered step; returns self."""
        assert id in self.executables, "`{}` not in `executables`".format(id)
        self.executables[id][attribute] = value
        return self

    # Set the order of operations
    def set_order_of_executables(self, ordering):
        """Reassign step numbers from an {id: step} mapping; returns self.

        FIX: the previous implementation only *looked up* each id
        (`self.executables[id]`) and discarded `step`, so the method
        silently did nothing.  Re-run compile() after reordering.
        """
        for id, step in ordering.items():
            self.executables[id]["step"] = step
        return self

    # Compile everything and get ready for execution
    def compile(self):
        """Validate step numbering and assign checkpoint/log paths per step."""
        # Check duplicate steps
        steps = list()
        for id, attrs in self.executables.items():
            steps.append(attrs["step"])
        assert all(map(lambda x: x == 1, Counter(steps).values())), "Please check for duplicate steps."
        # Check missing steps
        assert set(steps) == set(range(min(steps), max(steps)+1)), "Please check for missing steps."
        # Check boolean attrs
        for id, attrs in self.executables.items():
            for attribute in ["errors_ok", "validate_inputs", "validate_outputs"]:
                assert isinstance(attrs[attribute], bool), "`{}` must be a boolean type".format(attribute)
        # Compiled steps
        self.compiled_steps_ = OrderedDict()
        print(format_header(". .. ... Compiling ... .. .", "="), file=self.f_verbose)
        for id, attrs in sorted(self.executables.items(), key=lambda x:x[1]["step"]):
            # Logfile name
            if attrs["log_prefix"] in {"infer", None}:
                attrs["log_prefix"] = "__".join([str(attrs["step"]).strip(), format_filename(id, replacement_character="-").strip()])
            assert check_filename(attrs["log_prefix"]), "Please format the filename `{}` so it only inlcudes alphanumeric characters, '.', '_', and '-'.".format(attrs["log_prefix"])
            # Checkpoint
            if self.checkpoint_directory is not None:
                attrs["checkpoint"] = os.path.join(self.checkpoint_directory, attrs["log_prefix"])
            # Log files
            if self.log_directory is not None:
                attrs["write_stdout"] = os.path.join(self.log_directory, "{}.o".format(attrs["log_prefix"]))
                attrs["write_stderr"] = os.path.join(self.log_directory, "{}.e".format(attrs["log_prefix"]))
                attrs["write_returncode"] = os.path.join(self.log_directory, "{}.returncode".format(attrs["log_prefix"]))
                attrs["error_message"] = "Check log files to diagnose error:\ncat {}.*".format(os.path.join(self.log_directory, attrs["log_prefix"]))
                attrs["completed_message"] = "\nLog files:\n{}.*".format(os.path.join(self.log_directory, attrs["log_prefix"])) # Use glob here
            # Add step order
            self.compiled_steps_[attrs["step"]] = id
            # Update attributes
            self.executables[id].update(attrs)
            print("Step: {}, {} | log_prefix = {} | {}".format(attrs["step"], id, attrs["log_prefix"], attrs["description"]), file=self.f_verbose)
        # Compiled
        self.compiled = True
        return self

    # Execute pipeline
    def execute(self, steps=None, description="Executing pipeline", restart_from_checkpoint=None):
        """Run the compiled steps in step order; returns self.

        Args:
            steps: optional collection of step numbers to run (default: all).
            description: label for the progress display.
            restart_from_checkpoint: step number (or the string
                "preprocessing", meaning step 1) from which to re-run;
                checkpoints, logs and intermediate outputs of that step and
                all later steps are removed first.
        """
        start_time = time.time()
        assert self.compiled, "Please compile before continuing."
        if steps is None:
            steps = list(self.compiled_steps_.keys())
        # Banner
        if self.name is not None:
            if self.description is None:
                print(format_header(". .. ... {} ... .. .".format(self.name), "_"), file=self.f_verbose)
            else:
                print(format_header(". .. ... {} || {} ... .. .".format(self.name, self.description), "_"), file=self.f_verbose)
            print("", file=self.f_verbose)
        if restart_from_checkpoint is not None:
            # FIX: translate the "preprocessing" sentinel *before* the int()
            # conversion; previously int("preprocessing") raised ValueError,
            # so the string comparison below it could never run.
            if restart_from_checkpoint == "preprocessing":
                restart_from_checkpoint = 1
            restart_from_checkpoint = int(restart_from_checkpoint)
            assert restart_from_checkpoint in steps, "Cannot restart from checkpoint `{}` because it does not exist".format(restart_from_checkpoint)
            if self.checkpoint_directory is not None:
                target_checkpoint = restart_from_checkpoint
                print("Restarting pipeline from checkpoint:", target_checkpoint, file=self.f_verbose)
                for entry in scandir(self.checkpoint_directory):
                    if "_" in entry.name:
                        query_checkpoint = int(entry.name.split("_")[0].split(".")[0])
                        if query_checkpoint >= target_checkpoint:
                            print("...[-] {}".format(entry.path), file=self.f_verbose)
                            os.remove(entry.path)
                            # Intermediate files
                            for intermediate_filepath in self.executables[self.compiled_steps_[query_checkpoint]]["output_filepaths"]:
                                if os.path.exists(os.path.realpath(intermediate_filepath)):
                                    try:
                                        os.remove(intermediate_filepath)
                                        print("...[-] {}".format(intermediate_filepath), file=self.f_verbose)
                                    except OSError: # Changed from PermissionError 2020.01.15
                                        pass
                        else:
                            print("...[=] {}".format(entry.path), file=self.f_verbose)
                print("", file=self.f_verbose)
        for step, id in pv(self.compiled_steps_.items(), description=description):
            if step in steps:
                attrs = self.executables[id]
                executable = attrs["executable"]
                # Headers
                print(format_header(". {} .".format(id), "="), sep="", file=self.f_verbose)
                print("Input: ", attrs["input_filepaths"], "\n", "Output: ", attrs["output_filepaths"], "\n", sep="", file=self.f_verbose)
                print("Command:", file=self.f_verbose)
                print(attrs["executable"].cmd, "\n", file=self.f_verbose)
                # ===============
                # Execute command
                # ===============
                # Bypass io validation
                if self.bypass_io_validation_if_checkpoints_exist:
                    # FIX: attrs["checkpoint"] is None when no
                    # checkpoint_directory is configured; guard before
                    # calling os.path.exists.
                    if attrs["checkpoint"] is not None and os.path.exists(attrs["checkpoint"]):
                        attrs["validate_inputs"] = False
                        attrs["validate_outputs"] = False
                        print("! Bypassing I/O validation: {}\n".format(attrs["checkpoint"]), file=self.f_verbose)
                # Check inputs
                if attrs["validate_inputs"]:
                    input_filepaths = attrs["input_filepaths"]
                    if bool(input_filepaths):
                        assert is_nonstring_iterable(input_filepaths), "`input_filepaths` must be a non-string iterable"
                        validate_file_existence(input_filepaths, prologue="Validating the following input files:", f_verbose=self.f_verbose)
                        print("", file=self.f_verbose)
                # Execute
                executable.run(
                    prologue=id,
                    dry=attrs["dry"],
                    errors_ok=attrs["errors_ok"],
                    error_message=attrs["error_message"],
                    completed_message=attrs["completed_message"],
                    checkpoint=attrs["checkpoint"],
                    checkpoint_message_notexists=self.checkpoint_message_notexists,
                    checkpoint_message_exists=self.checkpoint_message_exists,
                    write_stdout=attrs["write_stdout"],
                    write_stderr=attrs["write_stderr"],
                    write_returncode=attrs["write_returncode"],
                    acceptable_returncodes=attrs["acceptable_returncodes"],
                    f_verbose=self.f_verbose,
                )
                # Check outputs
                if attrs["validate_outputs"]:
                    output_filepaths = attrs["output_filepaths"]
                    if bool(output_filepaths):
                        assert is_nonstring_iterable(output_filepaths), "`output_filepaths` must be a non-string iterable"
                        validate_file_existence(output_filepaths, prologue="\nValidating the following output files:", f_verbose=self.f_verbose)
                print("\nDuration: {}\n".format(format_duration(start_time)), file=self.f_verbose)
        self.duration_ = format_duration(start_time)
        print("\n", format_header("Total duration: {}".format(self.duration_), "."), sep="", file=self.f_verbose)
        return self

    # Load object
    @classmethod
    def from_file(cls, path, compression="infer", f_verbose=None, f_cmds=None):
        """Load a pickled pipeline, reattaching (unpicklable) file objects."""
        # NOTE: renamed the local from `cls` — the original shadowed the
        # classmethod's cls argument with the loaded object.
        pipeline = read_object(path=path, compression=compression)
        pipeline.f_verbose = f_verbose
        pipeline.f_cmds = f_cmds
        return pipeline

    def __getitem__(self, id):
        """Return the attribute dict for step `id`."""
        assert id in self.executables, "`{}` not in `executables`".format(id)
        return self.executables[id]
def main():
directories = dict()
directories["output"] = create_directory("pipeline_testing")
directories["checkpoints"] = create_directory(os.path.join(directories["output"], "checkpoints"))
directories["logs"] = create_directory(os.path.join(directories["output"], "logs"))
with open(os.path.join(directories["output"], "commands.sh"), "w") as f_cmds:
ep = ExecutablePipeline(name="Sith", description="The rule of two", f_cmds=f_cmds, checkpoint_directory=directories["checkpoints"], log_directory=directories["logs"], bypass_io_validation_if_checkpoints_exist=True)
# Step 1
output_filepaths = [os.path.join(directories["output"], "holocron.txt")]
message = "Two there should be; no more, no less. One to embody the power, the other to crave it.\n"
ep.add_step(id="<NAME>",
cmd="echo '{}' > {}".format(message, output_filepaths[0]),
output_filepaths=output_filepaths,
description = "Begin the rule of two",
errors_ok=False,
validate_outputs=True,
)
# Step 2
input_filepaths = [os.path.join(directories["output"], "holocron.txt")]
output_filepaths = [os.path.join(directories["output"], "*.txt")]
ep.add_step(id="<NAME>",
cmd="(cat {} && echo 'jedi') > {} ".format(input_filepaths[0], os.path.join(directories["output"], "wisdom.txt")),
input_filepaths=input_filepaths,
output_filepaths=output_filepaths,
description = "Read the holocron",
errors_ok=False,
validate_inputs=True,
validate_outputs=True,
)
# Step 3
input_filepaths = [os.path.join(directories["output"], "holocron.txt"), os.path.join(directories["output"], "wisdom.txt")]
output_directory = create_directory(os.path.join(directories["output"], "temple"))
output_filepaths = [os.path.join(output_directory, "data-crystal.txt"), output_directory]
cmds = [
"(",
"mkdir -p {}".format(output_directory),
"&&",
"cat {} {} > {}".format(input_filepaths[0], input_filepaths[1], output_filepaths[0]),
")",
]
ep.add_step(id="<NAME>",
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import binascii
import contextlib
import glob
import gzip
import itertools
import json
import os
import random
import six
import string
import subprocess
import sys
import tempfile
import traceback
import uuid
import errno
import numpy as np
# Repository root: strip the last two path components from this file's
# absolute path.  NOTE(review): splits on "/" literally — presumably this
# script only targets POSIX hosts; confirm before running on Windows.
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
    """Return the project version string read from java/pom.xml."""
    import xml.etree.ElementTree as ET
    pom_path = os.path.join(ARROW_HOME, 'java', 'pom.xml')
    root = ET.parse(pom_path).getroot()
    # Tags are namespaced in a Maven POM, so qualify the lookup.
    tag_pattern = '{http://maven.apache.org/POM/4.0.0}version'
    return list(root.findall(tag_pattern))[0].text
def guid():
    """Return a fresh random identifier as 32 lowercase hex characters."""
    unique = uuid.uuid4()
    return unique.hex
# from pandas: pool of single-character alphanumerics to sample from
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
                       dtype=(np.str_, 1))


def rands(nchars):
    """
    Generate one random byte string of length `nchars`.

    See `rands_array` if you want to create an array of random strings.
    """
    picked = np.random.choice(RANDS_CHARS, nchars)
    return ''.join(picked)
def tobytes(o):
    """Return `o` encoded as UTF-8 bytes if it is text, else unchanged."""
    if not isinstance(o, six.text_type):
        return o
    return o.encode('utf8')
def frombytes(o):
    """Return `o` decoded from UTF-8 if it is a byte string, else unchanged."""
    if not isinstance(o, six.binary_type):
        return o
    return o.decode('utf8')
# from the merge_arrow_pr.py script
def run_cmd(cmd):
    """Run `cmd` (list or space-separated string) and return decoded stdout.

    On failure, echoes the failed command and its captured output before
    re-raising the CalledProcessError.
    """
    if isinstance(cmd, six.string_types):
        cmd = cmd.split(' ')
    try:
        return frombytes(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
    except subprocess.CalledProcessError as e:
        # this avoids hiding the stdout / stderr of failed processes
        print('Command failed: %s' % ' '.join(cmd))
        print('With output:')
        print('--------------')
        print(frombytes(e.output))
        print('--------------')
        raise e
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
    """Base class for logical types in the JSON integration-test format.

    Subclasses supply _get_type() and _get_children().
    """

    def __init__(self, name, nullable=True):
        self.name = name
        self.nullable = nullable

    def get_json(self):
        """Serialize this field as an ordered, JSON-ready mapping."""
        return OrderedDict([
            ('name', self.name),
            ('type', self._get_type()),
            ('nullable', self.nullable),
            ('children', self._get_children())
        ])

    def _make_is_valid(self, size):
        """Random 0/1 validity mask of length `size`; all ones if not nullable."""
        if not self.nullable:
            return np.ones(size)
        return np.random.randint(0, 2, size=size)
class Column(object):
    """Base class for a generated column holding `count` values."""

    def __init__(self, name, count):
        self.name = name
        self.count = count

    def __len__(self):
        return self.count

    def _get_children(self):
        return []

    def _get_buffers(self):
        return []

    def get_json(self):
        """Serialize name, count, buffers and (when present) children."""
        entries = [('name', self.name), ('count', self.count)]
        entries.extend(self._get_buffers())
        children = self._get_children()
        # The 'children' key is only emitted for nested columns.
        if children:
            entries.append(('children', children))
        return OrderedDict(entries)
class PrimitiveType(DataType):
    """A leaf (non-nested) data type: primitives have no child fields."""
    def _get_children(self):
        return []
class PrimitiveColumn(Column):
    """Column of primitive values: a validity bitmap plus one data buffer."""

    def __init__(self, name, count, is_valid, values):
        super(PrimitiveColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.values = values

    def _encode_value(self, x):
        # Hook for subclasses that must stringify/encode values for JSON.
        return x

    def _get_buffers(self):
        validity = [int(flag) for flag in self.is_valid]
        data = [self._encode_value(v) for v in self.values]
        return [('VALIDITY', validity), ('DATA', data)]
# Default bounds used when generating integer test data: the int32 range.
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX  # bitwise NOT of the max == -2**31
class IntegerType(PrimitiveType):
    """Integer type with configurable signedness, bit width and data bounds."""
    def __init__(self, name, is_signed, bit_width, nullable=True,
                 min_value=TEST_INT_MIN,
                 max_value=TEST_INT_MAX):
        super(IntegerType, self).__init__(name, nullable=nullable)
        self.is_signed = is_signed
        self.bit_width = bit_width
        # Requested bounds for generated data; clamped to the physical range
        # of the type in _get_generated_data_bounds().
        self.min_value = min_value
        self.max_value = max_value
    def _get_generated_data_bounds(self):
        """Return (lower, upper): requested bounds clamped to the type's range."""
        if self.is_signed:
            signed_iinfo = np.iinfo('int' + str(self.bit_width))
            min_value, max_value = signed_iinfo.min, signed_iinfo.max
        else:
            unsigned_iinfo = np.iinfo('uint' + str(self.bit_width))
            min_value, max_value = 0, unsigned_iinfo.max
        lower_bound = max(min_value, self.min_value)
        upper_bound = min(max_value, self.max_value)
        return lower_bound, upper_bound
    def _get_type(self):
        return OrderedDict([
            ('name', 'int'),
            ('isSigned', self.is_signed),
            ('bitWidth', self.bit_width)
        ])
    def generate_column(self, size, name=None):
        """Generate a PrimitiveColumn of `size` random ints within bounds."""
        lower_bound, upper_bound = self._get_generated_data_bounds()
        return self.generate_range(size, lower_bound, upper_bound, name=name)
    def generate_range(self, size, lower, upper, name=None):
        # NOTE: np.random.randint's high bound is exclusive, so `upper`
        # itself is never generated.
        values = [int(x) for x in
                  np.random.randint(lower, upper, size=size)]
        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)
class DateType(IntegerType):
    """Date type: int32 days or int64 milliseconds since the UNIX epoch."""
    DAY = 0
    MILLISECOND = 1
    # 1/1/1 to 12/31/9999
    _ranges = {
        DAY: [-719162, 2932896],
        MILLISECOND: [-62135596800000, 253402214400000]
    }
    def __init__(self, name, unit, nullable=True):
        # DAY dates fit in 32 bits; MILLISECOND dates require 64.
        bit_width = 32 if unit == self.DAY else 64
        min_value, max_value = self._ranges[unit]
        super(DateType, self).__init__(
            name, True, bit_width, nullable=nullable,
            min_value=min_value, max_value=max_value
        )
        self.unit = unit
    def _get_type(self):
        return OrderedDict([
            ('name', 'date'),
            ('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
        ])
# Mapping from short unit abbreviations to Arrow JSON unit names.
TIMEUNIT_NAMES = {
    's': 'SECOND',
    'ms': 'MILLISECOND',
    'us': 'MICROSECOND',
    'ns': 'NANOSECOND'
}
class TimeType(IntegerType):
    """Time-of-day type: seconds/millis use 32 bits, micros/nanos use 64."""
    BIT_WIDTHS = {
        's': 32,
        'ms': 32,
        'us': 64,
        'ns': 64
    }
    # Value ranges: exactly one day expressed in the given unit.
    _ranges = {
        's': [0, 86400],
        'ms': [0, 86400000],
        'us': [0, 86400000000],
        'ns': [0, 86400000000000]
    }
    def __init__(self, name, unit='s', nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimeType, self).__init__(name, True, self.BIT_WIDTHS[unit],
                                       nullable=nullable,
                                       min_value=min_val,
                                       max_value=max_val)
        self.unit = unit
    def _get_type(self):
        return OrderedDict([
            ('name', 'time'),
            ('unit', TIMEUNIT_NAMES[self.unit]),
            ('bitWidth', self.bit_width)
        ])
class TimestampType(IntegerType):
    """Timestamp type: 64-bit count of `unit` since epoch, optional timezone."""
    # 1/1/1 to 12/31/9999
    _ranges = {
        's': [-62135596800, 253402214400],
        'ms': [-62135596800000, 253402214400000],
        'us': [-62135596800000000, 253402214400000000],
        # Physical range for int64, ~584 years and change
        'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
    }
    def __init__(self, name, unit='s', tz=None, nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimestampType, self).__init__(name, True, 64, nullable=nullable,
                                            min_value=min_val,
                                            max_value=max_val)
        self.unit = unit
        self.tz = tz
    def _get_type(self):
        fields = [
            ('name', 'timestamp'),
            ('unit', TIMEUNIT_NAMES[self.unit])
        ]
        # Timezone is optional in the JSON schema; omit the key when unset.
        if self.tz is not None:
            fields.append(('timezone', self.tz))
        return OrderedDict(fields)
class DurationIntervalType(IntegerType):
    """Duration type: signed 64-bit count of `unit`, spanning the full int64 range."""
    def __init__(self, name, unit='s', nullable=True):
        iinfo = np.iinfo('int64')
        super(DurationIntervalType, self).__init__(
            name, True, 64, nullable=nullable,
            min_value=iinfo.min,
            max_value=iinfo.max)
        self.unit = unit
    def _get_type(self):
        return OrderedDict([
            ('name', 'duration'),
            ('unit', TIMEUNIT_NAMES[self.unit])
        ])
class YearMonthIntervalType(IntegerType):
    """Interval type counting whole months, limited to +/- 10000 years."""
    def __init__(self, name, nullable=True):
        months_in_10k_years = 10000 * 12
        super(YearMonthIntervalType, self).__init__(
            name, True, 32, nullable=nullable,
            min_value=-months_in_10k_years,
            max_value=months_in_10k_years)
    def _get_type(self):
        return OrderedDict([('name', 'interval'), ('unit', 'YEAR_MONTH')])
class DayTimeIntervalType(PrimitiveType):
    """Calendar interval type of {days, milliseconds} pairs.

    Values are Python dicts, so the column is stored with object dtype.
    """

    def __init__(self, name, nullable=True):
        # BUG FIX: the caller's ``nullable`` flag was previously discarded
        # (``nullable=True`` was hard-coded in the super call); forward it.
        super(DayTimeIntervalType, self).__init__(name, nullable=nullable)

    @property
    def numpy_type(self):
        return object

    def _get_type(self):
        return OrderedDict([
            ('name', 'interval'),
            ('unit', 'DAY_TIME'),
        ])

    def generate_column(self, size, name=None):
        """Generate ``size`` random day/millisecond interval values."""
        min_day_value, max_day_value = -10000*366, 10000*366
        values = [{'days': random.randint(min_day_value, max_day_value),
                   'milliseconds': random.randint(-86400000, +86400000)}
                  for _ in range(size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)
class FloatingPointType(PrimitiveType):
    """IEEE floating point column type (half/single/double precision)."""

    _PRECISIONS = {16: 'HALF', 32: 'SINGLE', 64: 'DOUBLE'}

    def __init__(self, name, bit_width, nullable=True):
        super(FloatingPointType, self).__init__(name, nullable=nullable)
        self.bit_width = bit_width
        self.precision = self._PRECISIONS[self.bit_width]

    @property
    def numpy_type(self):
        return 'float' + str(self.bit_width)

    def _get_type(self):
        return OrderedDict([
            ('name', 'floatingpoint'),
            ('precision', self.precision)
        ])

    def generate_column(self, size, name=None):
        # Gaussian noise scaled up and rounded to 3 decimal places.
        values = np.round(np.random.randn(size) * 1000, 3)
        is_valid = self._make_is_valid(size)
        return PrimitiveColumn(self.name if name is None else name,
                               size, is_valid, values)
# Largest positive value representable at each decimal precision tier:
# precision -> 2**(8*width - 1) - 1 for byte widths 1..16.
_DECIMAL_PRECISIONS = (1, 3, 5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 32, 34, 36)
DECIMAL_PRECISION_TO_VALUE = {
    prec: 2 ** (8 * width - 1) - 1
    for width, prec in enumerate(_DECIMAL_PRECISIONS, start=1)
}
def decimal_range_from_precision(precision):
    """Return the (min, max) signed value range for a decimal precision.

    Precisions between table tiers fall back to the next smaller tier,
    matching the original recursive lookup.
    """
    assert 1 <= precision <= 38
    tier = precision
    while tier not in DECIMAL_PRECISION_TO_VALUE:
        tier -= 1
    max_value = DECIMAL_PRECISION_TO_VALUE[tier]
    # ~max_value == -max_value - 1, the matching two's-complement minimum.
    return ~max_value, max_value
class DecimalType(PrimitiveType):
    """Fixed-point decimal column type."""

    def __init__(self, name, precision, scale, bit_width=128, nullable=True):
        # BUG FIX: forward the caller's ``nullable`` flag instead of the
        # previously hard-coded ``nullable=True``.
        super(DecimalType, self).__init__(name, nullable=nullable)
        self.precision = precision
        self.scale = scale
        self.bit_width = bit_width

    @property
    def numpy_type(self):
        return object

    def _get_type(self):
        return OrderedDict([
            ('name', 'decimal'),
            ('precision', self.precision),
            ('scale', self.scale),
        ])

    def generate_column(self, size, name=None):
        """Generate ``size`` random unscaled integers within the precision."""
        min_value, max_value = decimal_range_from_precision(self.precision)
        values = [random.randint(min_value, max_value) for _ in range(size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return DecimalColumn(name, size, is_valid, values, self.bit_width)
class DecimalColumn(PrimitiveColumn):
    """Column of decimal values with an associated storage bit width."""

    def __init__(self, name, count, is_valid, values, bit_width=128):
        super(DecimalColumn, self).__init__(name, count, is_valid, values)
        # Storage width of each value in bits (Arrow decimals are 128-bit
        # by default).
        self.bit_width = bit_width

    def _encode_value(self, x):
        # JSON cannot represent 128-bit integers, so emit them as strings.
        return str(x)
class BooleanType(PrimitiveType):
    """Boolean column type (bit-packed in the Arrow layout)."""

    bit_width = 1

    def _get_type(self):
        return OrderedDict([('name', 'bool')])

    @property
    def numpy_type(self):
        return 'bool'

    def generate_column(self, size, name=None):
        # Random coin flips converted to Python bools.
        values = [bool(v) for v in np.random.randint(0, 2, size=size)]
        is_valid = self._make_is_valid(size)
        return PrimitiveColumn(self.name if name is None else name,
                               size, is_valid, values)
class BinaryType(PrimitiveType):
    """Variable-length binary column type."""

    @property
    def numpy_type(self):
        return object

    @property
    def column_class(self):
        return BinaryColumn

    def _get_type(self):
        return OrderedDict([('name', 'binary')])

    def generate_column(self, size, name=None):
        """Generate ``size`` random 7-byte blobs (empty bytes for nulls)."""
        K = 7
        is_valid = self._make_is_valid(size)
        values = []
        for i in range(size):
            if is_valid[i]:
                # BUG FIX: ndarray.tostring() was deprecated in NumPy 1.19
                # and removed in NumPy 2.0; tobytes() is the equivalent.
                draw = (np.random.randint(0, 255, size=K)
                        .astype(np.uint8)
                        .tobytes())
                values.append(draw)
            else:
                values.append(b"")

        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)
class FixedSizeBinaryType(PrimitiveType):
    """Fixed-width binary column type (``byte_width`` bytes per slot)."""

    def __init__(self, name, byte_width, nullable=True):
        super(FixedSizeBinaryType, self).__init__(name, nullable=nullable)
        self.byte_width = byte_width

    @property
    def numpy_type(self):
        return object

    @property
    def column_class(self):
        return FixedSizeBinaryColumn

    def _get_type(self):
        return OrderedDict([('name', 'fixedsizebinary'),
                            ('byteWidth', self.byte_width)])

    def _get_type_layout(self):
        return OrderedDict([
            ('vectors',
             [OrderedDict([('type', 'VALIDITY'),
                           ('typeBitWidth', 1)]),
              OrderedDict([('type', 'DATA'),
                           ('typeBitWidth', self.byte_width)])])])

    def generate_column(self, size, name=None):
        """Generate ``size`` random fixed-width blobs.

        Unlike variable-length binary, a value is produced even for null
        slots, since every slot occupies ``byte_width`` bytes.
        """
        is_valid = self._make_is_valid(size)
        values = []

        for i in range(size):
            # BUG FIX: tostring() was removed in NumPy 2.0; use tobytes().
            draw = (np.random.randint(0, 255, size=self.byte_width)
                    .astype(np.uint8)
                    .tobytes())
            values.append(draw)

        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)
class StringType(BinaryType):
    """UTF-8 string column type; shares the variable-length binary layout."""

    @property
    def column_class(self):
        return StringColumn

    def _get_type(self):
        return OrderedDict([('name', 'utf8')])

    def generate_column(self, size, name=None):
        length = 7
        is_valid = self._make_is_valid(size)
        # Random 7-character strings for valid slots, empty bytes otherwise.
        values = [tobytes(rands(length)) if valid else b""
                  for valid in is_valid]
        return self.column_class(self.name if name is None else name,
                                 size, is_valid, values)
class JsonSchema(object):
def __init__(self, fields):
self.fields = fields
def get_json(self):
return | |
import time
from .settings import *
# Dispatch to the selected drawing/clipping algorithm.
def draw_lines(grid, algorithm, posX1, posY1, posX2, posY2, color, rows, pixel_size, line):
    """Redraw ``grid`` using ``algorithm`` between the two end points.

    Positions arrive as floats from the UI and are truncated to ints.
    Returns the updated grid (a blank grid when drawing failed, or the
    unchanged input grid on the very first call while all coordinates
    are still zero).
    """
    # Positions are always floats, so truncate them to ints.
    posX1, posX2, posY1, posY2 = int(posX1), int(posX2), int(posY1), int(posY2)

    # Do nothing on the first iteration (coordinates still at the origin).
    if posX1 > 0 and posX2 > 0 and posY1 > 0 and posY2 > 0:
        grid = init_grid()
        if algorithm == "DDA":
            grid = DDA(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size)
        elif algorithm == "Bresenham":
            grid = bresenham(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size)
        elif algorithm == "Círculo":
            # NOTE(review): the centre is (posX1, posY2) — mixing the first
            # x with the *second* y; confirm this is not a typo for posY1.
            grid = draw_circle_bresenham(posX1, posY2, abs(posX2 - posX1), grid, color, rows, pixel_size)
        elif algorithm == "Cohen Sutherland":
            clip = Clipping()
            grid = clip.cohenSutherland(posX1, posY1, BLUE, rows, pixel_size, line, grid)
        elif algorithm == "Liang Barsky":
            clip = Clipping()
            grid = clip.liangBarsky(posX1, posY1, RED, rows, pixel_size, line, grid)
        # The algorithms return None on failure; fall back to a blank grid.
        if not grid:
            grid = init_grid()
    return grid
# Convert a pygame pixel position into grid coordinates.
def get_row_col_from_pos(pos, rows, pixel_size):
    """Map a screen position to grid coordinates.

    Note the return order is (col, row).  Raises IndexError when the
    position falls outside the drawable area.
    """
    x, y = pos
    row, col = x // pixel_size, y // pixel_size
    # The given position is not inside the drawable area.
    if row >= rows:
        raise IndexError
    return col, row
# Build a fresh drawing grid.
def init_grid():
    """Return a blank ROWS x COLS grid filled with the background colour."""
    return [[BG_COLOR for _ in range(COLS)] for _ in range(ROWS)]
# DDA line-drawing algorithm.
def DDA(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size):
    """Plot a line from (posX1, posY1) to (posX2, posY2) with DDA."""
    dx = posX2 - posX1
    dy = posY2 - posY1

    # The step count follows the dominant axis; force at least one step so
    # a single pixel is still drawn when both points coincide.
    passos = max(abs(dx), abs(dy)) or 1

    x_incr = dx / passos
    y_incr = dy / passos

    x, y = posX1, posY1
    grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
    for _ in range(passos):
        x += x_incr
        y += y_incr
        grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
    return grid
# Bresenham line-drawing algorithm.
def bresenham(x1, y1, x2, y2, grid, color, rows, pixel_size):
    """Plot a line from (x1, y1) to (x2, y2) using integer-only Bresenham.

    Returns the updated grid, or None if draw_in_grid() ever yields a
    falsy grid (matching the original bail-out contract).

    BUG FIXES over the previous version:
    * the endpoint-swap guard compared ``y2 > x1`` (a y against an x);
    * ``passo_x`` was initialised to ``x`` instead of a unit step, making
      the x coordinate jump by the start position on every iteration;
    * vertical lines (dx == 0) were rejected outright to avoid a division
      by zero — the error-term formulation below needs no division and
      handles every octant, including vertical and horizontal lines.
    """
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    passo_x = 1 if x2 >= x1 else -1  # step direction along x
    passo_y = 1 if y2 >= y1 else -1  # step direction along y
    erro = dx - dy

    x, y = x1, y1
    while True:
        grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
        # Abort if the pixel could not be drawn.
        if not grid:
            return
        if x == x2 and y == y2:
            break
        e2 = 2 * erro
        if e2 > -dy:
            erro -= dy
            x += passo_x
        if e2 < dx:
            erro += dx
            y += passo_y
    return grid
# Midpoint (Bresenham) circle drawing.
def draw_circle_bresenham(x, y, raio, grid, color, rows, pixel_size):
    """Draw a circle of radius ``raio`` centred at (x, y)."""

    # Plot the eight symmetric reflections of (px, py) around the centre.
    def plot_octants(xc, yc, px, py, grid):
        for ox, oy in ((px, py), (-px, py), (px, -py), (-px, -py),
                       (py, px), (-py, px), (py, -px), (-py, -px)):
            grid = draw_in_grid(xc + ox, yc + oy, rows, pixel_size, grid, color)
        return grid

    px, py = 0, raio
    decisao = 3 - 2 * raio
    grid = plot_octants(x, y, px, py, grid)
    # Walk one octant; the reflections fill in the other seven.
    while py >= px:
        px += 1
        if decisao > 0:
            py -= 1
            decisao += 4 * (px - py) + 10
        else:
            decisao += 4 * px + 6
        grid = plot_octants(x, y, px, py, grid)
    return grid
class Clipping:
    # Outcode bit flags for Cohen–Sutherland region tests.
    DENTRO = 0b0
    ESQUERDA = 0b1
    DIREITA = 0b10
    ABAIXO = 0b100
    TOPO = 0b1000
    # Clipping-window bounds; -1 means "no rectangle defined yet".
    x_max = -1
    y_max = -1
    x_min = -1
    y_min = -1
    # Scratch slots for Liang–Barsky's t parameters (Python has no
    # out-parameters, so they live on the instance).
    t1 = t2 = 0
# Verificar se o retangulo existe
def existsRectangle(self) -> bool:
return self.x_max != -1 and self.y_max != -1 and self.x_min != -1 and self.y_min != -1
# Verificar onde o ponto está
def qualLado(self, x, y):
code = self.DENTRO
# Linha a esquerda do retangulo
if x < self.x_min:
code |= self.ESQUERDA
# Linha está a direita do retângulo
elif x > self.x_max:
code = self.DIREITA
# Linha está abaixo do retângulo
if y < self.y_min:
code |= self.ABAIXO
# Linha está acima do retângulo
elif y > self.y_max:
code |= self.TOPO
return code
def desenharRetangulo(self, x, y, color, rows, grid, pixel_size, line):
# Desenhando um retangulo
deslocamento_x = 150
deslocamento_y = 100
for i in range(deslocamento_x):
grid = draw_in_grid(x + i, y, rows, pixel_size, grid, color) # Desenhando da esquerda para a direita
for i in range(deslocamento_y):
grid = draw_in_grid(x + deslocamento_x, y + i, rows, pixel_size, grid,
color) # Desenhando da direita para baixo
for i in range(deslocamento_y):
grid = draw_in_grid(x, y + i, rows, pixel_size, grid, color) # Desenhando de cima para baixo
for i in range(deslocamento_x):
grid = draw_in_grid(x + i, y + deslocamento_y, rows, pixel_size, grid,
color) # Desenhando da direita para baixo
return grid
    # Cohen–Sutherland line clipping against the 150x100 window.
    def cohenSutherland(self, retanguloX, retanguloY, color, rows, pixel_size, line, grid):
        """Clip ``line`` against the window anchored at (retanguloX, retanguloY),
        draw the surviving segment (if any), then the window outline.

        NOTE(review): the intersection formulas divide by (pontoY2 - pontoY1)
        or (pontoX2 - pontoX1); a perfectly horizontal/vertical segment that
        straddles the window would raise ZeroDivisionError — confirm callers
        never pass such segments, or guard the divisions.
        """
        self.x_min = retanguloX
        self.x_max = retanguloX + 150
        self.y_min = retanguloY
        self.y_max = retanguloY + 100
        pontoX1 = line.pontoX1
        pontoY1 = line.pontoY1
        pontoX2 = line.pontoX2
        pontoY2 = line.pontoY2
        lado1 = self.qualLado(pontoX1, pontoY1)
        lado2 = self.qualLado(pontoX2, pontoY2)
        desenhar = False
        run = True
        while run:
            if lado1 == 0 and lado2 == 0:
                # Both endpoints inside: trivially accept.
                desenhar = True
                run = False
            elif lado1 & lado2 != 0:
                # Both endpoints share an outside region: trivially reject.
                run = False
            else:
                # Some part of the segment is inside the rectangle.
                lado_fora = 0
                x = y = 0
                if lado1 != 0:
                    lado_fora = lado1
                else:
                    lado_fora = lado2
                # Intersection points with the crossed window edge.
                if (lado_fora & self.TOPO) != 0:
                    x = pontoX1 + (pontoX2 - pontoX1) * (self.y_max - pontoY1) / (pontoY2 - pontoY1)
                    y = self.y_max
                elif (lado_fora & self.ABAIXO) != 0:
                    x = pontoX1 + (pontoX2 - pontoX1) * (self.y_min - pontoY1) / (pontoY2 - pontoY1)
                    y = self.y_min
                elif (lado_fora & self.DIREITA) != 0:
                    y = pontoY1 + (pontoY2 - pontoY1) * (self.x_max - pontoX1) / (pontoX2 - pontoX1)
                    x = self.x_max
                elif (lado_fora & self.ESQUERDA) != 0:
                    y = pontoY1 + (pontoY2 - pontoY1) * (self.x_min - pontoX1) / (pontoX2 - pontoX1)
                    x = self.x_min
                # Replace the outside endpoint with the intersection point.
                if lado_fora == lado1:
                    pontoX1 = x
                    pontoY1 = y
                    lado1 = self.qualLado(pontoX1, pontoY1)
                else:
                    pontoX2 = x
                    pontoY2 = y
                    lado2 = self.qualLado(pontoX2, pontoY2)
        if desenhar:
            grid = draw_lines(grid, line.algoritmo, pontoX1, pontoY1, pontoX2, pontoY2, RED, rows, pixel_size, line)
        else:
            grid = init_grid()
        return self.desenharRetangulo(retanguloX, retanguloY, BLUE, rows, grid, pixel_size, line)
# Testando o clipping
def testandoClipping(self, ponto1, ponto2) -> bool:
isClipping = True
r = 0
if ponto1 < 0:
r = ponto2 / ponto1
if r > self.t2:
isClipping = False
elif r > self.t1:
self.t1 = r
elif ponto1 > 0:
r = ponto2 / ponto1
if r < self.t1:
isClipping = False
elif r < self.t2:
self.t2 = r
else:
if ponto2 < 0:
isClipping = False
return isClipping
# Algoritmo de Liang Barsky para clipping
def liangBarsky(self, retanguloX, retanguloY, color, rows, pixel_size, line, grid):
dx = line.pontoX2 - line.pontoX1
self.t1 = 0
self.t2 = 1
pontoX1 = line.pontoX1
pontoY1 = line.pontoY1
pontoX2 = line.pontoX2
pontoY2 = line.pontoY2
if self.testandoClipping(-dx, pontoX1 - retanguloX) and self.testandoClipping(dx, retanguloX + 150 - pontoX1):
dy = pontoY2 - pontoY1
if self.testandoClipping(-dy, pontoY1 - retanguloY) and self.testandoClipping(dy, retanguloY + 100 - pontoY1):
if self.t2 < 1.0:
pontoX2 = int(pontoX1 + self.t2*dx)
pontoY2 = int(pontoY1 | |
comparison
    def plot_image_from_latent(z_sample):
        # Decode latent samples into 28x28 image tensors (no gradients).
        with torch.no_grad():
            sample = _p.sample({"z": z_sample})["x"].view(-1, 1, 28, 28).cpu()  # TODO: it should be sample_mean
            return sample
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = iter(test_loader).next()
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# # JMVAE with a PoE encoder (using the VAE class)
# * JMVAE: Joint Multimodal Learning with Deep Generative Models
# * The PoE encoder is originally proposed in "Multimodal Generative Models for Scalable Weakly-Supervised Learning"
@pytest.mark.performance
def test_run_jmvae_poe():
    """JMVAE with a Product-of-Experts encoder, smoke-trained on mock MNIST.

    BUG FIX: the plotting batch was fetched via ``iter(test_loader).next()``,
    the Python-2 iterator protocol; modern DataLoader iterators only
    implement ``__next__``, so use the builtin ``next(iter(...))``.
    """
    kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)

    x_dim = 784
    y_dim = 10
    z_dim = 64

    # inference model q(z|x)
    class InferenceX(Normal):
        def __init__(self):
            super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
            self.fc1 = nn.Linear(x_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc31 = nn.Linear(512, z_dim)
            self.fc32 = nn.Linear(512, z_dim)

        def forward(self, x):
            h = F.relu(self.fc1(x))
            h = F.relu(self.fc2(h))
            return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}

    # inference model q(z|y)
    class InferenceY(Normal):
        def __init__(self):
            super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
            self.fc1 = nn.Linear(y_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc31 = nn.Linear(512, z_dim)
            self.fc32 = nn.Linear(512, z_dim)

        def forward(self, y):
            h = F.relu(self.fc1(y))
            h = F.relu(self.fc2(h))
            return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}

    # generative model p(x|z)
    class GeneratorX(Bernoulli):
        def __init__(self):
            super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
            self.fc1 = nn.Linear(z_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc3 = nn.Linear(512, x_dim)

        def forward(self, z):
            h = F.relu(self.fc1(z))
            h = F.relu(self.fc2(h))
            return {"probs": torch.sigmoid(self.fc3(h))}

    # generative model p(y|z)
    class GeneratorY(Categorical):
        def __init__(self):
            super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
            self.fc1 = nn.Linear(z_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc3 = nn.Linear(512, y_dim)

        def forward(self, z):
            h = F.relu(self.fc1(z))
            h = F.relu(self.fc2(h))
            return {"probs": F.softmax(self.fc3(h), dim=1)}

    # prior model p(z): standard normal
    prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
                   var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)

    p_x = GeneratorX().to(device)
    p_y = GeneratorY().to(device)
    p = p_x * p_y

    q_x = InferenceX().to(device)
    q_y = InferenceY().to(device)
    # Product-of-Experts joint encoder over the unimodal encoders.
    q = ProductOfNormal([q_x, q_y], name="q").to(device)

    print(q)
    print_latex(q)

    print(p)
    print_latex(p)

    # JMVAE regularizer: KL to the prior plus KLs to each unimodal encoder.
    kl = KullbackLeibler(q, prior)
    kl_x = KullbackLeibler(q, q_x)
    kl_y = KullbackLeibler(q, q_y)
    regularizer = kl + kl_x + kl_y
    print(regularizer)
    print_latex(regularizer)

    model = VAE(q, p, other_distributions=[q_x, q_y],
                regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
    print(model)
    print_latex(model)

    def train(epoch):
        # One pass over the training data; returns the dataset-averaged loss.
        train_loss = 0
        for x, y in tqdm(train_loader):
            x = x.to(device)
            y = torch.eye(10)[y].to(device)  # one-hot labels
            loss = model.train({"x": x, "y": y})
            train_loss += loss

        train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
        print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
        return train_loss

    def test(epoch):
        # One evaluation pass over the test data.
        test_loss = 0
        for x, y in test_loader:
            x = x.to(device)
            y = torch.eye(10)[y].to(device)
            loss = model.test({"x": x, "y": y})
            test_loss += loss

        test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
        print('Test loss: {:.4f}'.format(test_loss))
        return test_loss

    def plot_reconstrunction_missing(x):
        # Reconstruct from x alone (label modality missing).
        with torch.no_grad():
            z = q_x.sample({"x": x}, return_all=False)
            recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
            comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
            return comparison

    def plot_image_from_label(x, y):
        # Sample several images conditioned on the label only.
        with torch.no_grad():
            x_all = [x.view(-1, 1, 28, 28)]
            for i in range(7):
                z = q_y.sample({"y": y}, return_all=False)
                recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
                x_all.append(recon_batch)

            comparison = torch.cat(x_all).cpu()
            return comparison

    def plot_reconstrunction(x, y):
        # Reconstruct from both modalities through the joint encoder.
        with torch.no_grad():
            z = q.sample({"x": x, "y": y}, return_all=False)
            recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
            comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
            return comparison

    plot_number = 1

    # BUG FIX: py2-style ``iter(...).next()`` replaced by ``next(iter(...))``.
    _x, _y = next(iter(test_loader))
    _x = _x.to(device)
    _y = torch.eye(10)[_y].to(device)

    for epoch in range(1, epochs + 1):
        train_loss = train(epoch)
        test_loss = test(epoch)

        recon = plot_reconstrunction(_x[:8], _y[:8])
        sample = plot_image_from_label(_x[:8], _y[:8])
        recon_missing = plot_reconstrunction_missing(_x[:8])
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Joint multimodal variational autoencoder (JMVAE, using the VAE class)
# Original paper: Joint Multimodal Learning with Deep Generative Models (https://arxiv.org/abs/1611.01891 )
@pytest.mark.performance
def test_run_jmvae():
    """JMVAE with a joint q(z|x,y) encoder, smoke-trained on mock MNIST.

    BUG FIX: the plotting batch was fetched via ``iter(test_loader).next()``,
    the Python-2 iterator protocol; modern DataLoader iterators only
    implement ``__next__``, so use the builtin ``next(iter(...))``.
    """
    kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)

    x_dim = 784
    y_dim = 10
    z_dim = 64

    # inference model q(z|x,y)
    class Inference(Normal):
        def __init__(self):
            super(Inference, self).__init__(cond_var=["x", "y"], var=["z"], name="q")
            self.fc1 = nn.Linear(x_dim + y_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc31 = nn.Linear(512, z_dim)
            self.fc32 = nn.Linear(512, z_dim)

        def forward(self, x, y):
            h = F.relu(self.fc1(torch.cat([x, y], 1)))
            h = F.relu(self.fc2(h))
            return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}

    # inference model q(z|x)
    class InferenceX(Normal):
        def __init__(self):
            super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
            self.fc1 = nn.Linear(x_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc31 = nn.Linear(512, z_dim)
            self.fc32 = nn.Linear(512, z_dim)

        def forward(self, x):
            h = F.relu(self.fc1(x))
            h = F.relu(self.fc2(h))
            return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}

    # inference model q(z|y)
    class InferenceY(Normal):
        def __init__(self):
            super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
            self.fc1 = nn.Linear(y_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc31 = nn.Linear(512, z_dim)
            self.fc32 = nn.Linear(512, z_dim)

        def forward(self, y):
            h = F.relu(self.fc1(y))
            h = F.relu(self.fc2(h))
            return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}

    # generative model p(x|z)
    class GeneratorX(Bernoulli):
        def __init__(self):
            super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
            self.fc1 = nn.Linear(z_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc3 = nn.Linear(512, x_dim)

        def forward(self, z):
            h = F.relu(self.fc1(z))
            h = F.relu(self.fc2(h))
            return {"probs": torch.sigmoid(self.fc3(h))}

    # generative model p(y|z)
    class GeneratorY(Categorical):
        def __init__(self):
            super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
            self.fc1 = nn.Linear(z_dim, 512)
            self.fc2 = nn.Linear(512, 512)
            self.fc3 = nn.Linear(512, y_dim)

        def forward(self, z):
            h = F.relu(self.fc1(z))
            h = F.relu(self.fc2(h))
            return {"probs": F.softmax(self.fc3(h), dim=1)}

    # prior model p(z): standard normal
    prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
                   var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)

    p_x = GeneratorX().to(device)
    p_y = GeneratorY().to(device)

    q = Inference().to(device)
    q_x = InferenceX().to(device)
    q_y = InferenceY().to(device)

    p = p_x * p_y

    print(p)
    print_latex(p)

    # JMVAE regularizer: KL to the prior plus KLs to each unimodal encoder.
    kl = KullbackLeibler(q, prior)
    kl_x = KullbackLeibler(q, q_x)
    kl_y = KullbackLeibler(q, q_y)
    regularizer = kl + kl_x + kl_y
    print(regularizer)
    print_latex(regularizer)

    model = VAE(q, p, other_distributions=[q_x, q_y],
                regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
    print(model)
    print_latex(model)

    def train(epoch):
        # One pass over the training data; returns the dataset-averaged loss.
        train_loss = 0
        for x, y in tqdm(train_loader):
            x = x.to(device)
            y = torch.eye(10)[y].to(device)  # one-hot labels
            loss = model.train({"x": x, "y": y})
            train_loss += loss

        train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
        print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
        return train_loss

    def test(epoch):
        # One evaluation pass over the test data.
        test_loss = 0
        for x, y in test_loader:
            x = x.to(device)
            y = torch.eye(10)[y].to(device)
            loss = model.test({"x": x, "y": y})
            test_loss += loss

        test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
        print('Test loss: {:.4f}'.format(test_loss))
        return test_loss

    def plot_reconstrunction_missing(x):
        # Reconstruct from x alone (label modality missing).
        with torch.no_grad():
            z = q_x.sample({"x": x}, return_all=False)
            recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
            comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
            return comparison

    def plot_image_from_label(x, y):
        # Sample several images conditioned on the label only.
        with torch.no_grad():
            x_all = [x.view(-1, 1, 28, 28)]
            for i in range(7):
                z = q_y.sample({"y": y}, return_all=False)
                recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
                x_all.append(recon_batch)

            comparison = torch.cat(x_all).cpu()
            return comparison

    def plot_reconstrunction(x, y):
        # Reconstruct from both modalities through the joint encoder.
        with torch.no_grad():
            z = q.sample({"x": x, "y": y}, return_all=False)
            recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
            comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
            return comparison

    plot_number = 1

    # BUG FIX: py2-style ``iter(...).next()`` replaced by ``next(iter(...))``.
    _x, _y = next(iter(test_loader))
    _x = _x.to(device)
    _y = torch.eye(10)[_y].to(device)

    for epoch in range(1, epochs + 1):
        train_loss = train(epoch)
        test_loss = test(epoch)

        recon = plot_reconstrunction(_x[:8], _y[:8])
        sample = plot_image_from_label(_x[:8], _y[:8])
        recon_missing = plot_reconstrunction_missing(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', | |
"m.relates_to" in event['content']:
# это ответ на сообщение:
reply_to_id=event['content']['m.relates_to']['m.in_reply_to']['event_id']
formatted_body=None
format_type=None
if "formatted_body" in event['content'] and "format" in event['content']:
formatted_body=event['content']['formatted_body']
format_type=event['content']['format']
elif event['content']['msgtype'] == "m.video":
file_type=event['content']['info']['mimetype']
file_url=event['content']['url']
elif event['content']['msgtype'] == "m.image":
file_url=event['content']['url']
if "imageinfo" in event['content']['info']:
file_type=event['content']['info']['imageinfo']['mimetype']
else:
file_type=event['content']['info']['mimetype']
elif event['content']['msgtype'] == "m.file":
file_url=event['content']['url']
if "fileinfo" in event['content']['info']:
file_type=event['content']['info']['fileinfo']['mimetype']
else:
file_type=event['content']['info']['mimetype']
elif event['content']['msgtype'] == "m.audio":
file_url=event['content']['url']
if "fileinfo" in event['content']['info']:
file_type=event['content']['info']['fileinfo']['mimetype']
elif "audioinfo" in event['content']['info']:
file_type=event['content']['info']['audioinfo']['mimetype']
else:
file_type=event['content']['info']['mimetype']
log.debug("%s: %s"%(event['sender'], event['content']["body"]))
log.debug("try lock before process_command()")
with lock:
log.debug("success lock before process_command()")
if process_command(\
event['sender'],\
event['room_id'],\
event['content']["body"],\
formated_message=formatted_body,\
format_type=format_type,\
reply_to_id=reply_to_id,\
file_url=file_url,\
file_type=file_type\
) == False:
log.error("error process command: '%s'"%event['content']["body"])
return False
log.debug("success lock() before access global data")
else:
log.warning("unknown type of event:")
log.warning(event['type'])
return False
return True
except Exception as e:
log.error(get_exception_traceback_descr(e))
log.error("exception at execute on_message()")
bot_system_message(user,"внутренняя ошибка бота")
log.error("json of event:")
log.error(json.dumps(event, indent=4, sort_keys=True,ensure_ascii=False))
return False
def on_event(event):
    """Debug hook for ephemeral events: dump the raw event to stdout."""
    global log
    log.debug("=start function=")
    for chunk in ("event:", event,
                  json.dumps(event, indent=4, sort_keys=True, ensure_ascii=False)):
        print(chunk)
def on_invite(room, event):
    """Handle a room invite: join when the inviter is allowed, then record
    the room as that user's control room in the shared ``data`` store.

    BUG FIX: the allowed-domain check previously built a regex from the raw
    domain (``'.*:%s$' % allow_domain``) without escaping, so dots in the
    domain matched any character (e.g. "example.com" also accepted
    "exampleXcom").  A plain suffix comparison is correct and simpler.
    """
    global client
    global log
    global lock
    global data
    log.debug("=start function=")
    if conf.debug:
        print("invite:")
        print("room_data:")
        print(room)
        print("event_data:")
        print(event)
        print(json.dumps(event, indent=4, sort_keys=True, ensure_ascii=False))

    # Scan the invite's state events:
    for event_item in event['events']:
        if event_item['type'] == "m.room.join_rules":
            if event_item['content']['join_rule'] == "invite":
                user = event_item["sender"]

                # Permission check (domain allow-list, user allow-list,
                # or everything allowed when both lists are empty):
                allow = False
                if len(conf.allow_domains) > 0:
                    for allow_domain in conf.allow_domains:
                        if user.lower().endswith(':%s' % allow_domain.lower()):
                            allow = True
                            log.info("user: %s from allow domain: %s - allow invite" % (user, allow_domain))
                            break
                if len(conf.allow_users) > 0:
                    for allow_user in conf.allow_users:
                        if allow_user.lower() == user.lower():
                            allow = True
                            log.info("user: %s from allow users - allow invite" % user)
                            break
                if len(conf.allow_domains) == 0 and len(conf.allow_users) == 0:
                    allow = True

                if allow == True:
                    # Accept the invitation:
                    room = client.join_room(room)
                    room.send_text("Спасибо за приглашение! Недеюсь быть Вам полезным. :-)")
                    room.send_text("Для справки по доступным командам - наберите: '!help' (или '!?', или '!h')")
                    log.info("New user: '%s'" % user)

                    # Record this room as the user's system/control room (the
                    # bot posts system messages and listens for commands there):
                    log.debug("try lock() before access global data()")
                    with lock:
                        log.debug("success lock() before access global data")
                        if "users" not in data:
                            data["users"] = {}
                        if user not in data["users"]:
                            data["users"][user] = {}
                        if "matrix_bot_data" not in data["users"][user]:
                            data["users"][user]["matrix_bot_data"] = {}
                        if "control_room" not in data["users"][user]["matrix_bot_data"]:
                            data["users"][user]["matrix_bot_data"]["control_room"] = room.room_id
                        save_data(data)
                    log.debug("release lock() after access global data")
def exception_handler(e):
    """Called by the Matrix listener thread on error: log, pause, let it retry."""
    global client
    global log
    log.debug("=start function=")
    log.error("main MATRIX listener thread except. He must retrying...")
    print(e)
    # Back off before the listener retries so we don't hammer the server.
    log.info("wait 30 second before retrying...")
    time.sleep(30)
def main():
    """Entry point: load state, log in to Matrix, register listeners and
    run the VK-poller supervision loop.

    BUG FIX: the login call contained a redacted ``<PASSWORD>`` placeholder,
    which is a syntax error; the password is restored from the config like
    the other credentials (NOTE(review): confirm the attribute is named
    ``conf.password``).
    """
    global client
    global data
    global log
    global lock
    lock = threading.RLock()

    log.debug("try lock() before access global data()")
    with lock:
        log.debug("success lock() before access global data")
        data = load_data()
    log.debug("release lock() after access global data")

    log.info("try init matrix-client")
    client = MatrixClient(conf.server)
    log.info("success init matrix-client")

    # Retry the login until it succeeds (or fails fatally).
    while True:
        try:
            log.info("try login matrix-client")
            token = client.login(username=conf.username, password=conf.password, device_id=conf.device_id)
            log.info("success login matrix-client")
            client.api.sync(set_presence="unavailable")
        except MatrixRequestError as e:
            print(e)
            log.debug(e)
            if e.code == 403:
                log.error("Bad username or password.")
            else:
                log.error("Check your sever details are correct.")
            sys.exit(4)
        except MissingSchema as e:
            print(e)
            log.error("Bad URL format.")
            log.error(get_exception_traceback_descr(e))
            log.debug(e)
            sys.exit(4)
        except Exception as e:
            log.error("Unknown connect error")
            log.error(get_exception_traceback_descr(e))
            log.debug(e)
            log.info("sleep 30 second and try again...")
            time.sleep(30)
            continue
        break

    try:
        log.info("try init listeners")
        client.add_listener(on_message)
        client.add_ephemeral_listener(on_event)
        client.add_invite_listener(on_invite)
        client.start_listener_thread(exception_handler=exception_handler)
        log.info("success init listeners")
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute main() at init listeners")
        sys.exit(1)

    try:
        x = 0
        log.info("enter main loop")
        while True:
            log.debug("step %d" % x)
            # Launch pollers that are not running yet — at bot start-up or
            # when a new user has connected:
            num = start_vk_polls(x)
            if num > 0:
                log.info("start_vk_polls() start %d new poller proccess for receive VK messages" % num)
            time.sleep(10)
            check_bot_status()
            x += 1
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute main() at main loop")
        sys.exit(1)
    log.info("exit main loop")
def check_bot_status():
    """Check the freshness of every user's VK poller and update the stored
    connection status, notifying the user whenever the status changes.

    A poller is considered stale when its ``ts_check_poll`` timestamp is
    older than 10 minutes; after a 4-minute grace period the user's
    ``exit`` flag is set so start_vk_polls() restarts the poller.

    :return: True if at least one user's connection status changed,
        False otherwise (and on any unexpected error).
    """
    global client
    global data
    global log
    global lock
    try:
        log.debug("=start function=")
        change_flag=False
        cur_ts = int(time.time())
        for user in data["users"]:
            user_data=data["users"][user]
            # vk connection status:
            if "vk" in user_data:
                # Remember the previous status so we can detect a change below.
                prev_connection_status="unknown"
                if "connection_status" in data["users"][user]["vk"]:
                    prev_connection_status=data["users"][user]["vk"]["connection_status"]
                if "ts_check_poll" in user_data["vk"]:
                    ts_check_poll=0
                    log.debug("try lock() before access global data()")
                    with lock:
                        log.debug("success lock() before access global data")
                        ts_check_poll=user_data["vk"]["ts_check_poll"]
                        log.debug("release lock() after access global data")
                    # Age of the latest successful VK poll, in seconds.
                    delta=cur_ts-ts_check_poll
                    log.debug("delta=%d"%delta)
                    if delta > 600:
                        # No data from VK for more than 10 minutes:
                        # mark the connection as broken.
                        log.debug("try lock() before access global data()")
                        with lock:
                            log.debug("success lock() before access global data")
                            data["users"][user]["vk"]["connection_status"]="error"
                            data["users"][user]["vk"]["connection_status_descr"]="более 10 минут не обновлялись данные из VK - пробую переподключиться"
                            data["users"][user]["vk"]["connection_status_update_ts"]=cur_ts
                            log.debug("release lock() after access global data")
                        log.info("wait 240 sec before set exif_flag=1")
                        # Reconnection task:
                        # NOTE(review): this sleep blocks the whole status pass
                        # (and the caller's main loop) for 4 minutes per stale
                        # user - presumably intentional, but worth confirming.
                        time.sleep(240) # wait a little, just in case:
                        log.info("again check connection before before set exif_flag=1")
                        # Re-check the status - if the situation has not changed,
                        # flag the poller for restart:
                        cur_ts = int(time.time())
                        log.debug("try lock() before access global data()")
                        with lock:
                            log.debug("success lock() before access global data")
                            ts_check_poll=user_data["vk"]["ts_check_poll"]
                            log.debug("release lock() after access global data")
                        delta=cur_ts-ts_check_poll
                        if delta > 600:
                            # Still stale after the grace period: request a
                            # poller restart via the shared exit flag.
                            log.info("delta not connection = %d seconds. Set exit_flag = 1" % delta)
                            log.debug("try lock() before access global data()")
                            with lock:
                                log.debug("success lock() before access global data")
                                if "exit" in data["users"][user]["vk"]:
                                    log.debug("old status exit_flag for user %s = %s"%(user,str(data["users"][user]["vk"]["exit"])))
                                log.debug("set exit_flag for user '%s' to True"%user)
                                data["users"][user]["vk"]["exit"]=True
                                log.debug("release lock() after access global data")
                        else:
                            # The poller recovered during the grace period.
                            # NOTE(review): this write is done without taking
                            # the lock, unlike every other mutation here.
                            data["users"][user]["vk"]["exit"]=False
                            log.info("at 240 timeout bot was recconnect success - then we do not set exit_flag. Exit check_bot_status()")
                    else:
                        # Fresh data within the last 10 minutes: mark healthy.
                        log.debug("try lock() before access global data()")
                        with lock:
                            log.debug("success lock() before access global data")
                            data["users"][user]["vk"]["connection_status"]="success"
                            data["users"][user]["vk"]["connection_status_descr"]="нет ошибок"
                            data["users"][user]["vk"]["connection_status_update_ts"]=cur_ts
                            log.debug("release lock() after access global data")
                # Notify the user when the status differs from what we saw
                # at the start of this pass.
                if "connection_status" in data["users"][user]["vk"]:
                    if prev_connection_status!=data["users"][user]["vk"]["connection_status"]:
                        change_flag=True
                        bot_system_message(user,"Изменён статус соединения с VK на '%s', детальное описание: '%s'"%(\
                            data["users"][user]["vk"]["connection_status"],\
                            data["users"][user]["vk"]["connection_status_descr"]\
                            ))
        return change_flag
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute check_bot_status()")
        return False
def check_thread_exist(vk_id):
    """Return True when a VK poller thread for the given VK user id is
    currently alive, False otherwise (including on unexpected errors).

    :param vk_id: VK user id; poller threads are named ``vk<vk_id>``.
    """
    global log
    try:
        log.debug("=start function=")
        # Hoist the name construction out of the loop.
        target_name = 'vk' + str(vk_id)
        for th in threading.enumerate():
            # Thread.getName() has been deprecated since Python 3.10;
            # the .name attribute is the equivalent modern spelling.
            if th.name == target_name:
                return True
        return False
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute check_thread_exist()")
        return False
def stop_thread(vk_id):
    """Stop the VK poller thread named ``vk<vk_id>``.

    Currently a stub: Python offers no safe way to kill a thread from the
    outside, so when the thread is found it is left running and False is
    returned.

    NOTE(review): when no matching thread exists the function falls
    through the loop and implicitly returns None; callers comparing the
    result with ``== False`` therefore treat "no such thread" as success
    - confirm this is intended.
    """
    global log
    try:
        log.debug("=start function=")
        for th in threading.enumerate():
            if th.getName() == 'vk' + str(vk_id):
                #th._stop_event.set()
                #return True
                # FIXME: hard-stopping a thread is unsupported; the poller
                # should instead observe the shared per-user exit flag.
                log.info("FIXME pass hard stop thread - skip ")
                return False
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute stop_thread()")
        return False
# Start the message-receiving threads:
def start_vk_polls(check_iteration):
    """Ensure every configured user has a running VK poller thread.

    Users whose ``exit`` flag is set get their poller "stopped" and the
    flag cleared; users without a live ``vk<vk_id>`` thread get a fresh
    daemon poller thread started.

    :param check_iteration: number of the supervision pass; on pass 0
        (bot start / service restart) users are not notified about their
        threads being (re)started.
    :return: how many poller threads were started (0 on error).
    """
    global data
    global lock
    global log
    try:
        log.debug("=start function=")
        started=0
        for user in data["users"]:
            if "vk" in data["users"][user] and "vk_id" in data["users"][user]["vk"]:
                log.debug("try lock() before access global data()")
                with lock:
                    log.debug("success lock() before access global data")
                    vk_data=data["users"][user]["vk"]
                    vk_id=data["users"][user]["vk"]["vk_id"]
                    # NOTE(review): assumes the "exit" key is always present;
                    # a missing key raises KeyError and aborts the whole pass
                    # via the outer except - confirm the key is guaranteed.
                    exit_flag=data["users"][user]["vk"]["exit"]
                    log.debug("release lock() after access global data")
                if exit_flag:
                    # A restart was requested (e.g. by check_bot_status()).
                    log.info("exit_flag=True, try stop thread for user %s"%user)
                    time.sleep(3)
                    if stop_thread(vk_id) == False:
                        log.error("stop_thread(vk_id)")
                    else:
                        log.debug("success stop thread, try set exit_flag to False")
                        with lock:
                            log.debug("success lock() before access global data")
                            log.debug("set exit_flag for user '%s' to False"%user)
                            data["users"][user]["vk"]["exit"]=False
                            log.debug("release lock() after access global data")
                        log.debug("wait before restart thhread")
                        time.sleep(5)
                if check_thread_exist(vk_id) == False:
                    log.info("no thread for user '%s' with name: '%s' - try start new tread"%(user,"vk"+str(vk_id)))
                    if check_iteration > 0:
                        # On the first pass (and after a service restart) do not
                        # tell users that their threads are being started:
                        bot_system_message(user,"Не обнаружил потока, слушающего сообщения для пользователя '%s' и его VK id='%s'"%(user,str(vk_id)))
                    # Refresh the user's profile information:
                    if update_user_info(user) == False:
                        log.error("update_user_info")
                    if check_iteration > 0:
                        # On the first pass (and after a service restart) do not
                        # tell users that their threads are being started:
                        bot_system_message(user,"Запускаю процесс получения сообщений из ВК...")
                    t = threading.Thread(name='vk' + str(vk_id), target=vk_receiver_thread, args=(user,))
                    t.setDaemon(True)
                    t.start()
                    started+=1
                    if check_iteration > 0:
                        # On the first pass (and after a service restart) do not
                        # tell users that their threads are being started:
                        bot_system_message(user,"Успешно запустил процесс получения сообщений из ВК.")
        return started
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute start_vk_polls()")
        return 0
def get_name_from_url(url):
    """Return the final path component of *url* (everything after the
    last '/'), or the whole string when it contains no '/'.

    :return: the basename string, or None on unexpected error.
    """
    global log
    try:
        log.debug("=start function=")
        # Greedily strip everything up to and including the last slash.
        basename = re.sub('.*/', '', url)
        return basename
    except Exception as e:
        log.error(get_exception_traceback_descr(e))
        log.error("exception at execute get_name_from_url()")
        return None
def send_file_to_matrix(room,sender_name,attachment):
global log
log.debug("=start function=")
try:
src=attachment["doc"]['url']
size=attachment["doc"]['size']
image_data=get_data_from_url(src)
if image_data==None:
log.error("get image from url: %s"%src)
return False
# определение типа:
ext=attachment["doc"]["ext"]
mimetype="text/plain"
if ext == "txt":
mimetype="text/plain"
elif ext == "doc":
mimetype="application/msword"
elif ext == "xls":
mimetype="application/vnd.ms-excel"
elif ext == "odt":
mimetype="application/vnd.oasis.opendocument.text"
elif ext == "ods":
mimetype="application/vnd.oasis.opendocument.spreadsheet"
mxc_url=upload_file(image_data,mimetype)
if mxc_url == None:
log.error("uload file to matrix server")
return False
log.debug("send file 1")
if "title" in attachment["doc"]:
file_name=attachment["doc"]["title"]
else:
file_name=get_name_from_url(src)
if sender_name!=None:
file_name=sender_name+' прислал файл: '+file_name
if matrix_send_file(room,mxc_url,file_name,mimetype,size) == False:
log.error("send file to room")
return False
except Exception as e:
log.error("exception at parse attachemt '%s': %s"%(attachment["type"],e))
log.error("json of attachment:")
log.error(json.dumps(attachment, | |
-> fib(3) + fib(2)
self.assertIsInstance(fib_expr.left.left, Add) # fib(3) -> fib(2) + fib(1)
self.assertIsInstance(fib_expr.left.left.left, Number) # fib(2) -> 2
self.assertEqual(fib_expr.left.left.left.evaluate(), 2)
self.assertIsInstance(fib_expr.left.left.right, Number) # fib(1) -> 1
self.assertEqual(fib_expr.left.left.right.evaluate(), 1)
self.assertIsInstance(fib_expr.left.right, Number) # fib(2) -> 2 *repeats
self.assertEqual(fib_expr.left.right.evaluate(), 2)
self.assertIsInstance(fib_expr.right, Add) # fib(3) -> fib(2) + fib(1) *repeats
self.assertIsInstance(fib_expr.right.left, Number) # fib(2) -> 2 *repeats
self.assertEqual(fib_expr.right.left.evaluate(), 2)
self.assertIsInstance(fib_expr.right.right, Number) # fib(1) -> 1 *repeats
self.assertEqual(fib_expr.right.right.evaluate(), 1)
# Check repeated calls have resulted in the same object.
self.assertEqual(fib_expr.right.right, fib_expr.left.left.right) # fib(1)
self.assertEqual(fib_expr.right.left, fib_expr.left.left.left) # fib(2)
self.assertEqual(fib_expr.left.right, fib_expr.left.left.left) # fib(2)
self.assertEqual(fib_expr.right, fib_expr.left.left) # fib(3)
fib_value = fib_expr.evaluate()
self.assertIsInstance(fib_value, (int, float))
self.assertEqual(fib_value, 8)
# Check call cache has five calls.
self.assertEqual(len(fib_def.call_cache), 5)
# Just check call cache with fib(5) with fresh parser.
fib_def = dsl_compile("def fib(n): return fib(n-1) + fib(n-2) if n > 2 else n")
assert isinstance(fib_def, FunctionDef)
self.assertEqual(len(fib_def.call_cache), 0)
fib_expr = fib_def.apply(n=5)
self.assertEqual(len(fib_def.call_cache), 5)
self.assertEqual(fib_expr.evaluate(), 8)
self.assertEqual(len(fib_def.call_cache), 5)
def dsl_eval(dsl_source, filename='<unknown>', is_parallel=None, dsl_classes=None, compile_kwds=None,
             evaluation_kwds=None, price_process_name=None, is_multiprocessing=False, pool_size=0, is_verbose=False,
             is_show_source=False, **extra_evaluation_kwds):
    """
    Returns the result of evaluating a compiled module (an expression, or a user defined function).

    An expression (with optional function defs) will evaluate to a simple value.

    A function def will evaluate to a DSL expression, which may then be evaluated (more than one
    function def without an expression is an error).

    :raises DslError: if 'present_time' is supplied in the evaluation kwds.
    :return: a plain value, or ``{'mean': ..., 'stderr': ...}`` when the
        evaluation produces a numpy array of simulated outcomes.
    """
    if price_process_name is None:
        price_process_name = DEFAULT_PRICE_PROCESS_NAME
    if evaluation_kwds is None:
        evaluation_kwds = DslNamespace()
    # assert isinstance(evaluation_kwds, dict)
    evaluation_kwds.update(extra_evaluation_kwds)
    if is_show_source:
        print_("Reading DSL source:")
        print_()
        print_('"""')
        print_(dsl_source.strip())
        print_('"""')
        print_()
    if is_verbose:
        print_("Compiling DSL source, please wait...")
        print_()
    compile_start_time = datetime.datetime.now()
    # Compile the source into a primitive DSL expression, with optional dependency graph.
    dsl_expr = dsl_compile(dsl_source, filename=filename, is_parallel=is_parallel, dsl_classes=dsl_classes,
                           compile_kwds=compile_kwds)
    # Measure the compile_dsl_module time.
    compile_time_delta = datetime.datetime.now() - compile_start_time
    # Check the result of the compilation.
    assert isinstance(dsl_expr, DslExpression), type(dsl_expr)
    if is_verbose:
        print_("Duration of compilation: %s" % compile_time_delta)
        print_()
    # If the expression has any stochastic elements, the evaluation kwds must have an 'observation_date' (datetime).
    if dsl_expr.has_instances(dsl_type=StochasticObject):
        observation_date = evaluation_kwds['observation_date']
        assert isinstance(observation_date, datetime.date)
        if is_verbose:
            print_("Observation time: %s" % observation_date)
            print_()
        # Avoid any confusion with the internal 'present_time' variable.
        if 'present_time' in evaluation_kwds:
            msg = ("Don't set present_time here, set observation_date instead. "
                   "Hint: Adjust effective present time with Fixing or Wait elements.")
            raise DslError(msg)
        # Initialise present_time as observation_date.
        evaluation_kwds['present_time'] = observation_date
    # If the expression has any Market elements, a market simulation is required.
    # NOTE(review): 'observation_date' is only bound in the StochasticObject
    # branch above; a Market without stochastic elements would hit a
    # NameError below - confirm that combination cannot occur.
    if dsl_expr.has_instances(dsl_type=Market):
        # If a market simulation is required, evaluation kwds must have 'path_count' (integer).
        if 'path_count' not in evaluation_kwds:
            evaluation_kwds['path_count'] = DEFAULT_PATH_COUNT
        path_count = evaluation_kwds['path_count']
        assert isinstance(path_count, int)
        # If a market simulation is required, evaluation_kwds must have 'market_calibration' (dict).
        market_calibration = evaluation_kwds['market_calibration']
        assert isinstance(market_calibration, dict)
        # If a market simulation is required, generate the simulated prices using the price process.
        if 'all_market_prices' not in evaluation_kwds:
            if is_verbose:
                print_("Price process: %s" % price_process_name)
                print_()
            price_process = get_price_process(price_process_name)
            if is_verbose:
                print_("Path count: %d" % path_count)
                print_()
            if is_verbose:
                print_("Finding all Market names and Fixing dates...")
                print_()
            # Extract market names from the expression.
            # Todo: Avoid doing this on the dependency graph, when all the Market elements must be in the original.
            # FIX: find_delivery_points() is a generator; the verbose report
            # below calls len() on it (TypeError) and joining it would
            # exhaust it before simulate_future_prices() - materialise once.
            market_names = list(find_delivery_points(dsl_expr))
            # Extract fixing dates from the expression.
            # Todo: Perhaps collect the fixing dates?
            fixing_dates = list_fixing_dates(dsl_expr)
            if is_verbose:
                print_(
                    "Simulating future prices for Market%s '%s' from observation time %s through fixing dates: "
                    "%s." % (
                        '' if len(market_names) == 1 else 's',
                        ", ".join(market_names),
                        "'%04d-%02d-%02d'" % (observation_date.year, observation_date.month, observation_date.day),
                        # Todo: Only print first and last few, if there are loads.
                        ", ".join(["'%04d-%02d-%02d'" % (d.year, d.month, d.day) for d in fixing_dates[:8]]) +
                        (", [...]" if len(fixing_dates) > 9 else '') +
                        ((", '%04d-%02d-%02d'" % (
                            fixing_dates[-1].year, fixing_dates[-1].month, fixing_dates[-1].day)) if len(
                            fixing_dates) > 8 else '')
                    ))
                print_()
            # Simulate the future prices.
            all_market_prices = price_process.simulate_future_prices(market_names, fixing_dates, observation_date,
                                                                     path_count, market_calibration)
            # Add future price simulation to evaluation_kwds.
            evaluation_kwds['all_market_prices'] = all_market_prices
    # Initialise the evaluation timer variable (needed by showProgress thread).
    evalStartTime = None
    if is_parallel:
        if is_verbose:
            len_stubbed_exprs = len(dsl_expr.stubbed_calls)
            lenLeafIds = len(dsl_expr.leaf_ids)
            msg = "Evaluating %d expressions (%d %s) with " % (
                len_stubbed_exprs, lenLeafIds, 'leaf' if lenLeafIds == 1 else 'leaves')
            if is_multiprocessing and pool_size:
                msg += "a multiprocessing pool of %s workers" % pool_size
            else:
                msg += "a single thread"
            msg += ", please wait..."
            print_(msg)
            print_()

            # Define showProgress() thread.
            def showProgress(stop):
                # Poll the runner's result count and render a moving
                # expressions-per-second progress line on stdout.
                progress = 0
                movingRates = []
                while progress < 100 and not stop.is_set():
                    time.sleep(0.3)
                    if evalStartTime is None:
                        continue
                    # Avoid race condition.
                    if not hasattr(dsl_expr, 'runner') or not hasattr(dsl_expr.runner, 'resultIds'):
                        continue
                    if stop.is_set():
                        break
                    try:
                        lenResults = len(dsl_expr.runner.resultIds)
                    except IOError:
                        break
                    resultsTime = datetime.datetime.now()
                    movingRates.append((lenResults, resultsTime))
                    # Keep a sliding window of recent samples for the rate.
                    if len(movingRates) >= 15:
                        movingRates.pop(0)
                    if len(movingRates) > 1:
                        firstLenResults, firstTimeResults = movingRates[0]
                        lastLenResults, lastTimeResults = movingRates[-1]
                        lenDelta = lastLenResults - firstLenResults
                        resultsTimeDelta = lastTimeResults - firstTimeResults
                        timeDeltaSeconds = resultsTimeDelta.seconds + resultsTimeDelta.microseconds * 0.000001
                        rateStr = "%.2f expr/s" % (lenDelta / timeDeltaSeconds)
                    else:
                        rateStr = ''
                    progress = 100.0 * lenResults / len_stubbed_exprs
                    sys.stdout.write(
                        "\rProgress: %01.2f%% (%s/%s) %s " % (progress, lenResults, len_stubbed_exprs, rateStr))
                    sys.stdout.flush()
                sys.stdout.write("\r")
                sys.stdout.flush()

            stop = threading.Event()
            thread = threading.Thread(target=showProgress, args=(stop,))
            # FIX: the original called the deprecated Thread.setDaemon(True)
            # and then also assigned .daemon = True; the attribute
            # assignment alone is the modern, equivalent form.
            thread.daemon = True
            # Start showProgress() thread.
            thread.start()
    # Start timing the evaluation.
    evalStartTime = datetime.datetime.now()
    try:
        # Evaluate the primitive DSL expression.
        value = dsl_expr.evaluate(**evaluation_kwds)
    except:
        # Bare except is deliberate here: stop the progress thread on ANY
        # failure (incl. KeyboardInterrupt), then re-raise unchanged.
        if is_parallel:
            if is_verbose:
                # FIX: Thread.isAlive() was removed in Python 3.9;
                # is_alive() is the supported spelling.
                if thread.is_alive():
                    stop.set()
                    thread.join(timeout=1)
        raise
    # Stop timing the evaluation.
    evalTimeDelta = datetime.datetime.now() - evalStartTime
    if is_verbose:
        timeDeltaSeconds = evalTimeDelta.seconds + evalTimeDelta.microseconds * 0.000001
        if is_parallel:
            len_stubbed_exprs = len(dsl_expr.stubbed_calls)
            rateStr = "(%.2f expr/s)" % (len_stubbed_exprs / timeDeltaSeconds)
        else:
            rateStr = ''
        print_("Duration of evaluation: %s %s" % (evalTimeDelta, rateStr))
        print_()
    # Prepare the result.
    # FIX: the original did `isinstance(value, scipy.ndarray)` and then
    # `scipy.ndarray(value)`. The `scipy.ndarray` alias no longer exists,
    # and calling ndarray(value) interprets `value` as a *shape*, creating
    # an uninitialised array - so mean/stderr were computed on garbage.
    # Use numpy and compute the statistics on `value` itself.
    import numpy
    if isinstance(value, numpy.ndarray):
        mean = value.mean()
        stderr = value.std() / math.sqrt(len(value))
        return {
            'mean': mean,
            'stderr': stderr
        }
    else:
        return value
def dsl_compile(dsl_source, filename='<unknown>', dsl_classes=None, compile_kwds=None, **extraCompileKwds):
    """
    Returns a DSL expression, created according to the given DSL source module.

    That is, if the source module contains a function def and an expression which
    calls that function, then the expression's function call will be evaluated
    and the resulting DSL expression will be substituted for the function call
    in the module's expression, so that calls to user defined functions are eliminated
    and a single DSL expression is obtained.

    If the source module contains a function def, but no expression, the module is compiled
    into a function def object. Calling .apply() on a function def object will return a DSL
    expression object, which can be evaluated by calling its .evaluate() method.
    """
    # Fold the extra keyword arguments into a single namespace of compile options.
    if compile_kwds is None:
        compile_kwds = DslNamespace()
    compile_kwds.update(extraCompileKwds)
    # Parse the source into a DSL module object, then compile that module:
    # a dependency graph when 'is_parallel' is set in the compile options,
    # otherwise a single primitive expression.
    parsed_module = dsl_parse(dsl_source, filename=filename, dsl_classes=dsl_classes)
    return compile_dsl_module(parsed_module, DslNamespace(), compile_kwds)
def find_delivery_points(dsl_expr):
    """Yield each distinct delivery point found among the expression's
    market elements, in first-seen order."""
    seen = set()
    for market in dsl_expr.find_instances(dsl_type=AbstractMarket):
        point = market.get_delivery_point()
        if point in seen:
            continue  # Deduplicate.
        seen.add(point)
        yield point
def list_fixing_dates(dsl_expr):
    """Return the fixing dates found in the expression, sorted ascending.

    NOTE(review): the original comment said "unique", but no
    deduplication is performed - duplicate dates are kept; confirm
    whether that is the intended contract.
    """
    # sorted() accepts any iterable directly; the intermediate list()
    # the original wrapped around the generator was redundant.
    return sorted(find_fixing_dates(dsl_expr))
def find_fixing_dates(dsl_expr):
    """Yield the date of every Fixing element in the expression that
    carries an explicit (non-None) date."""
    for fixing in dsl_expr.find_instances(dsl_type=Fixing):
        if fixing.date is None:
            continue
        yield fixing.date
def compile_dsl_module(dsl_module, dsl_locals=None, dsl_globals=None):
"""
Returns something that can be evaluated.
"""
# It's a module compilation, so create a new namespace "context".
if dsl_locals is None:
dsl_locals = {}
dsl_locals = DslNamespace(dsl_locals)
if dsl_globals is None:
dsl_globals = {}
dsl_globals = DslNamespace(dsl_globals)
# Can't do much with an empty module.
if len(dsl_module.body) == 0:
raise DslSyntaxError('empty module', node=dsl_module.node)
function_defs, expressions = extract_defs_and_exprs(dsl_module, dsl_globals)
# Handle different combinations of functions and module level expressions in different ways.
# Todo: Simplify this, but support library files first?
# Can't meaningfully evaluate more | |
if features[0] > 0.0020365312229841948
if features[3] <= 0.022401843452826142:
return 0
else: # if features[3] > 0.022401843452826142
return 1
else: # if features[2] > 0.018328781006857753
if features[3] <= 0.06313246791251004:
if features[15] <= 0.0040730624459683895:
return 1
else: # if features[15] > 0.0040730624459683895
return 1
else: # if features[3] > 0.06313246791251004
if features[0] <= 0.03462103079073131:
return 1
else: # if features[0] > 0.03462103079073131
return 0
else: # if features[19] > 0.018328781006857753
if features[0] <= 0.0040730624459683895:
if features[2] <= 0.0020365312229841948:
if features[14] <= 0.008146124891936779:
return 1
else: # if features[14] > 0.008146124891936779
return 0
else: # if features[2] > 0.0020365312229841948
if features[19] <= 0.03462103079073131:
return 1
else: # if features[19] > 0.03462103079073131
return 0
else: # if features[0] > 0.0040730624459683895
if features[8] <= 0.05091328057460487:
if features[13] <= 0.010182656114920974:
return 1
else: # if features[13] > 0.010182656114920974
return 0
else: # if features[8] > 0.05091328057460487
if features[0] <= 0.03462103079073131:
return 1
else: # if features[0] > 0.03462103079073131
return 0
##################################################
def tree_5b(features):
    """Hand-flattened decision-tree classifier (variant 5b).

    Behaviour is identical to the generated nested-if version: branches
    whose two leaves returned the same class, and tests made unreachable
    by an identical enclosing test, have been collapsed.

    :param features: indexable sequence of numeric feature values
        (indices 0..19 are consulted).
    :return: predicted class label, 0 or 1.
    """
    if features[12] <= 0.0040730624459683895:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.0040730624459683895:
                return 0  # both f17 leaves returned 0
            if features[19] <= 0.008146124891936779:
                return 1 if features[18] <= 0.0040730624459683895 else 0
            return 1  # both f6 leaves returned 1
        if features[2] <= 0.012219187337905169:
            if features[19] <= 0.0:
                return 1 if features[9] <= 0.0040730624459683895 else 0
            return 1 if features[10] <= 0.0040730624459683895 else 0
        if features[1] <= 0.012219187337905169:
            return 1 if features[6] <= 0.008146124891936779 else 0
        return 1
    if features[19] <= 0.016292249783873558:
        if features[2] <= 0.020365312229841948:
            if features[0] <= 0.0:
                return 0 if features[10] <= 0.0040730624459683895 else 1
            return 0 if features[3] <= 0.020365312229841948 else 1
        if features[3] <= 0.06109593668952584:
            return 1  # both f15 leaves returned 1
        return 1 if features[0] <= 0.032584499567747116 else 0
    if features[0] <= 0.0040730624459683895:
        if features[2] <= 0.0040730624459683895:
            return 1 if features[14] <= 0.008146124891936779 else 0
        return 1 if features[19] <= 0.036657562013715506 else 0
    if features[8] <= 0.052949811797589064:
        return 1 if features[13] <= 0.012219187337905169 else 0
    return 1 if features[0] <= 0.036657562013715506 else 0
##################################################
def tree_4b(features):
    """Hand-flattened decision-tree classifier (variant 4b).

    Behaviour is identical to the generated nested-if version: branches
    whose two leaves returned the same class, and tests made unreachable
    by an identical enclosing test, have been collapsed.

    :param features: indexable sequence of numeric feature values
        (indices 0..19 are consulted).
    :return: predicted class label, 0 or 1.
    """
    if features[12] <= 0.0:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.008146124891936779:
                return 0  # every leaf on this side returned 0
            # The repeated f19 <= 0.008146... test can never hold here,
            # and both reachable f6 leaves returned 1.
            return 1
        # The repeated f2 <= 0.008146... test can never hold here.
        if features[1] <= 0.016292249783873558:
            return 1 if features[6] <= 0.008146124891936779 else 0
        return 1
    if features[19] <= 0.016292249783873558:
        if features[2] <= 0.016292249783873558:
            if features[0] <= 0.0:
                return 0 if features[10] <= 0.008146124891936779 else 1
            return 0 if features[3] <= 0.024438374675810337 else 1
        if features[3] <= 0.05702287424355745:
            return 1  # both f15 leaves returned 1
        return 1 if features[0] <= 0.032584499567747116 else 0
    if features[0] <= 0.0:
        if features[2] <= 0.0:
            return 1 if features[14] <= 0.008146124891936779 else 0
        return 1 if features[19] <= 0.032584499567747116 else 0
    if features[8] <= 0.048876749351620674:
        return 1 if features[13] <= 0.008146124891936779 else 0
    return 1 if features[0] <= 0.032584499567747116 else 0
##################################################
def tree_3b(features):
    """Hand-flattened decision-tree classifier (variant 3b).

    Behaviour is identical to the generated nested-if version: branches
    whose two leaves returned the same class, and tests made unreachable
    by an identical enclosing test, have been collapsed.

    :param features: indexable sequence of numeric feature values
        (indices 0..19 are consulted).
    :return: predicted class label, 0 or 1.
    """
    if features[12] <= 0.0:
        if features[2] <= 0.016292249783873558:
            if features[19] <= 0.0:
                return 0  # every leaf on this side returned 0
            # The repeated f19 <= 0.0 test can never hold here, and both
            # reachable f6 leaves returned 1.
            return 1
        # The repeated f2 <= 0.016292... test can never hold here.
        if features[1] <= 0.016292249783873558:
            return 1 if features[6] <= 0.016292249783873558 else 0
        return 1
    if features[19] <= 0.016292249783873558:
        if features[2] <= 0.016292249783873558:
            if features[0] <= 0.0:
                return 0 if features[10] <= 0.0 else 1
            return 0 if features[3] <= 0.016292249783873558 else 1
        if features[3] <= 0.048876749351620674:
            return 1  # both f15 leaves returned 1
        return 1 if features[0] <= 0.032584499567747116 else 0
    if features[0] <= 0.0:
        if features[2] <= 0.0:
            return 1 if features[14] <= 0.016292249783873558 else 0
        return 1 if features[19] <= 0.032584499567747116 else 0
    if features[8] <= 0.048876749351620674:
        return 1 if features[13] <= 0.016292249783873558 else 0
    return 1 if features[0] <= 0.032584499567747116 else 0
##################################################
def tree_2b(features):
if features[12] <= 0.0:
if features[2] <= 0.0:
if features[19] <= 0.0:
if features[19] <= 0.0:
if features[17] <= 0.0:
return 0
else: # if features[17] > 0.0
return 0
else: # if features[19] > 0.0
| |
`+edit trophyboard sort loss`
**Required Permissions**
:warning: Manage Server
"""
if sort_by not in ['trophies', 'gain', 'loss']:
return await ctx.send("Oops, that didn't look right! Try `trophies`, `gain` or `loss` instead.")
query = "UPDATE boards SET sort_by = $1 WHERE channel_id = $2"
await ctx.db.execute(query, sort_by, ctx.config.channel_id)
await self.bot.donationboard.update_board(ctx.config.channel_id)
await ctx.confirm()
@edit.group(name='donationlog')
@manage_guild()
@requires_config('donationlog', invalidate=True)
async def edit_donationlog(self, ctx):
    """[Group] Edit the donationlog settings."""
    # A subcommand was invoked: it handles the interaction itself.
    if ctx.invoked_subcommand is not None:
        return
    # Invoked bare: show the group's help text instead.
    await ctx.send_help(ctx.command)
@edit_donationlog.command(name='interval')
async def edit_donationlog_interval(self, ctx, channel: typing.Optional[TextChannel], minutes: int = 1):
    """Update the interval (in minutes) for which the bot will log your donations.

    **Parameters**
    :key: Discord Channel (mention etc.)
    :key: Interval length (in minutes)

    **Format**
    :information_source: `+edit donationlog interval #CHANNEL MINUTES`

    **Example**
    :white_check_mark: `+edit donationlog interval #logging 5`

    **Required Permissions**
    :warning: Manage Server
    """
    # Bail out early when no donationlog is configured for this channel.
    if not ctx.config:
        return await ctx.send(
            "Oops! It doesn't look like a donationlog is setup here. "
            "Try `+info log` to find where the registered channels are!"
        )
    # Build a Postgres interval from the minutes value, e.g. '5 minutes'.
    query = """UPDATE logs
               SET interval = ($1 ||' minutes')::interval
               WHERE channel_id=$2
               AND type = $3
            """
    await ctx.db.execute(query, str(minutes), ctx.config.channel_id, 'donation')
    confirmation = (
        f'Logs for {ctx.config.channel.mention} have been changed to {minutes} minutes. '
        'Find which clans this affects with `+help info donationlog`'
    )
    await ctx.send(confirmation)
@edit_donationlog.command(name='toggle')
async def edit_donationlog_toggle(self, ctx):
    """Toggle the donation log on and off.

    **Format**
    :information_source: `+edit donationlog toggle`

    **Example**
    :white_check_mark: `+edit donationlog toggle`

    **Required Permissions**
    :warning: Manage Server
    """
    # Bail out early when no donationlog is configured for this channel.
    if not ctx.config:
        return await ctx.send(
            "Oops! It doesn't look like a donationlog is setup here. "
            "Try `+info log` to find where the registered channels are!"
        )
    query = """UPDATE logs
               SET toggle = NOT toggle
               WHERE channel_id=$1
               AND type = $2
               RETURNING toggle
            """
    # BUG FIX: fetch() returns a *list* of records, which is truthy
    # whenever the UPDATE matched a row - so the reported state depended
    # on row existence, not on the new toggle value. Read the value
    # returned by the RETURNING clause instead.
    row = await ctx.db.fetchrow(query, ctx.config.channel_id, 'donation')
    toggle = row['toggle'] if row else False
    if toggle:
        condition = 'on'
    else:
        condition = 'off'
    await ctx.send(f'Logs for {ctx.config.channel.mention} have been turned {condition}.')
@edit.group(name='trophylog')
@manage_guild()
@requires_config('trophylog', invalidate=True)
async def edit_trophylog(self, ctx):
    """[Group] Edit the trophylog settings."""
    # A subcommand was invoked: it handles the interaction itself.
    if ctx.invoked_subcommand:
        return
    # Invoked bare: show the group's help text instead.
    await ctx.send_help(ctx.command)
@edit_trophylog.command(name='interval')
async def edit_trophylog_interval(self, ctx, channel: typing.Optional[TextChannel], minutes: int = 1):
    """Update the interval (in minutes) for which the bot will log your trophies.

    **Parameters**
    :key: Discord Channel (mention etc.)
    :key: Interval length (in minutes)

    **Format**
    :information_source: `+edit trophylog interval #CHANNEL MINUTES`

    **Example**
    :white_check_mark: `+edit trophylog interval #logging 5`

    **Required Permissions**
    :warning: Manage Server
    """
    # Bail out early when no trophylog is configured for this channel.
    if not ctx.config:
        return await ctx.send(
            "Oops! It doesn't look like a trophylog is setup here. "
            "Try `+info log` to find where the registered channels are!"
        )
    # Build a Postgres interval from the minutes value, e.g. '5 minutes'.
    query = """UPDATE logs
               SET interval = ($1 ||' minutes')::interval
               WHERE channel_id=$2
               AND type = $3
            """
    await ctx.db.execute(query, str(minutes), ctx.config.channel_id, 'trophy')
    confirmation = (
        f'Logs for {ctx.config.channel.mention} have been changed to {minutes} minutes. '
        'Find which clans this affects with `+help info trophylog`'
    )
    await ctx.send(confirmation)
@edit_trophylog.command(name='toggle')
async def edit_trophylog_toggle(self, ctx):
    """Toggle the trophy log on and off.

    **Format**
    :information_source: `+edit trophylog toggle`

    **Example**
    :white_check_mark: `+edit trophylog toggle`

    **Required Permissions**
    :warning: Manage Server
    """
    # Guard: the `requires_config` decorator leaves ctx.config unset when no
    # trophylog is registered for this channel.
    if not ctx.config:
        return await ctx.send('Oops! It doesn\'t look like a trophylog is setup here. '
                              'Try `+info log` to find where the registered channels are!')

    query = """UPDATE logs
               SET toggle = NOT toggle
               WHERE channel_id=$1
               AND type = $2
               RETURNING toggle
            """
    # BUG FIX: `execute` returns a status string such as "UPDATE 1", which is
    # always truthy — so the reply always said "on". Use `fetchrow` to read the
    # value produced by the RETURNING clause instead.
    row = await ctx.db.fetchrow(query, ctx.config.channel_id, 'trophy')
    condition = 'on' if row and row['toggle'] else 'off'
    await ctx.send(f'Logs for {ctx.config.channel.mention} have been turned {condition}.')
@edit.command(name='event')
@manage_guild()
@requires_config('event', invalidate=True)
async def edit_event(self, ctx, *, event_name: str = None):
"""Edit a variety of settings for the current event.
**Parameters**
:key: Event name
**Format**
:information_source: `+edit event EVENT_NAME`
**Example**
:white_check_mark: `+edit event Donation Bot Event`
**Required Permissions**
:warning: Manage Server
"""
if event_name:
query = """SELECT id FROM events
WHERE guild_id = $1
AND event_name = $2"""
fetch = await self.bot.pool.fetchrow(query, ctx.guild.id, event_name)
if fetch:
event_id = fetch['id']
else:
# ideally this would just display a list of events and let the user pick, but I
# couldn't figure out the proper sequence of if event_name/if event_id
return await ctx.send("There is no event on this server with that name. Try `+edit event` "
"to pick from a list of events on this server.")
else:
# No event name provided or I didn't understand the name I was given
query = """SELECT id, event_name, start
FROM events
WHERE guild_id = $1
ORDER BY start"""
fetch = await self.bot.pool.fetch(query, ctx.guild.id)
if len(fetch) == 0 or not fetch:
return await ctx.send("There are no events currently set up on this server. "
"Try `+add event`")
elif len(fetch) == 1:
event_id = fetch[0]['id']
else:
table = CLYTable()
fmt = f"Events on {ctx.guild}:\n\n"
reactions = []
counter = 0
for event in fetch:
days_until = event['start'].date() - datetime.datetime.utcnow().date()
table.add_row([counter, days_until.days, event['event_name']])
counter += 1
reactions.append(f"{counter}\N{combining enclosing keycap}")
render = table.events_list()
fmt += f'{render}\n\nPlease select the reaction that corresponds with the event you would ' \
f'like to remove.'
e = discord.Embed(colour=self.bot.colour,
description=fmt)
msg = await ctx.send(embed=e)
for r in reactions:
await msg.add_reaction(r)
def check(r, u):
return str(r) in reactions and u.id == ctx.author.id and r.message.id == msg.id
try:
r, u = await self.bot.wait_for('reaction_add', check=check, timeout=60.0)
except asyncio.TimeoutError:
await msg.clear_reactions()
return await ctx.send("I feel like I'm being ignored. MAybe try again later?")
index = reactions.index(str(r))
event_id = fetch[index]['id']
# Now that we have the event_id, let's edit things
query = """SELECT event_name, start, finish
FROM events
WHERE id = $1"""
event = await self.bot.pool.fetchrow(query, event_id)
def check_author(m):
return m.author == ctx.author
answer = await ctx.prompt(f"Event Name: **{event['event_name']}**\n"
f"Would you like to edit the event name?")
if answer:
try:
await ctx.send('Please enter the new name for this event.')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
new_event_name = response.content
except asyncio.TimeoutError:
new_event_name = event['event_name']
else:
new_event_name = event['event_name']
answer = await ctx.prompt(f"Start Date: **{event['start'].date()}\n"
f"Would you like to edit the date?")
if answer:
try:
await ctx.send('Please enter the new start date. (YYYY-MM-DD)')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
new_start_date = await DateConverter().convert(ctx, response.clean_content)
except (ValueError, commands.BadArgument):
await ctx.send('Date must be in the YYYY-MM-DD format. I\'m going to keep '
'the current start date and you can change it later if you like.')
new_start_date = event['start'].date()
except asyncio.TimeoutError:
await ctx.send('Seems as though you don\'t really know the answer. I\'m just going '
'to keep the date I have for now.')
new_start_date = event['start'].date()
else:
new_start_date = event['start'].date()
answer = await ctx.prompt(f"Start Time: **{event['start'].time()}\n"
f"Would you like to edit the time?")
if answer:
try:
await ctx.send('Please enter the new start time. (Please provide HH:MM in UTC)')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
hour, minute = map(int, response.content.split(':'))
if hour < 13:
try:
await ctx.send('And is that AM or PM?')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
if response.content.lower() == 'pm':
hour += 12
except asyncio.TimeoutError:
if hour < 6:
await ctx.send('Well I\'ll just go with PM then.')
hour += 12
else:
await ctx.send('I\'m going to assume you want AM.')
new_start_time = datetime.time(hour, minute)
except asyncio.TimeoutError:
await ctx.send('Time\'s up my friend. Start time will remain the same!')
new_start_time = event['start'].time()
else:
new_start_time = event['start'].time()
answer = await ctx.prompt(f"End Date: **{event['finish'].date()}\n"
f"Would you like to edit the date?")
if answer:
try:
await ctx.send('Please enter the new end date. (YYYY-MM-DD)')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
new_end_date = await DateConverter().convert(ctx, response.clean_content)
except (ValueError, commands.BadArgument):
await ctx.send('Date must be in the YYYY-MM-DD format. I\'m going to keep '
'the current end date and you can change it later if you like.')
new_end_date = event['finish'].date()
except asyncio.TimeoutError:
await ctx.send('Seems as though you don\'t really know the answer. I\'m just going '
'to keep the date I have for now.')
new_end_date = event['finish'].date()
else:
new_end_date = event['finish'].date()
answer = await ctx.prompt(f"End Time: **{event['finish'].time()}\n"
f"Would you like to edit the time?")
if answer:
try:
await ctx.send('Please enter the new end time. (Please provide HH:MM in UTC)')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
hour, minute = map(int, response.content.split(':'))
if hour < 13:
try:
await ctx.send('And is that AM or PM?')
response = await ctx.bot.wait_for('message', check=check_author, timeout=60.0)
if response.content.lower() == 'pm':
| |
# Generated by Django 1.11.24 on 2019-09-10 10:28
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import model_utils.fields
from django.db import migrations, models
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.shims
import waldur_core.core.validators
import waldur_core.logging.loggers
import waldur_core.structure.models
import waldur_openstack.openstack_tenant.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('structure', '0001_squashed_0054'),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Backup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'kept_until',
models.DateTimeField(
blank=True,
help_text='Guaranteed time of backup retention. If null - keep forever.',
null=True,
),
),
(
'metadata',
waldur_core.core.fields.JSONField(
blank=True,
help_text='Additional information about backup, can be used for backup restoration or deletion',
),
),
],
options={'abstract': False,},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.structure.models.StructureLoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='BackupRestoration',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
(
'backup',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='restorations',
to='openstack_tenant.Backup',
),
),
],
options={'abstract': False,},
),
migrations.CreateModel(
name='BackupSchedule',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'schedule',
waldur_core.core.fields.CronScheduleField(
max_length=15,
validators=[
waldur_core.core.validators.validate_cron_schedule,
waldur_core.core.validators.MinCronValueValidator(1),
],
),
),
('next_trigger_at', models.DateTimeField(null=True)),
(
'timezone',
models.CharField(
default=django.utils.timezone.get_current_timezone_name,
max_length=50,
),
),
('is_active', models.BooleanField(default=False)),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'retention_time',
models.PositiveIntegerField(
help_text='Retention time in days, if 0 - resource will be kept forever'
),
),
('maximal_number_of_resources', models.PositiveSmallIntegerField()),
(
'call_count',
models.PositiveSmallIntegerField(
default=0,
help_text='How many times a resource schedule was called.',
),
),
],
options={'abstract': False,},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.structure.models.StructureLoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='Flavor',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'cores',
models.PositiveSmallIntegerField(
help_text='Number of cores in a VM'
),
),
('ram', models.PositiveIntegerField(help_text='Memory size in MiB')),
(
'disk',
models.PositiveIntegerField(help_text='Root disk size in MiB'),
),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
bases=(
waldur_core.logging.loggers.LoggableMixin,
waldur_core.core.models.BackendModelMixin,
models.Model,
),
),
migrations.CreateModel(
name='FloatingIP',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'address',
models.GenericIPAddressField(
default=None, null=True, protocol='IPv4'
),
),
('runtime_state', models.CharField(max_length=30)),
(
'backend_network_id',
models.CharField(editable=False, max_length=255),
),
(
'is_booked',
models.BooleanField(
default=False,
help_text='Marks if floating IP has been booked for provisioning.',
),
),
],
options={
'verbose_name': 'Floating IP',
'verbose_name_plural': 'Floating IPs',
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Image',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'min_disk',
models.PositiveIntegerField(
default=0, help_text='Minimum disk size in MiB'
),
),
(
'min_ram',
models.PositiveIntegerField(
default=0, help_text='Minimum memory size in MiB'
),
),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={'abstract': False,},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Instance',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
('latitude', models.FloatField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
(
'cores',
models.PositiveSmallIntegerField(
default=0, help_text='Number of cores in a VM'
),
),
(
'ram',
models.PositiveIntegerField(
default=0, help_text='Memory size in MiB'
),
),
(
'disk',
models.PositiveIntegerField(
default=0, help_text='Disk size in MiB'
),
),
(
'min_ram',
models.PositiveIntegerField(
default=0, help_text='Minimum memory size in MiB'
),
),
(
'min_disk',
models.PositiveIntegerField(
default=0, help_text='Minimum disk size in MiB'
),
),
('image_name', models.CharField(blank=True, max_length=150)),
('key_name', models.CharField(blank=True, max_length=50)),
('key_fingerprint', models.CharField(blank=True, max_length=47)),
(
'user_data',
models.TextField(
blank=True,
help_text='Additional data that will be added to instance on provisioning',
),
),
('start_time', models.DateTimeField(blank=True, null=True)),
('backend_id', models.CharField(blank=True, max_length=255, null=True)),
('flavor_name', models.CharField(blank=True, max_length=255)),
(
'flavor_disk',
models.PositiveIntegerField(
default=0, help_text='Flavor disk size in MiB'
),
),
('action', models.CharField(blank=True, max_length=50)),
('action_details', waldur_core.core.fields.JSONField(default=dict)),
],
options={'ordering': ['name', 'created']},
bases=(
waldur_openstack.openstack_tenant.models.TenantQuotaMixin,
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.structure.models.StructureLoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='InstanceAvailabilityZone',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('available', models.BooleanField(default=True)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='InternalIP',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('mac_address', models.CharField(blank=True, max_length=32)),
(
'ip4_address',
models.GenericIPAddressField(
blank=True, null=True, protocol='IPv4'
),
),
(
'ip6_address',
models.GenericIPAddressField(
blank=True, null=True, protocol='IPv6'
),
),
('backend_id', models.CharField(max_length=255, null=True)),
(
'instance',
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='internal_ips_set',
to='openstack_tenant.Instance',
),
),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Network',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
('is_external', models.BooleanField(default=False)),
('type', models.CharField(blank=True, max_length=50)),
('segmentation_id', models.IntegerField(null=True)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='OpenStackTenantService',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
(
'available_for_all',
models.BooleanField(
default=False,
help_text='Service will be automatically added to all customers projects if it is available for all',
),
),
(
'customer',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='structure.Customer',
verbose_name='organization',
),
),
],
options={
'verbose_name': 'OpenStackTenant provider',
'verbose_name_plural': 'OpenStackTenant providers',
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.structure.models.StructureLoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='OpenStackTenantServiceProjectLink',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'project',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='structure.Project',
),
),
(
'service',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='openstack_tenant.OpenStackTenantService',
),
),
],
options={
'abstract': False,
'verbose_name': 'OpenStackTenant provider project link',
'verbose_name_plural': 'OpenStackTenant provider project links',
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SecurityGroup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='SecurityGroupRule',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'protocol',
models.CharField(
blank=True,
choices=[('tcp', 'tcp'), ('udp', 'udp'), ('icmp', 'icmp')],
max_length=4,
),
),
(
'from_port',
models.IntegerField(
null=True,
validators=[django.core.validators.MaxValueValidator(65535)],
),
),
(
'to_port',
models.IntegerField(
null=True,
validators=[django.core.validators.MaxValueValidator(65535)],
),
),
('cidr', models.CharField(blank=True, max_length=32)),
('backend_id', models.CharField(blank=True, max_length=128)),
(
'security_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='rules',
to='openstack_tenant.SecurityGroup',
),
),
],
options={'abstract': False,},
),
migrations.CreateModel(
name='Snapshot',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('size', models.PositiveIntegerField(help_text='Size in MiB')),
('backend_id', models.CharField(blank=True, max_length=255, null=True)),
('metadata', waldur_core.core.fields.JSONField(blank=True)),
('action', models.CharField(blank=True, max_length=50)),
('action_details', waldur_core.core.fields.JSONField(default=dict)),
(
'kept_until',
models.DateTimeField(
blank=True,
help_text='Guaranteed time of snapshot retention. If null - keep forever.',
null=True,
),
),
(
'service_project_link',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='snapshots',
to='openstack_tenant.OpenStackTenantServiceProjectLink',
),
),
],
bases=(
waldur_openstack.openstack_tenant.models.TenantQuotaMixin,
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.structure.models.StructureLoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SnapshotRestoration',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
(
'snapshot',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='restorations',
to='openstack_tenant.Snapshot',
),
),
],
options={'abstract': False,},
),
migrations.CreateModel(
name='SnapshotSchedule',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.