input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>cinder/volume/drivers/datera.py<gh_stars>10-100
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import re
import uuid
import eventlet
import ipaddress
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import interface
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils as volutils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Driver configuration options registered with oslo.config.
# NOTE: IntOpt defaults must be integers; the previous string defaults
# ('3', '120', '5') relied on implicit coercion and fail strict
# oslo.config type validation.
d_opts = [
    cfg.StrOpt('datera_api_port',
               default='7717',
               help='Datera API port.'),
    cfg.StrOpt('datera_api_version',
               default='2',
               help='Datera API version.'),
    cfg.IntOpt('datera_num_replicas',
               default=3,
               deprecated_for_removal=True,
               help='Number of replicas to create of an inode.'),
    cfg.IntOpt('datera_503_timeout',
               default=120,
               help='Timeout for HTTP 503 retry messages'),
    cfg.IntOpt('datera_503_interval',
               default=5,
               help='Interval between 503 retries'),
    cfg.BoolOpt('datera_debug',
                default=False,
                help="True to set function arg and return logging"),
    cfg.BoolOpt('datera_acl_allow_all',
                default=False,
                deprecated_for_removal=True,
                help="True to set acl 'allow_all' on volumes "
                     "created"),
    cfg.BoolOpt('datera_debug_replica_count_override',
                default=False,
                help="ONLY FOR DEBUG/TESTING PURPOSES\n"
                     "True to set replica_count to 1")
]
# Register the driver options and pull in the shared SSL flag so this
# module can read CONF.driver_use_ssl.
CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
# Module-level constants shared across the driver.
DEFAULT_SI_SLEEP = 10
INITIATOR_GROUP_PREFIX = "IG-"
OS_PREFIX = "OS-"
UNMANAGE_PREFIX = "UNMANAGED-"
# UUID4 matcher, taken from http://stackoverflow.com/a/18516125.
# Old-style %-formatting is used deliberately: the regex's own curly
# braces would clash with str.format placeholders.
UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
                "[a-f0-9]{3}-?[a-f0-9]{12}")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)


def _url_ai():
    """Base endpoint for app instances."""
    return 'app_instances'


def _url_ai_inst():
    """Endpoint for one app instance; '{}' is its name placeholder."""
    return _url_ai() + '/{}'


def _url_si():
    """Endpoint listing storage instances under an app instance."""
    return _url_ai_inst() + '/storage_instances'


def _url_si_inst(storage_name):
    """Endpoint for one storage instance under an app instance."""
    return (_url_si() + '/{}').format('{}', storage_name)


def _url_vol(storage_name):
    """Endpoint listing volumes under a storage instance."""
    return _url_si_inst(storage_name) + '/volumes'


def _url_vol_inst(storage_name, volume_name):
    """Endpoint for one volume under a storage instance."""
    return (_url_vol(storage_name) + '/{}').format('{}', volume_name)


# Basic URL structure for the most common API endpoints; most other
# endpoints are constructed from these callables.
URL_TEMPLATES = {
    'ai': _url_ai,
    'ai_inst': _url_ai_inst,
    'si': _url_si,
    'si_inst': _url_si_inst,
    'vol': _url_vol,
    'vol_inst': _url_vol_inst,
}
def _get_name(name):
    """Return *name* prefixed with the OpenStack namespace prefix."""
    return OS_PREFIX + name
def _get_unmanaged(name):
    """Return *name* prefixed with the unmanaged-volume prefix."""
    return UNMANAGE_PREFIX + name
def _authenticated(func):
"""Ensure the driver is authenticated to make a request.
In do_setup() we fetch an auth token and store it. If that expires when
we do API request, we'll fetch a new one.
"""
@functools.wraps(func)
def func_wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except exception.NotAuthorized:
# Prevent recursion loop. After the self arg is the
# resource_type arg from _issue_api_request(). If attempt to
# login failed, we should just give up.
if args[0] == 'login':
raise
# Token might've expired, get a new one, try again.
self._login()
return func(self, *args, **kwargs)
return func_wrapper
@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraDriver(san.SanISCSIDriver):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
2.1 - Multipath, ACL and reorg
2.2 - Capabilites List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
"""
VERSION = '2.2'
CI_WIKI_NAME = "datera-ci"
    def __init__(self, *args, **kwargs):
        """Initialize driver state from the Datera configuration options."""
        super(DateraDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(d_opts)
        # SAN credentials double as the Datera API credentials.
        self.username = self.configuration.san_login
        self.password = self.configuration.san_password
        self.cluster_stats = {}
        self.datera_api_token = None
        # Pacing for HTTP 503 retries: poll every ``interval`` seconds
        # until ``datera_503_timeout`` seconds have elapsed.
        self.interval = self.configuration.datera_503_interval
        # NOTE(review): true division yields a float under Python 3;
        # presumably an integer retry count is intended -- confirm.
        self.retry_attempts = (self.configuration.datera_503_timeout /
                               self.interval)
        # Short random prefix so initiator names created by this driver
        # instance are distinguishable across services.
        self.driver_prefix = str(uuid.uuid4())[:4]
        self.datera_debug = self.configuration.datera_debug
        if self.datera_debug:
            utils.setup_tracing(['method'])
def do_setup(self, context):
# If we can't authenticate through the old and new method, just fail
# now.
if not all([self.username, self.password]):
msg = _("san_login and/or san_password is not set for Datera "
"driver in the cinder.conf. Set this information and "
"start the cinder-volume service again.")
LOG.error(msg)
raise exception.InvalidInput(msg)
self._login()
    @utils.retry(exception.VolumeDriverException, retries=3)
    def _wait_for_resource(self, id, resource_type, policies):
        """Poll once for the resource's volume to reach 'available'.

        Raises VolumeDriverException while the volume is not ready; the
        @utils.retry decorator converts that into up to 3 polls.
        """
        result = self._issue_api_request(resource_type, 'get', id)
        if result['storage_instances'][
                policies['default_storage_name']]['volumes'][
                policies['default_volume_name']]['op_state'] == 'available':
            return
        else:
            raise exception.VolumeDriverException(
                message=_('Resource not ready.'))
    def _create_resource(self, resource, resource_type, body):
        """POST *body* to create *resource_type*, then wait until ready.

        :param resource: Cinder resource dict (e.g. volume) being created;
            used for policy lookup and error reporting.
        :param resource_type: API endpoint name, e.g. URL_TEMPLATES['ai']().
        :param body: JSON-serializable request payload.
        """
        result = None
        try:
            result = self._issue_api_request(resource_type, 'post', body=body)
        except exception.Invalid:
            type_id = resource.get('volume_type_id', None)
            if resource_type == 'volumes' and type_id:
                # Bad extra-specs are the usual cause of an Invalid
                # response when creating volumes.
                LOG.error(_LE("Creation request failed. Please verify the "
                              "extra-specs set for your volume types are "
                              "entered correctly."))
            raise
        else:
            policies = self._get_policies_for_resource(resource)
            # Handle updating QOS Policies
            if resource_type == URL_TEMPLATES['ai']():
                self._update_qos(resource, policies)
            # Fast path: the volume may already be available.
            if result['storage_instances'][policies['default_storage_name']][
                    'volumes'][policies['default_volume_name']][
                    'op_state'] == 'available':
                return
            # Otherwise poll (with retries) until it becomes available.
            self._wait_for_resource(_get_name(resource['id']),
                                    resource_type,
                                    policies)
def create_volume(self, volume):
"""Create a logical volume."""
# Generate App Instance, Storage Instance and Volume
# Volume ID will be used as the App Instance Name
# Storage Instance and Volumes will have standard names
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = policies['default_storage_name']
volume_name = policies['default_volume_name']
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': _get_name(volume['id']),
'access_control_mode': 'deny_all',
'storage_instances': {
storage_name: {
'name': storage_name,
'volumes': {
volume_name: {
'name': volume_name,
'size': volume['size'],
'replica_count': num_replicas,
'snapshot_policies': {
}
}
}
}
}
})
self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)
    def extend_volume(self, volume, new_size):
        """Resize the backing volume to *new_size*.

        The app instance must be offline while resizing; if it was
        online it is detached first and re-exported afterwards.
        """
        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])))
        if app_inst['admin_state'] == 'online':
            reonline = True
            self.detach_volume(None, volume, delete_initiator=False)
        # Change Volume Size
        app_inst = _get_name(volume['id'])
        data = {
            'size': new_size
        }
        policies = self._get_policies_for_resource(volume)
        self._issue_api_request(
            URL_TEMPLATES['vol_inst'](
                policies['default_storage_name'],
                policies['default_volume_name']).format(app_inst),
            method='put',
            body=data)
        # Online Volume, if it was online before
        if reonline:
            self.create_export(None, volume, None)
def create_cloned_volume(self, volume, src_vref):
policies = self._get_policies_for_resource(volume)
src = "/" + URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']).format(_get_name(src_vref['id']))
data = {
'create_mode': 'openstack',
'name': _get_name(volume['id']),
'uuid': str(volume['id']),
'clone_src': src,
}
self._issue_api_request(URL_TEMPLATES['ai'](), 'post', body=data)
if volume['size'] > src_vref['size']:
self.extend_volume(volume, volume['size'])
def delete_volume(self, volume):
self.detach_volume(None, volume)
app_inst = _get_name(volume['id'])
try:
self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
app_inst),
method='delete')
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, _get_name(volume['id']))
    def ensure_export(self, context, volume, connector):
        """Gets the associated account, retrieves CHAP info and updates."""
        # Re-exporting is idempotent here, so simply delegate.
        return self.create_export(context, volume, connector)
    def initialize_connection(self, volume, connector):
        """Online the app instance and return iSCSI connection info.

        :param volume: Cinder volume dict.
        :param connector: Connector dict; ``multipath`` selects whether
            one portal or all access IPs are returned.
        :returns: dict in the standard 'iscsi' driver_volume_type shape.
        """
        # Now online the app_instance (which will online all storage_instances)
        multipath = connector.get('multipath', False)
        url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        app_inst = self._issue_api_request(url, method='put', body=data)
        storage_instances = app_inst["storage_instances"]
        si_names = list(storage_instances.keys())
        # Single-path target info comes from the first storage instance.
        portal = storage_instances[si_names[0]]['access']['ips'][0] + ':3260'
        iqn = storage_instances[si_names[0]]['access']['iqn']
        if multipath:
            # One portal per access IP; the IQN and LUN id are repeated
            # once per path.
            portals = [p + ':3260' for p in
                       storage_instances[si_names[0]]['access']['ips']]
            iqns = [iqn for _ in
                    storage_instances[si_names[0]]['access']['ips']]
            lunids = [self._get_lunid() for _ in
                      storage_instances[si_names[0]]['access']['ips']]
            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_iqns': iqns,
                    'target_portal': portal,
                    'target_portals': portals,
                    'target_lun': self._get_lunid(),
                    'target_luns': lunids,
                    'volume_id': volume['id'],
                    'discard': False}}
        else:
            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_portal': portal,
                    'target_lun': self._get_lunid(),
                    'volume_id': volume['id'],
                    'discard': False}}
    def create_export(self, context, volume, connector):
        """Online the volume and configure initiator/ACL/IP-pool access.

        Steps: online the app instance, register the connector's
        initiator (unless ACLs allow all), put it in a per-volume
        initiator group referenced by each storage instance's ACL,
        select an IP pool, then poll until the storage instance is up.
        """
        # Online volume in case it hasn't been already
        url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        self._issue_api_request(url, method='put', body=data)
        # Check if we've already setup everything for this volume
        url = (URL_TEMPLATES['si']().format(_get_name(volume['id'])))
        storage_instances = self._issue_api_request(url)
        # Handle adding initiator to product if necessary
        # Then add initiator to ACL
        policies = self._get_policies_for_resource(volume)
        if (connector and
                connector.get('initiator') and
                not policies['acl_allow_all']):
            initiator_name = "OpenStack_{}_{}".format(
                self.driver_prefix, str(uuid.uuid4())[:4])
            initiator_group = INITIATOR_GROUP_PREFIX + volume['id']
            found = False
            initiator = connector['initiator']
            current_initiators = self._issue_api_request('initiators')
            for iqn, values in current_initiators.items():
                if initiator == iqn:
                    found = True
                    break
            # If we didn't find a matching initiator, create one
            if not found:
                data = {'id': initiator, 'name': initiator_name}
                # Try and create the initiator
                # If we get a conflict, ignore it because race conditions
                self._issue_api_request("initiators",
                                        method="post",
                                        body=data,
                                        conflict_ok=True)
            # Create initiator group with initiator in it
            initiator_path = "/initiators/{}".format(initiator)
            initiator_group_path = "/initiator_groups/{}".format(
                initiator_group)
            ig_data = {'name': initiator_group, 'members': [initiator_path]}
            self._issue_api_request("initiator_groups",
                                    method="post",
                                    body=ig_data,
                                    conflict_ok=True)
            # Create ACL with initiator group as reference for each
            # storage_instance in app_instance
            # TODO(_alastor_): We need to avoid changing the ACLs if the
            # template already specifies an ACL policy.
            for si_name in storage_instances.keys():
                acl_url = (URL_TEMPLATES['si']() + "/{}/acl_policy").format(
                    _get_name(volume['id']), si_name)
                data = {'initiator_groups': [initiator_group_path]}
                self._issue_api_request(acl_url,
                                        method="put",
                                        body=data)
        if connector and connector.get('ip'):
            try:
                # Case where volume_type has non default IP Pool info
                if policies['ip_pool'] != 'default':
                    initiator_ip_pool_path = self._issue_api_request(
                        "access_network_ip_pools/{}".format(
                            policies['ip_pool']))['path']
                # Fallback to trying reasonable IP based guess
                else:
                    initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
                        connector['ip'])
                ip_pool_url = URL_TEMPLATES['si_inst'](
                    policies['default_storage_name']).format(
                        _get_name(volume['id']))
                ip_pool_data = {'ip_pool': initiator_ip_pool_path}
                self._issue_api_request(ip_pool_url,
                                        method="put",
                                        body=ip_pool_data)
            except exception.DateraAPIException:
                # Datera product 1.0 support
                pass
        # Check to ensure we're ready for go-time
        self._si_poll(volume, policies)
| |
buffers into one template. The resulting template will be stored on both
buffers.
:return: Fingerprint set response.
:rtype: FpResponseSet
"""
return self._command_set(command=FpCommand.TEMPLATE_CREATE)
def template_load(self, buffer: tp.Union[int, FpBufferID], index: int = 0) -> FpResponseValue:
"""
Loads an existing template from the given database position into the specified char buffer.
:param Union[int, FpBufferID] buffer: Buffer ID.
:param int index: Position on the device database.
:return: Fingerprint value response.
:rtype: FpResponseValue
"""
return self._template_manage(buffer=buffer, index=index, save=False)
def template_save(self, buffer: tp.Union[int, FpBufferID], index: int = None) -> FpResponseValue:
"""
Saves a template from the specified char buffer at the given database position.
:param Union[int, FpBufferID] buffer: Buffer ID.
:param int index: Position on the device database. If none it'll save it on the lowest free position.
:return: Fingerprint value response.
:rtype: FpResponseValue
"""
return self._template_manage(buffer=buffer, index=index, save=True)
def template_empty(self) -> FpResponseSet:
"""
Delete all the templates from the device database.
:return: Fingerprint set response.
:rtype: FpResponseSet
"""
return self._command_set(command=FpCommand.TEMPLATE_EMPTY)
    def template_delete(self, index: int = 0, count: int = 1) -> FpResponseSet:
        """
        Deletes a template from the device database. By default one.

        :param int index: Position on the device database.
        :param int count: Number of templates to be deleted.

        :return: Fingerprint set response.
        :rtype: FpResponseSet

        :raises ValueError: Index or selection exceeds device capacity.
        """
        # Bounds-check against the device capacity before touching hardware.
        if index < 0 or index >= self.capacity:
            raise ValueError(f'Index exceeds device capacity: 0 < index < {self.capacity}')
        if count < 0 or count > (self.capacity - index):
            raise ValueError(f'The selection exceeds bounds: {index + count} > {self.capacity}')
        # Packet layout: 2-byte start index followed by 2-byte count.
        pack = to_bytes(value=index, size=2) + to_bytes(value=count, size=2)
        return self._command_set(command=FpCommand.TEMPLATE_DELETE, packet=pack)
    def match_1_1(self) -> FpResponseMatch:
        """
        Compare the contents stored in the char buffers and returns the accuracy score.

        :return: Fingerprint match response.
        :rtype: FpResponseMatch
        """
        recv = self._command_get(command=FpCommand.TEMPLATE_MATCH)
        # A 1:1 match has no database position, so index is fixed at -1.
        index = -1
        # The accuracy score occupies the first two bytes of the ACK packet.
        score = from_bytes(data=recv.pack[0:2])
        return FpResponseMatch(succ=recv.succ, code=recv.code, index=index, score=score)
    def match_1_n(self, buffer: tp.Union[int, FpBufferID], index: int = 0, count: int = None, fast: bool = False) -> FpResponseMatch:
        """
        Searches the device database for the template in char buffer.

        :param Union[int, FpBufferID] buffer: Buffer ID.
        :param int index: Position on the device database.
        :param int count: Number of templates to be compared.
        :param bool fast: True to perform a fast search, false otherwise.

        :return: Fingerprint match response.
        :rtype: FpResponseMatch

        :raises ValueError: Selection out of bounds or unsupported buffer ID.
        """
        # Default: search from *index* through the end of the database.
        if count is None:
            count = (self.capacity - index)
        if index < 0 or index >= self.capacity:
            raise ValueError(f'Index exceeds device capacity: 0 < index < {self.capacity}')
        if count < 0 or count > (self.capacity - index):
            raise ValueError(f'The selection exceeds bounds: {index + count} > {self.capacity}')
        if not FpBufferID.has_value(value=buffer):
            raise ValueError(f'Buffer value not supported: {buffer}')
        cmd = FpCommand.TEMPLATE_SEARCH_FAST if fast else FpCommand.TEMPLATE_SEARCH
        # Packet layout: buffer id, 2-byte start index, 2-byte count.
        pack = bytearray([buffer]) + to_bytes(value=index, size=2) + to_bytes(value=count, size=2)
        recv = self._command_get(command=cmd, packet=pack)
        # On success the ACK carries index (bytes 0-1) and score (bytes 2-3).
        index = from_bytes(data=recv.pack[0:2]) if recv.succ else -1
        score = from_bytes(data=recv.pack[2:4])
        return FpResponseMatch(succ=recv.succ, code=recv.code, index=index, score=score)
    def buffer_download(self, buffer: tp.Union[int, FpBufferID]) -> FpResponseValue:
        """
        Downloads the char buffer data.

        :param Union[int, FpBufferID] buffer: Buffer ID.

        :return: Fingerprint value response.
        :rtype: FpResponseValue

        :raises ValueError: Unsupported buffer ID.
        """
        if not FpBufferID.has_value(value=buffer):
            raise ValueError(f'{FpBufferID.__name__} value not supported: {buffer}')
        pack = bytearray([FpBufferID(buffer)])
        # data_wait=True: the device streams the buffer contents after the ACK.
        recv = self._command_get(command=FpCommand.TEMPLATE_DOWNLOAD, packet=pack, data_wait=True)
        return FpResponseValue(succ=recv.succ, code=recv.code, value=recv.data)
    def buffer_upload(self, buffer: tp.Union[int, FpBufferID], data: bytearray) -> FpResponseSet:
        """
        Uploads data to the char buffer. After transmission this function verifies the contents of the buffer to ensure
        the successful transmission.

        :param Union[int, FpBufferID] buffer: Buffer ID.
        :param bytearray data: Data to be uploaded to the buffer.

        :return: Fingerprint set response.
        :rtype: FpResponseSet

        :raises ValueError: Unsupported buffer ID or empty data.
        """
        if not FpBufferID.has_value(value=buffer):
            raise ValueError(f'{FpBufferID.__name__} value not supported: {buffer}')
        if not data:
            raise ValueError('Data is empty')
        # Send
        recv = self._command_set(command=FpCommand.TEMPLATE_UPLOAD, data=data)
        if not recv.succ:
            return recv
        # Verify: read the buffer back and compare against what was sent.
        recv = self.buffer_download(buffer=buffer)
        succ = (recv.value == data)
        code = FpError.SUCCESS if succ else FpError.ERROR_TEMPLATE_UPLOAD
        return FpResponseSet(succ=succ, code=code)
    def notepad_get(self, page: int) -> FpResponseValue:
        """
        Get the selected notepad page contents.

        :param int page: Page number.

        :return: Fingerprint value response.
        :rtype: FpResponseValue

        :raises ValueError: Page number out of range.
        :raises BufferError: Returned page size differs from NOTEPAD_SIZE.
        """
        if page < 0 or page >= NOTEPAD_COUNT:
            raise ValueError(f'Notepad page out of range: {0} <= {page} < {NOTEPAD_COUNT}')
        recv = self._command_get(command=FpCommand.NOTEPAD_GET, packet=bytearray([page]))
        # NOTE(review): this size check also runs when the command failed,
        # turning a failure response into BufferError -- confirm intended.
        if len(recv.pack) != NOTEPAD_SIZE:
            raise BufferError(f'Notepad size is not the expected: {len(recv.pack)} instead of {NOTEPAD_SIZE}')
        return FpResponseValue(succ=recv.succ, code=recv.code, value=recv.pack)
    def notepad_set(self, page: int, data: bytearray) -> FpResponseSet:
        """
        Set the selected notepad page contents.

        :param int page: Page number.
        :param bytearray data: Data to be written on the page.

        :return: Fingerprint set response.
        :rtype: FpResponseSet

        :raises ValueError: Page number out of range.
        """
        if page < 0 or page >= NOTEPAD_COUNT:
            raise ValueError(f'Notepad page out of range: {0} <= {page} < {NOTEPAD_COUNT}')
        # Oversized payloads are cropped (not rejected) to the page size.
        if len(data) > NOTEPAD_SIZE:
            embu.SDK_LOG.info(f'Cropping data to match the notepad page size: {len(data)} cropped to {NOTEPAD_SIZE}')
            data = data[:NOTEPAD_SIZE]
        # Packet layout: page number byte followed by the page contents.
        pack = bytearray([page]) + data
        return self._command_set(command=FpCommand.NOTEPAD_SET, packet=pack)
def notepad_clear(self, page: int) -> FpResponseSet:
"""
Clear the contents of the selected notepad page.
:param int page: Page number.
:return: Fingerprint set response.
:rtype: FpResponseSet
"""
return self.notepad_set(page=page, data=bytearray(NOTEPAD_SIZE * [0x00]))
def random_get(self) -> FpResponseValue:
"""
Generates a random 32-bit decimal number.
:return: Fingerprint value response.
:rtype: FpResponseValue
"""
recv = self._command_get(command=FpCommand.GENERATE_RANDOM)
return FpResponseValue(succ=recv.succ, code=recv.code, value=from_bytes(data=recv.pack[0:4]))
    def _detector_process(self) -> None:
        """
        Pulls periodically the state of the TOUT signal connected to CTS. If the finger is detected then an event is
        emitted.
        """
        # Do this periodically
        while self._df_is_active:
            # Only execute core if device IS connected...
            if self.stream.device.is_open:
                state = self.stream.device.serial.cts
                # Edge detection: act only when the CTS level changes.
                if state != self._df_state:
                    self._df_state = state
                    if self._df_state:
                        embu.SDK_LOG.info('Finger pressed sensor!')
                        # Emit asynchronously so the poll loop is not blocked
                        # by slow event handlers.
                        embu.SDK_TP.enqueue(task=embu.SimpleThreadTask(
                            name=f'{self.__class__.__name__}.on_finger_pressed',
                            task=self.on_finger_pressed.emit
                            ))
                    else:
                        embu.SDK_LOG.info('Finger released sensor!')
                        embu.SDK_TP.enqueue(task=embu.SimpleThreadTask(
                            name=f'{self.__class__.__name__}.on_finger_released',
                            task=self.on_finger_released.emit
                            ))
            time.sleep(self._df_period)
        # Inform finished
        self._df_finished = True
    def _command_get(self,
                     command: FpCommand, packet: bytearray = bytearray(),
                     data_wait: bool = False
                     ) -> FpResponseGet:
        """
        Use this function when need to get parameters / data from to the device.

        :param FpCommand command: Command ID.
        :param bytearray packet: Command packet.
        :param bool data_wait: True if waiting for data, False otherwise.

        :return: Fingerprint get response.
        :rtype: FpResponseGet

        :raises Error: Packet reception failed, the response is not an ACK,
            or the data phase timed out.
        """
        # NOTE(review): ``packet`` has a shared mutable default; it is only
        # read here, but confirm no caller mutates it.
        tim = embu.Timer()
        data_ok = False
        data_buff = bytearray()
        def wait_ack_logic(item: FpPacket) -> bool:
            """
            Wait for ACK.
            """
            is_ack = (item.pid == FpPID.ACK)
            return is_ack
        def wait_data_logic(item: FpPacket) -> None:
            """
            Wait for data to be received.
            """
            nonlocal data_buff, data_ok, tim
            is_data = (item.pid == FpPID.DATA)
            is_end = (item.pid == FpPID.END_OF_DATA)
            if is_data or is_end:
                # Restart the timeout window on every data packet.
                tim.start()
                data_buff.extend(item.packet)
                if is_end:
                    data_ok = True
        # Prepare data reception
        if data_wait:
            self.on_receive += wait_data_logic
        # Transmit packet and wait response
        send = FpPacket(address=self._addr, pid=FpPID.COMMAND, packet=bytearray([command]) + packet)
        recv = self.transmit(send=send, logic=wait_ack_logic)
        # Check response type, command and possible errors
        if not isinstance(recv, FpPacket):
            raise self.Error(message='Unable to get the response packet', code=FpError.ERROR_PACKET_RECEPTION)
        if recv.pid != FpPID.ACK:
            raise self.Error(message='The received packet is not an ACK!', code=FpError.ERROR_PACKET_FAULTY)
        # ACK layout: status code byte followed by the response payload.
        pack = recv.packet[1:]
        code = FpError(recv.packet[0])
        self._code_check(code=code)
        # Wait for data if required
        if data_wait:
            tim.start()
            while not data_ok and (tim.elapsed() < self._timeout):
                time.sleep(0.01)
            self.on_receive -= wait_data_logic
            if not data_ok:
                raise self.Error('Timeout while waiting for data.', code=FpError.ERROR_TIMEOUT)
        # Check and return
        succ = code in [FpError.SUCCESS, FpError.HANDSHAKE_SUCCESS]
        return FpResponseGet(succ=succ, code=code, pack=pack, data=data_buff)
    def _command_set(self,
                     command: FpCommand, packet: bytearray = bytearray(),
                     data: bytearray = bytearray()
                     ) -> FpResponseSet:
        """
        Use this function when need to set parameters / data to the device.

        :param FpCommand command: Command ID.
        :param bytearray packet: Command packet.
        :param bytearray data: If not empty this data will be sent after successful command.

        :return: Fingerprint set response.
        :rtype: FpResponseSet
        """
        # Send command
        recv = self._command_get(command=command, packet=packet)
        # Send data, if required
        if data and recv.succ:
            data_size = len(data)
            pack_size = self.packet_size.to_int()
            # Number of packets needed (last one may be partial).
            pack_num = (data_size // pack_size) + int((data_size % pack_size) > 0)
            end = 0
            send = FpPacket(address=self._addr, pid=FpPID.DATA)
            # All packets but the last go out as plain DATA packets...
            for idx in range(pack_num - 1):
                start = idx * pack_size
                end = start + pack_size
                send.packet = data[start:end]
                self.transmit(send=send)
            # ...and the remainder is flagged as END_OF_DATA.
            send.pid = FpPID.END_OF_DATA
            send.packet = data[end:]
            self.transmit(send=send)
        return FpResponseSet(succ=recv.succ, code=recv.code)
def _template_manage(self, buffer: tp.Union[int, FpBufferID], index: int = None, save: bool = True) -> FpResponseValue:
"""
Save/load a template to/from the device database.
:param Union[int, FpBufferID] buffer: Buffer ID.
:param int index: Position on the | |
# Do not edit this file; it was automatically generated.
import ctypes
import numpy
from nidaqmx._lib import lib_importer, ctypes_byte_str, c_bool32
from nidaqmx.scale import Scale
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small)
from nidaqmx._task_modules.channels.channel import Channel
from nidaqmx.constants import (
ConstrainedGenMode, DataTransferActiveTransferMode, Edge, FrequencyUnits,
Level, OutputDataTransferCondition, TimeUnits, UsageTypeCO)
class COChannel(Channel):
"""
Represents one or more counter output virtual channels and their properties.
"""
__slots__ = []
def __repr__(self):
return 'COChannel(name={0})'.format(self._name)
    @property
    def co_auto_incr_cnt(self):
        """
        int: Specifies a number of timebase ticks by which to increase
        the time spent in the idle state for each successive pulse.
        """
        val = ctypes.c_uint()
        cfunc = lib_importer.windll.DAQmxGetCOAutoIncrCnt
        # argtypes are installed lazily, double-checked under the lock.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_uint)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_auto_incr_cnt.setter
    def co_auto_incr_cnt(self, val):
        # Write the property value through the DAQmx C API.
        cfunc = lib_importer.windll.DAQmxSetCOAutoIncrCnt
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_uint]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_auto_incr_cnt.deleter
    def co_auto_incr_cnt(self):
        # ``del`` resets the property to its DAQmx default.
        cfunc = lib_importer.windll.DAQmxResetCOAutoIncrCnt
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_constrained_gen_mode(self):
        """
        :class:`nidaqmx.constants.ConstrainedGenMode`: Specifies
        constraints to apply when the counter generates pulses.
        Constraining the counter reduces the device resources
        required for counter operation. Constraining the counter can
        also allow additional analog or counter tasks on the device
        to run concurrently. For continuous counter tasks, NI-DAQmx
        consumes no device resources when the counter is
        constrained. For finite counter tasks, resource use
        increases with the frequency regardless of the constraint
        mode. However, fixed frequency constraints significantly
        reduce resource usage, and fixed duty cycle constraint
        marginally reduces it.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCOConstrainedGenMode
        # argtypes are installed lazily, double-checked under the lock.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw integer in its enum type for callers.
        return ConstrainedGenMode(val.value)
    @co_constrained_gen_mode.setter
    def co_constrained_gen_mode(self, val):
        # Pass the raw enum value to the C API.
        val = val.value
        cfunc = lib_importer.windll.DAQmxSetCOConstrainedGenMode
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_int]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_constrained_gen_mode.deleter
    def co_constrained_gen_mode(self):
        # ``del`` resets the property to its DAQmx default.
        cfunc = lib_importer.windll.DAQmxResetCOConstrainedGenMode
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_count(self):
        """
        int: Indicates the current value of the count register.
        """
        val = ctypes.c_uint()
        cfunc = lib_importer.windll.DAQmxGetCOCount
        # Read-only property: no setter/deleter is generated.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_uint)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @property
    def co_ctr_timebase_active_edge(self):
        """
        :class:`nidaqmx.constants.Edge`: Specifies whether a timebase
        cycle is from rising edge to rising edge or from falling
        edge to falling edge.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseActiveEdge
        # argtypes are installed lazily, double-checked under the lock.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw integer in its enum type for callers.
        return Edge(val.value)
    @co_ctr_timebase_active_edge.setter
    def co_ctr_timebase_active_edge(self, val):
        # Pass the raw enum value to the C API.
        val = val.value
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseActiveEdge
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_int]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_active_edge.deleter
    def co_ctr_timebase_active_edge(self):
        # ``del`` resets the property to its DAQmx default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseActiveEdge
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_dig_fltr_enable(self):
        """
        bool: Specifies whether to apply the pulse width filter to the
        signal.
        """
        val = c_bool32()
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseDigFltrEnable
        # argtypes are installed lazily, double-checked under the lock.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(c_bool32)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_ctr_timebase_dig_fltr_enable.setter
    def co_ctr_timebase_dig_fltr_enable(self, val):
        # Write the property value through the DAQmx C API.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseDigFltrEnable
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str, c_bool32]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_dig_fltr_enable.deleter
    def co_ctr_timebase_dig_fltr_enable(self):
        # ``del`` resets the property to its DAQmx default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseDigFltrEnable
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_dig_fltr_min_pulse_width(self):
        """
        float: Specifies in seconds the minimum pulse width the filter
        recognizes.
        """
        val = ctypes.c_double()
        cfunc = (lib_importer.windll.
                 DAQmxGetCOCtrTimebaseDigFltrMinPulseWidth)
        # argtypes are installed lazily, double-checked under the lock.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_double)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_ctr_timebase_dig_fltr_min_pulse_width.setter
    def co_ctr_timebase_dig_fltr_min_pulse_width(self, val):
        # Write the property value through the DAQmx C API.
        cfunc = (lib_importer.windll.
                 DAQmxSetCOCtrTimebaseDigFltrMinPulseWidth)
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_double]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_dig_fltr_min_pulse_width.deleter
    def co_ctr_timebase_dig_fltr_min_pulse_width(self):
        # ``del`` resets the property to its DAQmx default.
        cfunc = (lib_importer.windll.
                 DAQmxResetCOCtrTimebaseDigFltrMinPulseWidth)
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_dig_fltr_timebase_rate(self):
        """
        float: Specifies in hertz the rate of the pulse width filter
            timebase. NI-DAQmx uses this value to compute settings for
            the filter.
        """
        # Output parameter; the C call fills it in by reference.
        val = ctypes.c_double()
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseDigFltrTimebaseRate
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_double)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_ctr_timebase_dig_fltr_timebase_rate.setter
    def co_ctr_timebase_dig_fltr_timebase_rate(self, val):
        # Property setter: forwards *val* (hertz) to the driver.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseDigFltrTimebaseRate
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_double]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_dig_fltr_timebase_rate.deleter
    def co_ctr_timebase_dig_fltr_timebase_rate(self):
        # Property deleter: restores the driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseDigFltrTimebaseRate
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_dig_fltr_timebase_src(self):
        """
        str: Specifies the input terminal of the signal to use as the
            timebase of the pulse width filter.
        """
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseDigFltrTimebaseSrc
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_char_p, ctypes.c_uint]
        # Two-pass string retrieval: first call with size 0 to learn the
        # required buffer length, then call again with a buffer of that size.
        temp_size = 0
        while True:
            val = ctypes.create_string_buffer(temp_size)
            size_or_code = cfunc(
                self._handle, self._name, val, temp_size)
            if is_string_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break
        check_for_error(size_or_code)
        return val.value.decode('ascii')
    @co_ctr_timebase_dig_fltr_timebase_src.setter
    def co_ctr_timebase_dig_fltr_timebase_src(self, val):
        # Property setter: forwards the terminal name *val* to the driver.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseDigFltrTimebaseSrc
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_dig_fltr_timebase_src.deleter
    def co_ctr_timebase_dig_fltr_timebase_src(self):
        # Property deleter: restores the driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseDigFltrTimebaseSrc
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_dig_sync_enable(self):
        """
        bool: Specifies whether to synchronize recognition of
            transitions in the signal to the internal timebase of the
            device.
        """
        # Output parameter; the C call fills it in by reference.
        val = c_bool32()
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseDigSyncEnable
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(c_bool32)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_ctr_timebase_dig_sync_enable.setter
    def co_ctr_timebase_dig_sync_enable(self, val):
        # Property setter: forwards *val* to the driver.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseDigSyncEnable
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str, c_bool32]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_dig_sync_enable.deleter
    def co_ctr_timebase_dig_sync_enable(self):
        # Property deleter: restores the driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseDigSyncEnable
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_master_timebase_div(self):
        """
        int: Specifies the divisor for an external counter timebase. You
            can divide the counter timebase in order to generate slower
            signals without causing the count register to roll over.
        """
        # Output parameter; the C call fills it in by reference.
        val = ctypes.c_uint()
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseMasterTimebaseDiv
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_uint)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_ctr_timebase_master_timebase_div.setter
    def co_ctr_timebase_master_timebase_div(self, val):
        # Property setter: forwards the divisor *val* to the driver.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseMasterTimebaseDiv
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_uint]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_master_timebase_div.deleter
    def co_ctr_timebase_master_timebase_div(self):
        # Property deleter: restores the driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseMasterTimebaseDiv
        # One-time, lock-guarded installation of the C function signature.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
@property
def co_ctr_timebase_rate(self):
"""
float: Specifies in Hertz the frequency of the counter timebase.
Specifying the rate of a counter timebase allows you to
define output pulses in seconds rather than in ticks of the
timebase. If you use an external timebase and do not specify
the rate, you can | |
import numpy as np
from collections import defaultdict as ddict
from ptsnet.arrays import Table2D, Table, ObjArray
from ptsnet.parallel.partitioning import even, get_partition
from ptsnet.simulation.constants import MEM_POOL_POINTS, PIPE_START_RESULTS, PIPE_END_RESULTS, NODE_RESULTS, CLOSED_PROTECTION_RESULTS, POINT_PROPERTIES, G
from ptsnet.utils.data import is_array
from ptsnet.arrays.selectors import SelectorSet
from ptsnet.simulation.funcs import run_general_junction, run_interior_step, run_pump_step, run_valve_step, run_open_protections, run_closed_protections
from ptsnet.profiler.profiler import Profiler
class Worker:
def __init__(self, **kwargs):
self.send_queue = None
self.recv_queue = None
self.router = kwargs['router']
self.wn = kwargs['wn']
self.ss = kwargs['ss']
self.global_where = kwargs['where']
self.time_step = kwargs['time_step']
self.time_steps = kwargs['time_steps']
self.mem_pool_points = None
self.point_properties = None
self.num_nodes = 0 # number of nodes in worker
self.num_start_pipes = 0 # number of start pipes in worker
self.num_end_pipes = 0 # number of end pipes in worker
self.num_jip_nodes = 0 # number of just-in-pipes junction nodes in worker
self.num_open_protections = 0 # number of open surge protections
self.num_closed_protections = 0 # number of closed surge protections
self.where = SelectorSet(['points', 'pipes', 'nodes', 'valves', 'pumps', 'open_protections', 'closed_protections'])
self.processors = even(kwargs['num_points'], self.router['main'].size)
self.is_innactive = False
innactive_processors = np.empty(self.router['main'].size, dtype=bool)
self.results = {}
self.profiler_on = kwargs['profiler_on']
self.profiler = Profiler(self.router['main'].rank, is_on = self.profiler_on)
###
self.profiler.start('get_partition')
self.partition = get_partition(
self.processors, self.router['main'].rank, self.global_where, self.ss,
self.wn, self.router['main'].size, kwargs['inpfile'])
self.profiler.stop('get_partition')
###
###
self.profiler.start('check_processor_innactivity')
if self.partition is None:
self.is_innactive = True
self.router['main'].Alltoall(np.ones(self.router['main'].size, dtype=bool)*self.is_innactive, innactive_processors)
if np.any(innactive_processors):
self.is_innactive = True
self.profiler.stop('check_processor_innactivity')
###
raise SystemError(" Partitioning is innecficient due to unused processor(s), try executing the parallel routine with less processors")
self.profiler.stop('check_processor_innactivity')
###
self.points = self.partition['points']['global_idx']
self.num_points = len(self.points) # ponts assigned to the worker
self.local_points = np.arange(self.num_points)
###
self.profiler.start('_create_selectors')
self._create_selectors()
self.profiler.stop('_create_selectors')
###
###
self.profiler.start('_define_worker_comm_queues')
self._define_worker_comm_queues()
self.profiler.stop('_define_worker_comm_queues')
###
###
self.profiler.start('_define_dist_graph_comm')
self._define_dist_graph_comm()
self.profiler.stop('_define_dist_graph_comm')
###
self._comm_buffer_head = []
self._recv_points = []
for r in self.recv_queue.values:
self._comm_buffer_head.append(np.zeros(len(r)))
self._recv_points.extend(r)
self._comm_buffer_flow = np.array(self._comm_buffer_head, dtype = object)
self._comm_buffer_head = np.array(self._comm_buffer_head, dtype = object)
###
self.profiler.start('_allocate_memory')
if self.router['main'].rank == 0:
self.local_to_global = {}
self._allocate_memory()
self.profiler.stop('_allocate_memory')
###
###
self.profiler.start('_load_initial_conditions')
self._load_initial_conditions()
self.profiler.stop('_load_initial_conditions')
###
    def _allocate_memory(self):
        """Allocate per-worker point tables and per-entity result tables.

        Builds the two-slot (current/previous step) memory pool for point
        state, the per-point property table, and — where this rank owns the
        corresponding entities — result tables for nodes and for pipe start
        and end points. Rank 0 additionally gathers global index maps so
        results can be read back by label.
        """
        # Two columns: alternating time-step buffers (see exchange_data's t % 2).
        self.mem_pool_points = Table2D(MEM_POOL_POINTS, self.num_points, 2)
        self.point_properties = Table(POINT_PROPERTIES, self.num_points)
        if self.num_nodes > 0:
            self.results['node'] = Table2D(NODE_RESULTS, self.num_nodes, self.time_steps,
                labels = self.ss['node'].labels[self.where.nodes['all_to_points',]])
        # Boundary points owned by this rank (processor assignment matches rank).
        are_my_uboundaries = self.global_where.points['are_uboundaries'] \
            [self.processors[self.global_where.points['are_uboundaries']] == self.router['main'].rank]
        self.where.points['are_my_uboundaries'] = self.local_points[np.isin(self.points, are_my_uboundaries)]
        are_my_dboundaries = self.global_where.points['are_dboundaries'] \
            [self.processors[self.global_where.points['are_dboundaries']] == self.router['main'].rank]
        self.where.points['are_my_dboundaries'] = self.local_points[np.isin(self.points, are_my_dboundaries)]
        ppoints_start = self.points[self.where.points['are_my_dboundaries']]
        ppoints_end = self.points[self.where.points['are_my_uboundaries']]
        pipes_start = self.global_where.points['to_pipes'][ppoints_start]
        pipes_end = self.global_where.points['to_pipes'][ppoints_end]
        self.num_start_pipes = len(ppoints_start)
        self.num_end_pipes = len(ppoints_end)
        if self.num_start_pipes > 0:
            self.results['pipe.start'] = Table2D(PIPE_START_RESULTS, len(ppoints_start), self.time_steps, labels = self.ss['pipe'].labels[pipes_start])
        if self.num_end_pipes > 0:
            self.results['pipe.end'] = Table2D(PIPE_END_RESULTS, len(ppoints_end), self.time_steps, labels = self.ss['pipe'].labels[pipes_end])
        # Root processor gathers indexes to facilitate reading results
        node_indexes = self.router['main'].gather(self.where.nodes['all_to_points',], root = 0)
        pipe_start_indexes = self.router['main'].gather(pipes_start, root = 0)
        pipe_end_indexes = self.router['main'].gather(pipes_end, root = 0)
        if self.router['main'].rank == 0:
            # Concatenate the per-rank index lists and map label -> row position.
            node_indexes = np.concatenate(node_indexes)
            pipe_start_indexes = np.concatenate(pipe_start_indexes)
            pipe_end_indexes = np.concatenate(pipe_end_indexes)
            node_labels = self.ss['node'].labels[node_indexes]
            pipe_start_labels = self.ss['pipe'].labels[pipe_start_indexes]
            pipe_end_labels = self.ss['pipe'].labels[pipe_end_indexes]
            self.local_to_global['node'] = {l : i for i, l in enumerate(node_labels)}
            self.local_to_global['pipe.start'] = {l : i for i, l in enumerate(pipe_start_labels)}
            self.local_to_global['pipe.end'] = {l : i for i, l in enumerate(pipe_end_labels)}
    def _define_dist_graph_comm(self):
        """Create the 'local' distributed-graph communicator for halo exchange.

        Sources/destinations are the neighbor ranks in the receive/send
        queues; edge weights are the number of points exchanged with each
        neighbor. Used by exchange_data's neighbor all-to-all calls.
        """
        self.router.add_communicator('local', self.router['main'].Create_dist_graph_adjacent(
            sources = self.recv_queue.labels,
            destinations = self.send_queue.labels,
            sourceweights = list(map(len, self.recv_queue.values)),
            destweights = list(map(len, self.send_queue.values))))
    def _define_worker_comm_queues(self):
        """Build per-neighbor receive and send queues of point indices.

        recv_queue[p]: local indices of points owned by rank p that this
        rank needs each step. send_queue[p]: local indices of points owned
        by this rank that neighbor rank p needs (the immediate neighbors of
        the points it sends us). Queues end up sorted, in local indexing.
        """
        local_points = self.partition['points']['local_idx']
        pp = self.processors[self.points]
        # Points in our partition that some other rank owns.
        pp_idx = np.where(pp != self.router['main'].rank)[0]
        ppoints = self.points[pp_idx]
        # Define receive queue
        self.recv_queue = ObjArray()
        for i, p in enumerate(pp_idx):
            if not pp[p] in self.recv_queue.indexes:
                self.recv_queue[pp[p]] = []
            self.recv_queue[pp[p]].append(ppoints[i])
        # Define send queue
        self.send_queue = ObjArray()
        uboundaries = self.points[self.where.points['are_uboundaries']]
        dboundaries = self.points[self.where.points['are_dboundaries']]
        inner = self.points[self.where.points['are_inner']]
        for p in self.recv_queue.labels:
            self.recv_queue[p] = np.sort(self.recv_queue[p])
            urq = np.isin(self.recv_queue[p], uboundaries)
            drq = np.isin(self.recv_queue[p], dboundaries)
            irq = np.isin(self.recv_queue[p], inner)
            # Neighbors of the received points: boundaries contribute one side,
            # inner points contribute both sides (method of characteristics stencil).
            extra_b = np.append(self.recv_queue[p][urq] - 1, self.recv_queue[p][drq] + 1)
            extra_i = np.append(self.recv_queue[p][irq] - 1, self.recv_queue[p][irq] + 1)
            extra = np.append(extra_b, extra_i)
            reduced_extra = extra[np.isin(extra, self.points)]
            real_extra = [local_points[r] for r in reduced_extra[self.processors[reduced_extra] == self.router['main'].rank]] # local idx
            if len(real_extra) > 0:
                if not p in self.send_queue.indexes:
                    self.send_queue[p] = []
                self.send_queue[p].extend(real_extra)
            self.recv_queue[p] = np.sort([local_points[r] for r in self.recv_queue[p]]) # convert to local idx
        for p in self.send_queue.labels:
            self.send_queue[p] = np.sort(np.unique(self.send_queue[p]))
def _create_selectors(self):
jip_nodes = self.partition['nodes']['global_idx']
lpoints = self.partition['points']['local_idx']
self.where.points['just_in_pipes'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points']]).astype(int)
self.where.points['are_tanks'] = np.where(np.isin(self.points, self.partition['tanks']['points']))[0]
self.where.points['are_reservoirs'] = np.where(np.isin(self.points, self.partition['reservoirs']['points']))[0]
njip = np.cumsum(self.partition['nodes']['context'])
self.where.nodes['just_in_pipes',] = njip[:-1]
self.where.nodes['to_points'] = self.where.points['just_in_pipes'][self.where.nodes['just_in_pipes',][:-1]]
nonpipe = np.isin(self.global_where.points['are_boundaries'], self.global_where.points['are_valve'])
nonpipe = nonpipe | np.isin(self.global_where.points['are_boundaries'], self.global_where.points['are_pump'])
local_points = np.isin(self.global_where.points['are_boundaries'], self.points[self.processors[self.points] == self.router['main'].rank])
dboundary = np.zeros(len(nonpipe), dtype=bool); dboundary[::2] = 1
uboundary = np.zeros(len(nonpipe), dtype=bool); uboundary[1::2] = 1
# ---------------------------
self.where.points['are_uboundaries'] = np.where(np.isin(self.points, self.global_where.points['are_uboundaries']))[0]
self.where.points['are_dboundaries'] = np.where(np.isin(self.points, self.global_where.points['are_dboundaries']))[0]
self.where.points['are_inner'] = np.setdiff1d(np.arange(self.num_points, dtype=np.int), \
np.concatenate((self.where.points['are_uboundaries'], self.where.points['are_dboundaries'])))
# ---------------------------
n_pipes = len(self.global_where.points['are_uboundaries'])
ppipes_idx = np.arange(n_pipes, dtype=int)
ppipes = np.zeros(n_pipes*2, dtype=int)
ppipes[::2] = ppipes_idx; ppipes[1::2] = ppipes_idx
selector_dboundaries = dboundary & (~nonpipe) & local_points
self.where.points['jip_dboundaries'] = np.where(np.isin(self.points, self.global_where.points['are_boundaries'][selector_dboundaries]))[0]
self.where.points['jip_dboundaries',] = ppipes[selector_dboundaries]
selector_uboundaries = uboundary & (~nonpipe) & local_points
self.where.points['jip_uboundaries'] = np.where(np.isin(self.points, self.global_where.points['are_boundaries'][selector_uboundaries]))[0]
self.where.points['jip_uboundaries',] = ppipes[selector_uboundaries]
# ---------------------------
diff = np.diff(njip)
self.where.points['just_in_pipes',] = np.array([i for i in range(len(jip_nodes)) for j in range(diff[i])], dtype = int)
# ---------------------------
self.where.points['start_valve'] = np.array([lpoints[spv] for spv in self.partition['inline_valves']['start_points']]).astype(int)
self.where.points['end_valve'] = np.array([lpoints[epv] for epv in self.partition['inline_valves']['end_points']]).astype(int)
self.where.points['start_valve',] = self.partition['inline_valves']['global_idx']
self.where.points['start_pump'] = np.array([lpoints[spp] for spp in self.partition['inline_pumps']['start_points']]).astype(int)
self.where.points['end_pump'] = np.array([lpoints[epv] for epv in self.partition['inline_pumps']['end_points']]).astype(int)
self.where.points['start_pump',] = self.partition['inline_pumps']['global_idx']
self.where.points['single_valve'] = np.array([lpoints[svp] for svp in self.partition['single_valves']['points']]).astype(int)
self.where.points['single_valve',] = self.partition['single_valves']['global_idx']
self.where.points['single_pump'] = np.array([lpoints[spp] for spp in self.partition['single_pumps']['points']]).astype(int)
self.where.points['single_pump',] = self.partition['single_pumps']['global_idx']
# ---------------------------
nodes = []; node_points = []
nodes += list(self.partition['nodes']['global_idx'])
node_points += list(self.partition['nodes']['points'][self.where.nodes['just_in_pipes',]])
nodes += list(self.partition['tanks']['global_idx'])
node_points += list(self.partition['tanks']['points'])
nodes += list(self.partition['reservoirs']['global_idx'])
node_points += list(self.partition['reservoirs']['points'])
nodes += list(self.ss['valve'].start_node[self.partition['inline_valves']['global_idx']])
node_points += list(self.partition['inline_valves']['start_points'])
nodes += list(self.ss['valve'].end_node[self.partition['inline_valves']['global_idx']])
node_points += list(self.partition['inline_valves']['end_points'])
nodes += list(self.ss['pump'].start_node[self.partition['inline_pumps']['global_idx']])
node_points += list(self.partition['inline_pumps']['start_points'])
nodes += list(self.ss['pump'].end_node[self.partition['inline_pumps']['global_idx']])
node_points += list(self.partition['inline_pumps']['end_points'])
nodes += list(self.ss['valve'].start_node[self.partition['single_valves']['global_idx']])
node_points += list(self.partition['single_valves']['points'])
nodes += list(self.ss['pump'].end_node[self.partition['single_pumps']['global_idx']])
node_points += list(self.partition['single_pumps']['points'])
nodes = np.array(nodes)
node_points = np.array(node_points)
if len(nodes) > 0:
atp = np.array([lpoints[npoint] for npoint in node_points]).astype(int)
_, idx_unique = np.unique(nodes, return_index=True)
sorted_idx = np.sort(idx_unique)
self.where.nodes['all_to_points'] = atp[sorted_idx]
self.where.nodes['all_to_points',] = nodes[sorted_idx]
self.num_nodes = len(self.where.nodes['all_to_points',])
self.where.nodes['all_just_in_pipes'] = self.partition['nodes']['global_idx']
self.num_jip_nodes = len(self.where.nodes['all_just_in_pipes'])
# ---------------------------
if self.ss['open_protection']:
ssprotection = np.isin(self.partition['nodes']['points'], self.global_where.points['start_open_protection'])
esprotection = np.isin(self.partition['nodes']['points'], self.global_where.points['end_open_protection'])
self.where.points['start_open_protection'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points'][ssprotection]]).astype(int)
self.where.points['end_open_protection'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points'][esprotection]]).astype(int)
self.num_open_protections = len(self.where.points['start_open_protection'])
if self.ss['closed_protection']:
ssprotection = np.isin(self.partition['nodes']['points'], self.global_where.points['start_closed_protection'])
esprotection = np.isin(self.partition['nodes']['points'], self.global_where.points['end_closed_protection'])
self.where.points['start_closed_protection'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points'][ssprotection]]).astype(int)
self.where.points['end_closed_protection'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points'][esprotection]]).astype(int)
self.num_closed_protections = len(self.where.points['start_closed_protection'])
    def _fix_protection_indexes(self, protection_type):
        """Swap start/end protection point selectors when they are reversed.

        After initial conditions assign the plus/minus characteristics, the
        'start' point of a protection must carry the plus characteristic; if
        it does not, the start and end selector arrays are exchanged.

        NOTE(review): `p1` is a selector array; `has_plus[p1] == 0` is an
        elementwise comparison, so the `if` is ambiguous when more than one
        protection exists — confirm only the single-protection case is used.
        NOTE(review): the loop body does not depend on `ii`, so iterating
        over shape[1] appears redundant — verify intent.
        """
        if (self.ss[f'{protection_type}_protection']):
            for ii in range(self.ss[f'{protection_type}_protection'].shape[1]):
                p1 = self.where.points[f'start_{protection_type}_protection']
                p2 = self.where.points[f'end_{protection_type}_protection']
                if self.point_properties.has_plus[p1] == 0: # p1 is not associated with plus characteristic
                    self.where.points[f'end_{protection_type}_protection'] = p1
                    self.where.points[f'start_{protection_type}_protection'] = p2
    def define_initial_conditions_for_points(self, points, pipe, start, end):
        """Initialize flow, head, and MOC coefficients for a run of points.

        All points in `points` belong to the same `pipe`; `start:end` is the
        corresponding local slice of the worker's point tables. Steady-state
        flowrate is copied from the pipe; head is interpolated linearly from
        the pipe's start-node head using the per-segment head loss.
        """
        q = self.ss['pipe'].flowrate[pipe]
        self.mem_pool_points.flowrate[start:end,0] = q
        start_node = self.ss['pipe'].start_node[pipe]
        start_point = self.global_where.points['are_boundaries'][pipe*2]
        npoints = points - start_point # normalized
        shead = self.ss['node'].head[start_node]
        # B and R are the method-of-characteristics pipe constants.
        self.point_properties.B[start:end] = self.ss['pipe'].wave_speed[pipe] / (G * self.ss['pipe'].area[pipe])
        self.point_properties.R[start:end] = self.ss['pipe'].ffactor[pipe] * self.ss['pipe'].dx[pipe] / \
            (2 * G * self.ss['pipe'].diameter[pipe] * self.ss['pipe'].area[pipe] ** 2)
        per_unit_hl = self.ss['pipe'].head_loss[pipe] / self.ss['pipe'].segments[pipe]
        self.mem_pool_points.head[start:end,0] = shead - per_unit_hl*npoints
    def _load_initial_conditions(self):
        """Load steady-state initial conditions into the worker's tables.

        Splits the worker's contiguous point list at pipe changes and
        initializes each run via define_initial_conditions_for_points; then
        sets the plus/minus characteristic flags, fixes protection selector
        orientation, and seeds the t=0 column of every result table.
        """
        points = self.partition['points']['global_idx']
        pipes = self.global_where.points['to_pipes'][points]
        # Positions where the pipe id changes -> boundaries between runs of
        # points that share the same pipe.
        diff = np.where(np.diff(pipes) >= 1)[0] + 1
        if len(diff) > 0:
            for i in range(len(diff)+1):
                if i == 0:
                    start = 0
                    end = diff[i]
                elif i == len(diff):
                    start = diff[i-1]
                    end = None
                else:
                    start = diff[i-1]
                    end = diff[i]
                self.define_initial_conditions_for_points(points[start:end], pipes[start], start, end)
        else:
            # All of this worker's points lie on a single pipe.
            self.define_initial_conditions_for_points(points, pipes[0], 0, None)
        # Upstream boundaries carry the plus characteristic, downstream the
        # minus; inner points carry both.
        self.point_properties.has_plus[self.where.points['are_uboundaries']] = 1
        self.point_properties.has_minus[self.where.points['are_dboundaries']] = 1
        self.point_properties.has_plus[self.where.points['are_inner']] = 1
        self.point_properties.has_minus[self.where.points['are_inner']] = 1
        self._fix_protection_indexes('open')
        self._fix_protection_indexes('closed')
        if self.num_start_pipes > 0:
            self.results['pipe.start'].flowrate[:,0] = self.mem_pool_points.flowrate[self.where.points['are_my_dboundaries'], 0]
        if self.num_end_pipes > 0:
            self.results['pipe.end'].flowrate[:,0] = self.mem_pool_points.flowrate[self.where.points['are_my_uboundaries'], 0]
        if self.num_nodes > 0:
            self.results['node'].head[:, 0] = self.mem_pool_points.head[self.where.nodes['all_to_points'], 0]
            self.results['node'].leak_flow[:, 0] = \
                self.ss['node'].leak_coefficient[self.where.nodes['all_to_points',]] * \
                np.sqrt(self.ss['node'].pressure[self.where.nodes['all_to_points',]])
            self.results['node'].demand_flow[:, 0] = \
                self.ss['node'].demand_coefficient[self.where.nodes['all_to_points',]] * \
                np.sqrt(self.ss['node'].pressure[self.where.nodes['all_to_points',]])
        # Define initial conditions for surge protections
        if self.num_closed_protections > 0:
            H0 = self.mem_pool_points.head[:,0]
            m = 1.2  # polytropic exponent for the enclosed air pocket
            Hb = 10.3 # barometric pressure [mH2O]
            self.ss['closed_protection'].HT0[:] = self.ss['closed_protection'].water_level
            self.ss['closed_protection'].HA[:] = self.ss['node'].head[self.ss['closed_protection'].node] - self.ss['closed_protection'].water_level + Hb # air pressure head
            self.ss['closed_protection'].VA[:] = self.ss['closed_protection'].area*(self.ss['closed_protection'].height-self.ss['closed_protection'].water_level) # air volume
            # Gas-law constant: HA * VA**m stays constant as the pocket compresses.
            self.ss['closed_protection'].C[:] = self.ss['closed_protection'].HA * self.ss['closed_protection'].VA**m
            # Allocate space for results
            # self.results['closed_protection'] = Table2D(CLOSED_PROTECTION_RESULTS, self.num_closed_protections, self.time_steps, self)
def exchange_data(self, t):
t1 = t % 2; t0 = 1 - t1
send_flow = []
send_head = []
for v in self.send_queue.values:
send_flow.append(self.mem_pool_points.flowrate[v,t1])
send_head.append(self.mem_pool_points.head[v,t1])
self._comm_buffer_flow = self.router['local'].neighbor_alltoall(send_flow)
self._comm_buffer_head = self.router['local'].neighbor_alltoall(send_head)
self.mem_pool_points.flowrate[self._recv_points, t1] = [item for sublist | |
import csv
import json
import pymongo
import math
import numpy as np
import sys
sys.path.append(".")
sys.path.append("..")
from nlppack.word_cut import parse
from data.item_feature import blacklist
# Module-level setup: load the word dictionary and item catalog from the
# local MongoDB "paopao" database and build the lookup tables used by the
# extraction functions below.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["paopao"]
words = mydb['dicts'].find()
words = [x for x in words]
# word -> list of canonical item aliases
word_map = dict([[x['name'], x['alias']] for x in words])
items = mydb['items'].find()
items = [x for x in items]
types = list(set([x['type'] for x in items]))
# type -> abbreviation item name (first occurrence per type wins)
abbr = {}
for x in items:
    if x['type'] not in abbr:
        abbr[x['type']] = x['abbrtype']
typed_item_index = dict([[t, [x['index'] for x in items if x['type'] == t]] for t in types])
# Single-character color names that are canonicalized by appending '发'.
special_group = '红金白黑'
item_index = dict([[x['name'], x['index']] for x in items])
item_limit = dict([[x['name'], x.get('limit', 1)] for x in items])
item_type = dict([[x['name'], x['type']] for x in items])
item_tag = dict([[x['name'], x.get('tag', '')] for x in items])
item_set = set([x['name'] for x in items])
# Keep only items that occupy a feature-vector slot (index >= 0).
items = [x for x in items if x['index'] >= 0]
v_limit = [item_limit[x['name']] for x in items]
accounts = mydb['accounts'].find()
def find_index(x):
    """Return the feature-vector index for item name *x*, or -1 if unknown.

    Bare color names from ``special_group`` are canonicalized by appending
    '发' before the lookup.
    """
    lookup_key = x + '发' if x in special_group else x
    return item_index.get(lookup_key, -1)
# Materialize the MongoDB cursor so the accounts can be iterated repeatedly.
accounts = [x for x in accounts]
def achievePointF(x):
    """Normalize an achievement-point ("资历") value.

    Values quoted in units of 10,000 (0 < x < 10) are scaled up to absolute
    points; the sentinel -1 (value not found) maps to a default of 30000.
    Everything else passes through unchanged.

    Fix: the original condition was ``x == -1 and x < 1000``; the second
    test is always true when x == -1, so it was dead and has been removed
    (behavior unchanged).
    """
    if 0 < x < 10:
        x *= 10000
    elif x == -1:
        x = 30000
    return x
def extract_vector(s, text, dirty=False):
    """Parse a token stream into (price, school, body, item-count vector).

    Parameters
    ----------
    s : list
        Tokens produced by `parse` — strings and numbers mixed.
    text : str
        The raw listing text; only used for blacklist screening when `dirty`.
    dirty : bool
        When True, apply strict screening: blacklist words, missing
        school/body/price, or implausible prices cause a (None,)*4 return,
        and the school/body one-hot slots are normalized at the end.

    Returns
    -------
    tuple
        (price, school, body, v) where v is a count vector indexed per
        `item_index`; or (None, None, None, None) when screened out.
    """
    if dirty:
        for x in blacklist:
            if x in text:
                return None, None, None, None
    # Expand tokens through word_map into canonical item names; '复刻'/'下架'
    # (reissued/delisted) suppress cl5 aliases until '五限' re-enables them.
    x = []
    enable_cl5 = True
    last = ''
    for i in s:
        if type(i) == float or type(i) == int:
            x.append(i)
        elif i == '':
            pass
        else:
            if ('复刻' in i or '下架' in i):
                enable_cl5 = False
            elif '五限' in i:
                enable_cl5 = True
            if i[0] == '-':
                x.append(i)
            else:
                if i in word_map:
                    has_cl5 = False
                    for j in word_map[i]:
                        if item_type.get(j, '') == 'cl5':
                            has_cl5 = True
                    if has_cl5:
                        if enable_cl5:
                            x += [j for j in word_map[i]
                                  if item_type.get(j, '') == 'cl5']
                        else:
                            x += [j for j in word_map[i]
                                  if item_type.get(j, '') != 'cl5']
                    else:
                        for j in word_map[i]:
                            x.append(j)
            last = i
    price = -1
    # trim head
    for i in range(len(x)):
        if type(x[i]) == float or type(x[i]) == str and x[i] != '' and x[i][0] != '-':
            x = x[i:]
            break
    forward_range = 12
    backward_range = 5
    school = None
    body = None
    # find school & body
    # Only scan the first `forward_range` tokens plus up to `backward_range`
    # tokens from the tail (joined by a '' sentinel).
    z = x[:forward_range]
    if len(x) > forward_range:
        delta = len(x) - forward_range
        if delta > backward_range:
            delta = backward_range
        z.append('')
        for i in range(0, delta):
            z.append(x[-i - 1])
    pindex = -1
    # Price heuristic 1: a number directly after a 'body' token (excluding
    # numbers that are really '资历' achievement values).
    for i in range(len(z)):
        if (type(z[i]) == float and (i > 0 and price == -1 and 'body' in item_type.get(z[i - 1], '') and
                (i + 1 == len(z) or z[i + 1] != '资历' or i + 2 < len(z) and type(z[i + 2]) == float and z[i + 2] > 10000))):
            price = z[i]
            pindex = i
    # Price heuristic 2: a number directly before a 'school' token; also
    # record the first body/school tokens seen.
    for i in range(len(z)):
        if (type(z[i]) == float and (i > 0 and price == -1 and i + 1 < len(z) and 'school' in item_type.get(z[i + 1], '') and
                (i == 0 or z[i - 1] != '资历' or i > 1 and type(z[i - 2]) == float and z[i - 2] > 10000))):
            price = z[i]
            pindex = i
        elif type(z[i]) == str:
            if body == None and 'body' in item_type.get(z[i], ''):
                body = z[i]
            elif school == None and 'school' in item_type.get(z[i], ''):
                school = z[i]
    # Price heuristic 3 (fallback): first number >= 100 not adjacent to '资历'.
    if price < 100:
        for i in range(len(z)):
            if type(z[i]) == float and z[i] >= 100:
                if i == 0 or z[i - 1] != '资历':
                    if i + 1 == len(z) or z[i + 1] != '资历':
                        price = z[i]
                        pindex = i
                        break
    #print(body, school, price, x)
    if dirty:
        if body == None or school == None or price == -1 or price < 200 or s[0] == '出租':
            return None, None, None, None
    # remove price info
    if pindex != -1:
        if pindex > forward_range:
            # Index came from the tail section of z; map back to a negative
            # index into x.
            pindex = forward_range - pindex
        x[pindex] = ''
    # Accumulate item counts: numbers bind to adjacent 'cnt' items according
    # to their prefix/suffix tag; color prefixes multiply the following item.
    v = [0] * len(items)
    last_type = ''
    last_item_type = ''
    last_info = ''
    last_val = ''
    last_colors = ''
    left = []
    i = 0
    while i < len(x):
        curr_item_type = item_type.get(x[i], '')
        curr_type = type(x[i])
        curr_info = item_tag.get(x[i], '')
        curr_val = x[i]
        if curr_type == float:
            if 'cnt' == last_item_type and (last_info == 'prefix' or last_info == 'both'):
                if find_index(last_val) != -1:
                    k = find_index(last_val)
                    if curr_val < v_limit[k] * 2:
                        v[k] = curr_val
                        curr_item_type = ''
            last_colors = ''
        else:
            if last_type == float:
                if 'cnt' == curr_item_type and (curr_info == 'suffix' or curr_info == 'both'):
                    if find_index(curr_val) != -1:
                        k = find_index(curr_val)
                        if last_val < v_limit[k] * 2:
                            v[k] = last_val
                            curr_item_type = ''
                elif curr_val not in item_set and len(curr_val) >= 2 and curr_val[1] in special_group and last_val < 50:
                    # e.g. "<n><color><item>": count binds to the color item,
                    # and the remainder is reprocessed as its own token.
                    if find_index(curr_val[1]) != -1:
                        k = find_index(curr_val[1])
                        if last_val < v_limit[k] * 2:
                            v[k] = last_val
                    if len(curr_val) > 2 and curr_val[2:] in item_set:
                        x[i] = curr_val[2:]
                        continue
                elif curr_val in item_set:
                    k = find_index(curr_val)
                    if k != -1:
                        v[k] += 1
            elif curr_type == str and 'cnt' not in curr_item_type and curr_val in item_set:
                k = find_index(curr_val)
                if k != -1:
                    if last_colors == '':
                        v[k] += 1
                    else:
                        # A run of color characters preceded this item: count
                        # one of each color variant via word_map expansion.
                        if item_type.get(curr_val[0], '') in '白黑金红蓝紫绿黄粉':
                            colors = last_colors + curr_val[0]
                            for c in colors:
                                for t in word_map.get(c + curr_val[1:], []):
                                    k = find_index(t)
                                    if k != -1:
                                        v[k] += 1
                        else:
                            v[k] += len(last_colors)
            elif 'cnt' not in curr_item_type:
                # left.append(curr_val)
                pass
            # Track consecutive color-only tokens for the branch above.
            flag = True
            for k in curr_val:
                if 'color' not in item_type.get(k, ''):
                    flag = False
                    break
            if flag:
                last_colors = last_colors + curr_val
            else:
                last_colors = ''
        last_info = curr_info
        last_item_type = curr_item_type
        last_type = curr_type
        last_val = curr_val
        i += 1
    # Post-processing: normalize achievement points, clamp to per-item
    # limits, and derive aggregate slots (盒子/限量/abbreviation totals).
    v[item_index['资历']] = achievePointF(v[item_index['资历']])
    for i in range(len(v)):
        if v[i] > v_limit[i]:
            v[i] = v_limit[i]
    if v[item_index['资历']] >= 90000:
        v[item_index['资历金']] = 1
    if v[item_index['资历']] >= 100000:
        v[item_index['资历红']] = 1
    if v[item_index['盒子']] == 0:
        v[item_index['盒子']] = sum(
            [v[i] for i in typed_item_index['box'] + typed_item_index['boxn']])
    cnt_terms = ['cl5', 'cln', 'cloak', 'adv', 'rhair', 'ghair', 'pat']
    for term in cnt_terms:
        tsum = sum([v[i] for i in typed_item_index[term]])
        if v[item_index[abbr[term]]] == 0:
            v[item_index[abbr[term]]] = tsum
    if v[item_index['限量']] == 0:
        v[item_index['限量']] = sum([v[i] for i in typed_item_index['cl6']]) + sum(
            [v[i] for i in typed_item_index['cl7']]) + v[item_index['五限']] + v[item_index['盒子']]
    if dirty:
        # Reject listings that look like bundles (2+ schools and 2+ bodies),
        # then force exactly one school and one body slot.
        if sum([v[i] for i in typed_item_index['school']]) >= 2 and sum([v[i] for i in typed_item_index['body']]) >= 2:
            return None, None, None, None
        for i in typed_item_index['school']:
            v[i] = 0
        for i in typed_item_index['body']:
            v[i] = 0
        v[item_index[school]] = 1
        v[item_index[body]] = 1
    for i in range(len(v)):
        if v[i] > v_limit[i]:
            v[i] = v_limit[i]
        # if i in hiddenset:
        #     v[i] = 0
    return price, school, body, v
def check_vector(v):
    """Normalize an externally supplied item-count vector in place.

    Applies the same post-processing as the tail of `extract_vector`:
    achievement-point normalization, per-item clamping to `v_limit`, and
    derivation of the aggregate slots (盒子, per-type abbreviation totals,
    限量). Returns the same (mutated) list.
    """
    v[item_index['资历']] = achievePointF(v[item_index['资历']])
    for i in range(len(v)):
        if v[i] > v_limit[i]:
            v[i] = v_limit[i]
    # Derive the box aggregate only when it was not set explicitly.
    if v[item_index['盒子']] == 0:
        v[item_index['盒子']] = sum(
            [v[i] for i in typed_item_index['box'] + typed_item_index['boxn']])
    cnt_terms = ['cl5', 'cln', 'cloak', 'adv', 'rhair', 'ghair', 'pat']
    for term in cnt_terms:
        tsum = sum([v[i] for i in typed_item_index[term]])
        if v[item_index[abbr[term]]] == 0:
            v[item_index[abbr[term]]] = tsum
    if v[item_index['限量']] == 0:
        v[item_index['限量']] = sum([v[i] for i in typed_item_index['cl6']]) + sum(
            [v[i] for i in typed_item_index['cl7']]) + v[item_index['五限']] + v[item_index['盒子']]
    # Re-clamp after the aggregates were filled in.
    for i in range(len(v)):
        if v[i] > v_limit[i]:
            v[i] = v_limit[i]
    return v
def set_school(v, school):
    """Zero every school indicator column in ``v``, then flag ``school``.

    ``school`` must be a key of the module-level ``item_index`` mapping.
    """
    for idx in typed_item_index['school']:
        v[idx] = 0
    v[item_index[school]] = 1
def set_body(v, body):
    """Zero every body-type indicator column in ``v``, then flag ``body``.

    ``body`` must be a key of the module-level ``item_index`` mapping.
    """
    for idx in typed_item_index['body']:
        v[idx] = 0
    v[item_index[body]] = 1
def extract(text, dirty=False):
    """Parse raw listing text and derive its feature vector.

    Returns a 2-tuple: the intermediate form produced by ``parse`` and
    the ``(price, school, body, vector)`` result of ``extract_vector``
    (each element may be None when extraction is rejected).
    """
    parsed = parse(text, dirty)
    price, school, body, vector = extract_vector(parsed, text, dirty)
    return parsed, (price, school, body, vector)
if __name__ == '__main__':
people = []
headerset = set()
existitems = mydb['infos'].find()
existitems = [x['url'] for x in existitems]
urlset = set(existitems)
history_v = {}
v_count = []
for i in range(len(items)):
v_count.append([])
total = 0
duplicate = 0
frequent = [0] * len(items)
counter = 0
for it in accounts:
counter += 1
content = it['unparsed']['content']
if it['url'] in urlset or content[:64] in headerset:
duplicate += 1
continue
headerset.add(content[:64])
| |
.. code-block:: python
# Group jobs by state point parameter 'a'.
for key, group in project.groupby('a'):
print(key, list(group))
# Find jobs where job.sp['a'] is 1 and group them
# by job.sp['b'] and job.sp['c'].
for key, group in project.find_jobs({'a': 1}).groupby(('b', 'c')):
print(key, list(group))
# Group by job.sp['d'] and job.document['count'] using a lambda.
for key, group in project.groupby(
lambda job: (job.sp['d'], job.document['count'])
):
print(key, list(group))
If `key` is None, jobs are grouped by identity (by id), placing one job
into each group.
Parameters
----------
key : str, iterable, or function
The state point grouping parameter(s) passed as a string, iterable of strings,
or a function that will be passed one argument, the job (Default value = None).
default :
A default value to be used when a given state point key is not present (must
be sortable).
"""
_filter = self._filter
if isinstance(key, str):
if default is None:
if _filter is None:
_filter = {key: {"$exists": True}}
else:
_filter = {'$and': [{key: {"$exists": True}}, _filter]}
def keyfunction(job):
"""Return job's state point value corresponding to the key.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Returns
-------
State point value corresponding to the key.
"""
return job.sp[key]
else:
def keyfunction(job):
"""Return job's state point value corresponding to the key.
Return default if key is not present.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Returns
-------
State point value corresponding to the key.
Default if key is not present.
"""
return job.sp.get(key, default)
elif isinstance(key, Iterable):
if default is None:
if _filter is None:
_filter = {k: {"$exists": True} for k in key}
else:
_filter = {'$and': [{k: {"$exists": True} for k in key}, _filter]}
def keyfunction(job):
"""Return job's state point value corresponding to the key.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Returns
-------
tuple
State point values.
"""
return tuple(job.sp[k] for k in key)
else:
def keyfunction(job):
"""Return job's state point value corresponding to the key.
Return default if key is not present.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Returns
-------
tuple
State point values.
"""
return tuple(job.sp.get(k, default) for k in key)
elif key is None:
# Must return a type that can be ordered with <, >
def keyfunction(job):
"""Return the job's id.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Returns
-------
str
The job's id.
"""
return str(job)
else:
# Pass the job document to a callable
keyfunction = key
return groupby(sorted(iter(JobsCursor(self._project, _filter, self._doc_filter)),
key=keyfunction), key=keyfunction)
    def groupbydoc(self, key=None, default=None):
        """Group jobs according to one or more document values.
        This method can be called on any :class:`~signac.contrib.project.JobsCursor` such as
        the one returned by :meth:`~signac.Project.find_jobs` or by iterating over a
        project.
        Examples
        --------
        .. code-block:: python
            # Group jobs by document value 'a'.
            for key, group in project.groupbydoc('a'):
                print(key, list(group))
            # Find jobs where job.sp['a'] is 1 and group them
            # by job.document['b'] and job.document['c'].
            for key, group in project.find_jobs({'a': 1}).groupbydoc(('b', 'c')):
                print(key, list(group))
            # Group by whether 'd' is a field in the job.document using a lambda.
            for key, group in project.groupbydoc(lambda doc: 'd' in doc):
                print(key, list(group))
        If `key` is None, jobs are grouped by identity (by id), placing one job
        into each group.
        Parameters
        ----------
        key : str, iterable, or function
            The document grouping parameter(s) passed as a string, iterable of strings,
            or a function that will be passed one argument, :meth:`~signac.job.Job.document`.
            (Default value = None).
        default :
            A default value to be used when a given document key is not present (must
            be sortable).
        """
        if isinstance(key, str):
            if default is None:
                def keyfunction(job):
                    """Return job's document value corresponding to the key.
                    Parameters
                    ----------
                    job : :class:`~signac.contrib.job.Job`
                        The job instance.
                    Returns
                    -------
                    Document value corresponding to the key.
                    """
                    return job.document[key]
            else:
                def keyfunction(job):
                    """Return job's document value corresponding to the key.
                    Return default if key is not present.
                    Parameters
                    ----------
                    job : :class:`~signac.contrib.job.Job`
                        The job instance.
                    Returns
                    -------
                    Document value corresponding to the key.
                    Default if key is not present.
                    """
                    return job.document.get(key, default)
        elif isinstance(key, Iterable):
            if default is None:
                def keyfunction(job):
                    """Return job's document values corresponding to the keys.
                    Parameters
                    ----------
                    job : :class:`~signac.contrib.job.Job`
                        The job instance.
                    Returns
                    -------
                    tuple
                        Document values.
                    """
                    return tuple(job.document[k] for k in key)
            else:
                def keyfunction(job):
                    """Return job's document values corresponding to the keys.
                    Return default for any key that is not present.
                    Parameters
                    ----------
                    job : :class:`~signac.contrib.job.Job`
                        The job instance.
                    Returns
                    -------
                    tuple
                        Document values.
                    """
                    return tuple(job.document.get(k, default) for k in key)
        elif key is None:
            # Must return a type that can be ordered with <, >
            def keyfunction(job):
                """Return the job's id.
                Parameters
                ----------
                job : :class:`~signac.contrib.job.Job`
                    The job instance.
                Returns
                -------
                str
                    The job's id.
                """
                return str(job)
        else:
            # Pass the job document to a callable
            def keyfunction(job):
                """Return the value of the user-provided callable on job.document.
                Parameters
                ----------
                job : :class:`~signac.contrib.job.Job`
                    The job instance.
                Returns
                -------
                Document values.
                """
                return key(job.document)
        # Jobs must be sorted by the same key before itertools.groupby can
        # collect equal keys into contiguous groups.
        return groupby(sorted(iter(self), key=keyfunction), key=keyfunction)
def export_to(self, target, path=None, copytree=None):
"""Export all jobs to a target location, such as a directory or a (zipped) archive file.
See Also
--------
:meth:`~signac.Project.export_to` : For full details on how to use this function.
Parameters
----------
target : str
A path to a directory or archive file to export to.
path : str or callable
The path (function) used to structure the exported data space
(Default value = None).
copytree : callable
The function used for copying of directory tree structures.
Defaults to :func:`shutil.copytree`. Can only be used when the
target is a directory (Default value = None).
Returns
-------
dict
A dictionary that maps the source directory paths to the target
directory paths.
"""
from .import_export import export_jobs
return dict(export_jobs(jobs=list(self), target=target,
path=path, copytree=copytree))
def to_dataframe(self, sp_prefix='sp.', doc_prefix='doc.', usecols=None,
flatten=False):
"""Convert the selection of jobs to a pandas dataframe.
This function exports the job metadata to a
:py:class:`pandas.DataFrame`. All state point and document keys are
prefixed by default to be able to distinguish them.
Parameters
----------
sp_prefix : str, optional
Prefix state point keys with the given string. Defaults to "sp.".
doc_prefix : str, optional
Prefix document keys with the given string. Defaults to "doc.".
usecols : list-like or callable, optional
Used to select a subset of columns. If list-like, must contain
strings corresponding to the column names that should be included.
For example, ``['sp.a', 'doc.notes']``. If callable, the column
will be included if the function called on the column name returns
True. For example, ``lambda x: 'sp.' in x``. Defaults to ``None``,
which uses all columns from the state point and document. Note
that this filter is applied *after* the doc and sp prefixes are
added to the column names.
flatten : bool, optional
Whether nested state points or document keys should be flattened.
If True, ``{'a': {'b': 'c'}}`` becomes a column named ``a.b`` with
value ``c``. If False, it becomes a column named ``a`` with value
``{'b': 'c'}``. Defaults to ``False``.
Returns
-------
:class:`~pandas.DataFrame`
A pandas DataFrame with all job metadata.
"""
import pandas
if usecols is None:
def usecols(column):
return True
elif not callable(usecols):
included_columns = set(usecols)
def usecols(column):
return column in included_columns
def _flatten(d):
return dict(_nested_dicts_to_dotted_keys(d)) if flatten else d
def _export_sp_and_doc(job):
"""Prefix and filter state point and document keys.
Parameters
----------
job : :class:`~signac.contrib.job.Job`
The job instance.
Yields
------
tuple
tuple with prefixed state point or document key and values.
"""
for key, value in _flatten(job.sp).items():
prefixed_key = sp_prefix + key
if usecols(prefixed_key):
yield prefixed_key, value
for key, value in _flatten(job.doc).items():
prefixed_key = doc_prefix + key
if usecols(prefixed_key):
yield prefixed_key, value
return pandas.DataFrame.from_dict(
data={job._id: dict(_export_sp_and_doc(job)) for job in self},
orient='index').infer_objects()
def __repr__(self):
return '{type}(project={project}, filter={filter}, doc_filter={doc_filter})'.format(
type=self.__class__.__name__,
project=repr(self._project),
filter=repr(self._filter),
doc_filter=repr(self._doc_filter))
def _repr_html_jobs(self):
"""Jobs representation as HTML.
Returns
-------
str
HTML representation of jobs.
"""
html = ''
len_self = len(self)
try:
if len_self > 100:
| |
from django.forms import forms
from django.test.testcases import SimpleTestCase
from django.utils.safestring import mark_safe
from form_tags.templatetags.forms import (
fieldholder,
fieldholder_combined,
fieldholder_inline,
)
from .mixins import DummyFormMixin, SvgIconsMixin
class FormFieldholderTestCase(DummyFormMixin, SvgIconsMixin, SimpleTestCase):
    """Rendering tests for the ``fieldholder`` template tag.

    Each test renders a bound field from the dummy form supplied by
    ``DummyFormMixin`` and compares the output against the expected
    fieldholder/fieldwrapper HTML (``assertHTMLEqual`` normalizes
    whitespace and attribute order). ``SvgIconsMixin`` provides the
    expected inline SVG snippets (``eye_svg``, ``edit_svg``,
    ``date_range_svg``).
    """
    maxDiff = None
    def test_fieldholder(self):
        """A plain char field renders a label plus a wrapped text input."""
        html_output = fieldholder({}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_fieldwrapper(self):
        """``fieldwrapper_class`` is stripped and appended to the wrapper."""
        html_output = fieldholder(
            {}, self.form["char_field"], fieldwrapper_class=" test-class "
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput test-class">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_label(self):
        """``label`` overrides the text, ``label_tag=''`` removes the label,
        and a ``mark_safe`` label is rendered without escaping."""
        html_output = fieldholder({}, self.form["char_field"], label="Input field")
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Input field">Input field</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
        html_output = fieldholder({}, self.form["char_field"], label_tag="")
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
        html_output = fieldholder(
            {}, self.form["char_field"], label=mark_safe("<strong>Char field</strong>")
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field"><strong>Char field</strong></label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_class(self):
        """``fieldholder_class`` is stripped and appended to the holder div."""
        html_output = fieldholder(
            {}, self.form["char_field"], fieldholder_class=" test-class "
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder test-class">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_context(self):
        """Template context flags (``horizontal``, ``suppress_errors``) and
        form errors alter the rendered modifier classes."""
        html_output = fieldholder({"horizontal": True}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder fieldholder--horizontal">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
        self.form.add_error("char_field", forms.ValidationError("Err"))
        html_output = fieldholder({}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder error">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput error">
            <input class="error input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            <ul class="errorlist"><li>Err</li></ul>
            </div>
            </div>
            """,
        )
        html_output = fieldholder({"suppress_errors": True}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_errors(self):
        """Field errors render an errorlist unless suppressed via kwarg."""
        self.form.add_error("char_field", forms.ValidationError("Err"))
        html_output = fieldholder({}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder error">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput error">
            <input class="error input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            <ul class="errorlist"><li>Err</li></ul>
            </div>
            </div>
            """,
        )
        html_output = fieldholder({}, self.form["char_field"], suppress_errors=True)
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_help_text(self):
        """A field with help_text renders a helptext div inside the wrapper."""
        help_field = fieldholder({}, self.form["help_field"])
        self.assertHTMLEqual(
            help_field,
            """
            <div class="fieldholder ">
            <label for="id_help_field" title="Help field">Help field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_help_field" name="help_field" type="text" value="abc" />
            <div class="helptext">Help Text</div>
            </div>
            </div>
            """,
        )
    def test_fieldholder_integer(self):
        """An IntegerField renders a number input with matching wrapper classes."""
        html_output = fieldholder({}, self.form["int_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_int_field" title="Int field">Int field:</label>
            <div class="fieldwrapper fieldwrapper--integerfield fieldwrapper--numberinput ">
            <input class="input-field" id="id_int_field" name="int_field" type="number" value="1" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_radio(self):
        """A RadioSelect renders a ul of radio inputs; label targets option 0."""
        html_output = fieldholder({}, self.form["radio_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_radio_field_0" title="Radio field">Radio field:</label>
            <div class="fieldwrapper fieldwrapper--choicefield fieldwrapper--radioselect ">
            <ul id="id_radio_field">
            <li><label for="id_radio_field_0"><input checked="checked" class="" id="id_radio_field_0" name="radio_field" type="radio" value="1"> a</label></li>
            <li><label for="id_radio_field_1"><input class="" id="id_radio_field_1" name="radio_field" type="radio" value="2"> b</label></li>
            </ul>
            </div>
            </div>
            """,
        )
    def test_fieldholder_checkbox(self):
        """A checkbox renders the custom control markup with indicator spans."""
        html_output = fieldholder({}, self.form["checkbox_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_checkbox_field" title="Checkbox field">Checkbox field:</label>
            <div class="fieldwrapper fieldwrapper--booleanfield fieldwrapper--checkboxinput ">
            <label class="control checkbox">
            <input checked class="" id="id_checkbox_field" name="checkbox_field" type="checkbox" />
            <span class="control-indicator"></span>
            <span class="control-label">Checkbox field</span>
            </label>
            </div>
            </div>
            """,
        )
    def test_fieldholder_date(self):
        """A date field gains an icon wrapper and an inline-edit label with the
        date-range SVG."""
        html_output = fieldholder({}, self.form["date_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_date_field" title="Date field">Date field:</label>
            <div class="fieldwrapper fieldwrapper--datefield fieldwrapper--dateinput fieldwrapper--icon">
            <input class="input-field" id="id_date_field" name="date_field" type="text" value="1940-10-9" />
            <label class="label--inline-edit" for="id_date_field" title="Edit Date field">{}</label>
            </div>
            </div>
            """.format(
                self.date_range_svg
            ),
        )
    def test_fieldholder_select(self):
        """A ChoiceField renders a select wrapped in the custom select div."""
        html_output = fieldholder({}, self.form["choice_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_choice_field" title="Choice field">Choice field:</label>
            <div class="fieldwrapper fieldwrapper--choicefield fieldwrapper--select ">
            <div class="select">
            <select class="" id="id_choice_field" name="choice_field">
            <option value="1" selected="selected">a</option>
            <option value="2">b</option>
            </select>
            </div>
            </div>
            </div>
            """,
        )
    def test_fieldholder_password(self):
        """A password input gains an icon wrapper with the eye-toggle button."""
        html_output = fieldholder({}, self.form["pass_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_pass_field" title="Pass field">Pass field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--passwordinput fieldwrapper--icon">
            <input class="input-field" id="id_pass_field" name="pass_field" type="password" />
            <button class="input-icon input-icon__password" type="button" tabindex="-1">{}</button>
            </div>
            </div>
            """.format(
                self.eye_svg
            ),
        )
    def test_fieldholder_textarea(self):
        """A Textarea widget renders with textarea wrapper classes."""
        html_output = fieldholder({}, self.form["text_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_text_field" title="Text field">Text field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textarea ">
            <textarea class="" cols="40" id="id_text_field" name="text_field" rows="10">abc</textarea>
            </div>
            </div>
            """,
        )
    def test_fieldholder_field_kwargs(self):
        """Extra kwargs become widget attributes: ``class`` is merged and
        stripped, ``data_some`` becomes ``data-some``."""
        html_output = fieldholder(
            {},
            self.form["char_field"],
            **{
                "class": " test class ",
                "data_some": "data",
                "placeholder": "Fill in some test data",
            }
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field test class" data-some="data" id="id_char_field" name="char_field" type="text" value="abc" placeholder="Fill in some test data" />
            </div>
            </div>
            """,
        )
    def test_fieldholder_before_after_field(self):
        """``before_field``/``after_field`` text is injected around the input,
        before any trailing icon button."""
        html_output = fieldholder({}, self.form["char_field"], after_field="@")
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            <input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />@
            </div>
            </div>
            """,
        )
        html_output = fieldholder({}, self.form["char_field"], before_field="!")
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            !<input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
        html_output = fieldholder(
            {}, self.form["char_field"], before_field="!", after_field="@"
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput ">
            !<input class="input-field" id="id_char_field" name="char_field" type="text" value="abc" />@
            </div>
            </div>
            """,
        )
        html_output = fieldholder(
            {}, self.form["pass_field"], before_field="!", after_field="@"
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_pass_field" title="Pass field">Pass field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--passwordinput fieldwrapper--icon">
            !<input class="input-field" id="id_pass_field" name="pass_field" type="password" />
            <button class="input-icon input-icon__password" type="button" tabindex="-1">{}</button>
            </div>
            </div>
            """.format(
                self.eye_svg
            ),
        )
    def test_fieldholder_unit_text_left_right(self):
        """Unit texts render ``input-unit`` elements and matching input
        modifier classes; icon fields (password) ignore unit texts."""
        html_output = fieldholder({}, self.form["char_field"], unit_text_left="$")
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput fieldwrapper--unit">
            <i class="input-unit input-unit--left">$</i>
            <input class="input-field input-field--unit-left" id="id_char_field" name="char_field" type="text" value="abc" />
            </div>
            </div>
            """,
        )
        html_output = fieldholder(
            {}, self.form["char_field"], unit_text_left="$", unit_text_right="KM"
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_char_field" title="Char field">Char field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput fieldwrapper--unit">
            <i class="input-unit input-unit--left">$</i>
            <input class="input-field input-field--unit-left input-field--unit-right" id="id_char_field" name="char_field" type="text" value="abc" />
            <i class="input-unit input-unit--right">KM</i>
            </div>
            </div>
            """,
        )
        html_output = fieldholder(
            {}, self.form["pass_field"], unit_text_left="$", unit_text_right="KM"
        )
        self.assertHTMLEqual(
            html_output,
            """
            <div class="fieldholder ">
            <label for="id_pass_field" title="Pass field">Pass field:</label>
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--passwordinput fieldwrapper--icon">
            <input class="input-field" id="id_pass_field" name="pass_field" type="password" />
            <button class="input-icon input-icon__password" type="button" tabindex="-1">{}</button>
            </div>
            </div>
            """.format(
                self.eye_svg
            ),
        )
class FormFieldholderInlineTestCase(DummyFormMixin, SvgIconsMixin, SimpleTestCase):
maxDiff = None
    def test_fieldholder_inline(self):
        """Inline rendering wraps the input in a table cell with an
        inline-edit label carrying the edit SVG."""
        html_output = fieldholder_inline({}, self.form["char_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--charfield fieldwrapper--textinput fieldwrapper--inline-edit">
            <input class="input-field input-field--inline-edit" id="id_char_field" name="char_field" placeholder="Char field" type="text" value="abc" />
            <label class="label--inline-edit" for="id_char_field" title="Edit Char field">{}</label>
            </div>
            </td>
            """.format(
                self.edit_svg
            ),
        )
    def test_fieldholder_inline_integer(self):
        """An inline IntegerField renders a number input with edit label."""
        html_output = fieldholder_inline({}, self.form["int_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--integerfield fieldwrapper--numberinput fieldwrapper--inline-edit">
            <input class="input-field input-field--inline-edit" id="id_int_field" name="int_field" placeholder="Int field" type="number" value="1" />
            <label class="label--inline-edit" for="id_int_field" title="Edit Int field">{}</label>
            </div>
            </td>
            """.format(
                self.edit_svg
            ),
        )
    def test_fieldholder_inline_checkbox(self):
        """An inline checkbox keeps the custom control markup and gets no
        inline-edit label."""
        html_output = fieldholder_inline({}, self.form["checkbox_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--booleanfield fieldwrapper--checkboxinput ">
            <label class="control checkbox">
            <input checked="checked" class="" id="id_checkbox_field" name="checkbox_field" type="checkbox" />
            <span class="control-indicator"></span>
            <span class="control-label">Checkbox field</span>
            </label>
            </div>
            </td>
            """,
        )
    def test_fieldholder_inline_date(self):
        """An inline date field renders with the icon wrapper and the
        date-range SVG in its inline-edit label."""
        html_output = fieldholder_inline({}, self.form["date_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--datefield fieldwrapper--dateinput fieldwrapper--icon fieldwrapper--inline-edit">
            <input class="input-field input-field--inline-edit" id="id_date_field" name="date_field" placeholder="Date field" type="text" value="1940-10-9" />
            <label class="label--inline-edit" for="id_date_field" title="Edit Date field">{}</label>
            </div>
            </td>
            """.format(
                self.date_range_svg
            ),
        )
    def test_fieldholder_inline_select(self):
        """An inline select renders with the select--inline-edit modifier."""
        html_output = fieldholder_inline({}, self.form["choice_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--choicefield fieldwrapper--select select--inline-edit">
            <div class="select">
            <select class="" id="id_choice_field" name="choice_field">
            <option value="1" selected="selected">a</option>
            <option value="2">b</option>
            </select>
            </div>
            </div>
            </td>
            """,
        )
    def test_fieldholder_inline_radio(self):
        """An inline radio group renders its ul with the select--inline-edit
        modifier on the wrapper."""
        html_output = fieldholder_inline({}, self.form["radio_field"])
        self.assertHTMLEqual(
            html_output,
            """
            <td class="table__cell table__cell--inline-edit ">
            <div class="fieldwrapper fieldwrapper--choicefield fieldwrapper--radioselect select--inline-edit">
            <ul id="id_radio_field">
            <li><label for="id_radio_field_0"><input checked="checked" class="" id="id_radio_field_0" name="radio_field" type="radio" value="1"> a</label></li>
            <li><label for="id_radio_field_1"><input class="" id="id_radio_field_1" name="radio_field" type="radio" value="2"> b</label></li>
            </ul>
            </div>
            </td>
            """,
        )
def test_fieldholder_inline_password(self):
html_output = | |
from .rainbow import *
import batman as batman
class SimulatedRainbow(Rainbow):
def __init__(
self,
signal_to_noise=100,
tlim=[-2.5, 2.5] * u.hour,
dt=10 * u.minute,
time=None,
wlim=[0.5, 5] * u.micron,
R=20,
dw=None,
wavelength=None,
star_flux=None,
):
"""
Create a simulated rainbow object.
Parameters
----------
signal_to_noise : float
The signal-to-noise per wavelength per time.
For example, S/N=100 would mean that the
uncertainty on the flux for each each
wavelength-time data point will be 1%.
tlim : list or array of astropy.units.Quantity
The pip install -e '.[develop]'[min, max] times for creating the time grid.
These should have astropy units of time.
dt : astropy.units.Quantity
The d(time) bin size for creating a grid
that is uniform in linear space.
time : array of astropy.units.Quantity
An array of times, if you just want to give
it an entirely custom array.
The time-setting order of precendence is:
1) time
2) tlim + dt
wlim : list or array of astropy.units.Quantity
The [min, max] wavelengths for creating the grid.
These should have astropy units of wavelength.
R : float
The spectral resolution for creating a grid
that is uniform in logarithmic space.
dw : astropy.units.Quantity
The d(wavelength) bin size for creating a grid
that is uniform in linear space.
wavelength : array of astropy.units.Quantity
An array of wavelengths, if you just want to give
it an entirely custom array.
The wavelength-setting order of precendence is:
1) wavelength
2) wlim + dw
3) wlim + R
star_flux : numpy 1D array
An array of fluxes corresponding to the supplied wavelengths.
If left blank, the code assumes a normalized flux of
flux(wavelength) = 1 for all wavelengths.
planet : boolean
Set to True to add a transiting planet.
planet_params: Dictionary
Stores planetary parameters to model, read description for
Rainbow.add_planet transit() for more details.
planet_radius: float or 1D numpy array
Planet Radius/Stellar Radius of planet. Use 1D if wavelength-
dependent. Must be same shape as wavelength array.
"""
Rainbow.__init__(self)
# set up the wavelength grid
self._setup_fake_wavelength_grid(wlim=wlim, R=R, dw=dw, wavelength=wavelength)
# set up the time grid
self._setup_fake_time_grid(tlim=tlim, dt=dt, time=time)
# Save SNR.
self.metadata["signal_to_noise"] = signal_to_noise
# If the flux of the star is not given,
# assume a continuum-normlized flux where fx=1 at all wavelengths.
if star_flux is None:
self.fluxlike["model"] = np.ones(self.shape)
# If the flux vs wavelength of the star is supplied,
# include it in the model.
else:
# Check to make sure the flux and wavelengths
# have the same shape.
if len(star_flux) == len(self.wavelike["wavelength"]):
self.fluxlike["model"] = np.transpose([star_flux] * self.shape[1])
# Set uncertainty.
self.fluxlike["uncertainty"] = self.fluxlike["model"] / signal_to_noise
self.fluxlike["flux"] = np.random.normal(
self.fluxlike["model"], self.fluxlike["uncertainty"]
)
def _setup_fake_time_grid(
self, tlim=[-2.5 * u.hour, 2.5 * u.hour], dt=1 * u.minute, time=None
):
"""
Create a fake time grid.
Parameters
----------
tlim : list or array of astropy.units.Quantity
The [min, max] times for creating the time grid.
These should have astropy units of time.
dt : astropy.units.Quantity
The d(time) bin size for creating a grid
that is uniform in linear space.
time : array of astropy.units.Quantity
An array of times, if you just want to give
it an entirely custom array.
The time-setting order of precendence is:
1) time
2) tlim + dt
"""
# check we're trying to do exactly one thing
if (tlim is None) and (time is None):
raise RuntimeError("Please specify either `tlim` or `time`.")
if time is None:
t_unit = tlim[0].unit
t_unit.to("s")
time = np.arange(tlim[0] / t_unit, tlim[1] / t_unit, dt / t_unit) * t_unit
else:
t_unit = time.unit
self.timelike["time"] = u.Quantity(time)
# TODO, make this match up better with astropy time
def _setup_fake_wavelength_grid(
self, wlim=[0.5 * u.micron, 5 * u.micron], R=100, dw=None, wavelength=None
):
"""
Create a fake wavelength grid.
Parameters
----------
wlim : list or array of astropy.units.Quantity
The [min, max] wavelengths for creating the grid.
These should have astropy units of wavelength.
R : float
The spectral resolution for creating a grid
that is uniform in logarithmic space.
dw : astropy.units.Quantity
The d(wavelength) bin size for creating a grid
that is uniform in linear space.
wavelength : array of astropy.units.Quantity
An array of wavelengths, if you just want to give
it an entirely custom array.
The wavelength-setting order of precendence is:
1) wavelength
2) wlim + dw
3) wlim + R
"""
# check we're trying to do exactly one thing
if (wlim is None) and (wavelength is None):
raise RuntimeError("Please specify either `wlim` or `wavelength`.")
# create a linear or logarithmic grid
if wavelength is None:
# check that we're
if (R is None) and (dw is None):
raise RuntimeError("Please specify either `R` or `dw`.")
w_unit = wlim[0].unit
if dw is None:
self.metadata["R"] = R
# self.metadata["wscale"] = "log"
logw_min = np.log(wlim[0] / w_unit)
logw_max = np.log(wlim[1] / w_unit)
logw = np.arange(logw_min, logw_max, 1 / R)
wavelength = np.exp(logw) * w_unit
elif dw is not None:
self.metadata["dw"] = dw
# self.metadata["wscale"] = "linear"
wavelength = (
np.arange(wlim[0] / w_unit, wlim[1] / w_unit, self.dw / w_unit)
* w_unit
)
# or just make sure the wavelength grid has units
elif wavelength is not None:
w_unit = wavelength.unit
# make sure the wavelength array has units
self.wavelike["wavelength"] = u.Quantity(wavelength)
self._guess_wscale()
# this should break if the units aren't length
w_unit.to("m")
def inject_transit(self, planet_params={}, planet_radius=0.1):
"""
Simulate a wavelength-dependent planetary transit using
batman.
Parameters
----------
planet_params : Dictionary
Values for planetary parameters for use in batman modelling.
Any values not supplied will be set to defaults:
"t0" = time of inferior conjunction (days) (default 0)
"per" = orbital period (days) (detault 1)
"a" = semi-major axis (units of stellar radii) (default 15)
"inc" = inclination (degrees) (default 90)
"ecc" = eccentricity (default 0)
"w" = longitude of periastron (degrees)(default 0)
"limb_dark" = limb-darkening model (default "nonlinear"), possible
values described in more detail in batman documentation
"u" = limb-darkening coefficients (default [0.5, 0.1, 0.1, -0.1])
Can take 3 forms:
-A single value (if limb-darkening law requires only one value)
-A 1D list/array of coefficients corresponding to the limb-darkening
law
-A 2D array of the form (n_wavelengths, n_coefficients) where
each row is the set of limb-darkening coefficients corresponding
to a single wavelength
Note that this currently does not calculate the appropriate
coefficient vs wavelength variations itself- there exist codes
(such as hpparvi/PyLDTk and nespinoza/limb-darkening) which
can be used for this.
example value: planet_params = {"a":12, "inc":87}
planet_radius = Two options:
1D array with same dimensions as wavelength array,
each value corresponds to planet radius/stellar radius at that
wavelength.
float representing Rp/Rstar if the radius is not wavelength-dependent.
example value: planet_radius = 0.01,
"""
# First, make sure planet_radius has the right dimension.
if type(planet_radius) != float and len(planet_radius) != self.nwave:
print(
"Invalid planet radius array: must be float or have shape "
+ str(np.shape(self.wavelike["wavelength"]))
)
# Defaults for planet simulation.
defaults = {
"t0": 0,
"per": 3,
"a": 10,
"inc": 90,
"ecc": 0,
"w": 0,
"limb_dark": "nonlinear",
"u": [0.5, 0.1, 0.1, -0.1],
}
# Read in planet parameters.
for i in range(len(planet_params.keys())):
key = list(planet_params.keys())[i]
if key in list(defaults.keys()):
defaults[key] = planet_params[key]
else:
print("Warning: " + str(key) + " not a valid parameter")
# Initialize batman model.
params = batman.TransitParams()
params.t0 = defaults["t0"]
params.per = defaults["per"]
params.a = defaults["a"]
params.inc = defaults["inc"]
params.ecc = defaults["ecc"]
params.w = defaults["w"]
params.limb_dark = defaults["limb_dark"]
# Deal with limb-darkening.
if len(np.shape(defaults["u"])) < 2: # Coefficients constant with wavelength
u_arr = np.tile(defaults["u"], (self.nwave, 1))
elif (
len(np.shape(defaults["u"])) == 2
): # 2D array of coefficients, along wavelength axis
if np.shape(defaults["u"])[0] != self.nwave:
print("Shape of limb-darkening array does not match wavelengths.")
return
u_arr = defaults["u"]
else:
print("Invalid limb-darkening coefficient array.")
# Read in planetary radius.
if type(planet_radius) == float:
rprs = np.zeros(self.nwave) + planet_radius
else:
rprs = planet_radius
planet_flux = np.zeros((self.nwave, self.ntime))
for i in range(self.nwave):
params.rp = rprs[i]
params.u = u_arr[i]
# print(params.u)
try:
m
except NameError:
m = batman.TransitModel(params, self.timelike["time"].to("day").value)
| |
<reponame>allegrocy/gwascalon<gh_stars>0
import discord
from discord.ext import commands
from redbot.core import checks
import urllib
import aiohttp
import async_timeout
from bs4 import BeautifulSoup, SoupStrainer
import time
import asyncio
import datetime
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import sys
def parse(search_terms):
    """Build the kamadan.decltype.org search URL for the given search terms.

    The terms are percent-encoded so spaces and punctuation survive the URL.
    """
    # Import the submodule explicitly: the module-level `import urllib` at the
    # top of this file does NOT guarantee `urllib.parse` is an accessible
    # attribute — it only works if something else already imported it.
    from urllib.parse import quote
    search_string = quote(search_terms)
    return "https://kamadan.decltype.org/search/" + search_string
async def get_response(url):
    """Fetch `url` and return the response body as text.

    Raises
    ------
    RuntimeError
        With a user-facing message if the request takes longer than 10 s.
    """
    # Browser-like User-Agent; presumably the trade site rejects default
    # client UAs — TODO confirm this is still required.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'}
    try:
        async with aiohttp.ClientSession() as session:
            # NOTE(review): recent async_timeout versions require
            # `async with async_timeout.timeout(10)` — verify the pinned
            # version supports the plain `with` form used here.
            with async_timeout.timeout(10):
                async with session.get(url, headers=headers) as response:
                    return await response.text()
    except asyncio.TimeoutError:
        # Translate the timeout into a message that callers forward to Discord.
        error_message = "Oops! Either https://kamadan.decltype.org is down or this bot has gone on strike.\nTry searching directly on the website or at <#258069080869699586> (#Kamadan on the main Guild Wars discord)."
        raise RuntimeError(error_message)
def append_am(author, authors, message, messages):
    """Append `author`/`message` to their lists, skipping None entries.

    Code fences (```) are stripped from messages so they cannot break the
    Discord code block the results are rendered inside.

    Returns the (authors, messages) lists (mutated in place).
    """
    if author is not None:
        # (removed a dead `len(author)` statement that had no effect)
        authors.append(author)
    if message is not None:
        messages.append(message.replace("```", ""))
    return authors, messages
def get_info(htmlSource, mobile):
    """Parse search-result HTML into parallel author/message lists.

    Returns [authors, messages]. For mobile output authors are wrapped in
    bold markers; for desktop output they are right-aligned with spaces so
    the monospace code block lines up.

    NOTE(review): when the page has no result rows, `soup.find(...)` returns
    None and `.get_text()` raises AttributeError. Callers (price_check,
    cache_more_results) deliberately catch AttributeError as the
    "no results" signal — do not "fix" this without updating them.
    """
    authors, messages = [], []
    authors_temp = []
    # Restrict parsing to <tr> tags to keep the soup small and fast.
    only_tr_tags = SoupStrainer("tr")
    soup = BeautifulSoup(htmlSource, "html.parser", parse_only=only_tr_tags)
    # First result row, handled separately; its siblings are the rest.
    first_row = soup.find("tr", {"class":"row"})
    author = first_row.find("div", {"class":"name"}).get_text()
    message = first_row.find("td", {"class":"message"}).get_text()
    authors_temp, messages = append_am(author, authors_temp, message, messages)
    for row in soup.find("tr", {"class":"row"}).next_siblings:
        author = row.find("div", {"class":"name"}).get_text()
        message = row.find("td", {"class":"message"}).get_text()
        authors_temp, messages = append_am(author, authors_temp, message, messages)
    # Pad all author names to the longest one (desktop) or bold them (mobile).
    longest_length = len(max(authors_temp, key=len))
    for author in authors_temp:
        if mobile is True:
            author = "**{}**".format(author)
        else:
            pad_length = longest_length - len(author)
            padding = " " * pad_length
            author = padding + author
        authors.append(author)
    am_list = [authors, messages]
    return am_list
async def submit_to_executor(executor, htmlSource, mobile):
    """Run get_info(htmlSource, mobile) on `executor`; return (authors, messages).

    Bug fix: the previous implementation called `executor.submit(...)` and then
    `future.result()`, which blocks the event loop until parsing finishes (the
    single `await asyncio.sleep(0)` only yielded once before blocking).
    `loop.run_in_executor` wraps the concurrent.futures future so it can be
    awaited properly, keeping the bot responsive while HTML is parsed.
    """
    loop = asyncio.get_event_loop()
    authors, messages = await loop.run_in_executor(
        executor, get_info, htmlSource, mobile)
    return authors, messages
def format_results(authors, messages, search_terms, page_number, url, results_length, t0, mobile):
    """Format one page of results into a Discord message (max 2000 chars).

    Parameters
    ----------
    authors, messages : list of str
        Parallel lists (assumed equal length — produced by get_info).
    search_terms, url : str
        Echoed in the header so reaction handlers can re-parse them.
    page_number, results_length : int
        Shown in the header; also re-parsed by pc_reaction_monitor.
    t0 : float
        Kept for interface compatibility; the elapsed-time display was
        removed, so it is no longer used.
    mobile : bool
        If True, strip the code-fence markup that mobile clients mishandle.
    """
    # (removed dead code: `t1 = time.time() - t0` was computed but the line
    # displaying it was commented out long ago)
    body = "\n".join(a + ": " + m for a, m in zip(authors, messages))
    closing_fence = "\n```"
    formatted_results = "Search results for: **{}** (page {}) >> {} results per page\n{}\n```http\n{}".format(
        search_terms, page_number, results_length, url, body)
    # Truncate so that, with the closing fence appended, we stay <= 2000 chars.
    formatted_results = formatted_results[:2000 - len(closing_fence)] + closing_fence
    if mobile:
        formatted_results = formatted_results.replace("```http\n", "")
        formatted_results = formatted_results.replace("```", "")
    return formatted_results
def round_up(value, round_up_to):
    """Return ceil((value * 8) / round_up_to).

    Used to work out how many 25-row pages are needed to cache
    `results_length * 8` trade messages.
    """
    scaled = value * 8
    quotient, leftover = divmod(scaled, round_up_to)
    if leftover == 0:
        return int(quotient)
    return int((scaled + (round_up_to - leftover)) / round_up_to)
# Keycap emoji 1-8, keyed by page number. Used by add_reactions() to react to
# search-result posts and by pc_reaction_monitor() to map a user's reaction
# back to the result page they requested.
digit = {1:"\N{DIGIT ONE}\N{COMBINING ENCLOSING KEYCAP}",
         2:"\N{DIGIT TWO}\N{COMBINING ENCLOSING KEYCAP}",
         3:"\N{DIGIT THREE}\N{COMBINING ENCLOSING KEYCAP}",
         4:"\N{DIGIT FOUR}\N{COMBINING ENCLOSING KEYCAP}",
         5:"\N{DIGIT FIVE}\N{COMBINING ENCLOSING KEYCAP}",
         6:"\N{DIGIT SIX}\N{COMBINING ENCLOSING KEYCAP}",
         7:"\N{DIGIT SEVEN}\N{COMBINING ENCLOSING KEYCAP}",
         8:"\N{DIGIT EIGHT}\N{COMBINING ENCLOSING KEYCAP}"
         }
def parse_kamadan_logs(message_content, search_terms):
    """Re-search a previously posted results message for `search_terms`.

    Strips the 8-character opening code fence and 4-character closing fence
    from `message_content`, then collects the author and text of every line
    whose message part contains `search_terms`.

    Returns (authors, messages) as parallel lists.
    """
    body = message_content[8:len(message_content) - 4]
    logs_authors, logs_messages = [], []
    for line in body.split("\n"):
        # Lines look like "Author Name: message text"; split on the first colon.
        colon_at = line.find(":")
        line_author = line[:colon_at]
        line_message = line[colon_at + 2:]
        if search_terms in line_message:
            logs_authors.append(line_author)
            logs_messages.append(line_message)
    return logs_authors, logs_messages
def format_results_kamadan_logs(authors, messages, search_terms):
    """Format matched log lines into one Discord message, capped at 2000 chars."""
    # Index by position (not zip) so a length mismatch still raises IndexError,
    # matching the historical behavior.
    joined = "\n".join(
        "{}: {}".format(author, messages[i]) for i, author in enumerate(authors)
    )
    output = "Search results for: **{}**\n```http\n{}```".format(search_terms, joined)
    return output[:2000]
class PriceCheckAiohttp():
"""Price check, powered by kamadan.deceltype.com."""
def __init__(self, bot):
self.bot = bot
self.init_datetime = datetime.datetime.utcnow()
self.use_count = 0
self.cache = {}
if sys.platform == "win32":
print("I'm on windows")
self.executor = ThreadPoolExecutor(max_workers=1)
else:
self.executor = ProcessPoolExecutor(max_workers=3)
def update_results(self, message_id, message_to_edit, page_number, results_length, t0, mobile):
authors = self.cache[message_id]["authors"]
messages = self.cache[message_id]["messages"]
range_upper_end = page_number * results_length
range_lower_end = range_upper_end - results_length
if len(authors) < range_upper_end:
range_upper_end = len(authors)
authors = authors[range_lower_end:range_upper_end]
messages = messages[range_lower_end:range_upper_end]
search_terms = self.cache[message_id]["search_terms"]
url = parse(search_terms)
formatted_results = format_results(authors, messages, search_terms, page_number, url, results_length, t0, mobile)
if len(authors) == 0:
formatted_results = "No search results."
formatted_results = "Search results for: **{}** (page {}) >> {} results per page\n{}\n```http\n{}```".format(search_terms, page_number, results_length, url, formatted_results)
if mobile is True:
formatted_results = formatted_results.replace("```http\n","")
formatted_results = formatted_results.replace("```","")
return formatted_results
async def price_check(self, ctx, search_terms, results_length, mobile):
"""Main price check function"""
t0 = time.time()
search_terms = search_terms.replace("```", "")
search_terms = search_terms.replace(") >> ", "")
search_terms = search_terms.replace(" results per page__", "")
search_terms = search_terms.replace("** (page ", "")
page_number = 1
authors, messages = None, None
url = parse(search_terms)
self.use_count += 1
try:
htmlSource = await get_response(url)
# authors, messages = get_info(htmlSource, mobile)
authors, messages = await submit_to_executor(self.executor, htmlSource, mobile)
formatted_results = format_results(authors[:results_length], messages[:results_length], search_terms, page_number, url, results_length, t0, mobile)
return formatted_results, authors, messages, search_terms
except AttributeError:
error_message = "No search results. Stop feeding me rubbish!"
raise RuntimeError(error_message)
except Exception as e:
e_name = e.__class__.__name__
if e_name == "ClientConnectorError":
error_message = "Page failed to load. Site may be down. Try: " + url
raise RuntimeError(error_message)
else:
error_message = "`{}`".format(e)
raise RuntimeError(error_message)
    async def cache_more_results(self, message_id, authors, messages, search_terms, results_length, mobile):
        """Fetch and cache additional result pages for reaction-based paging.

        The site serves 25 rows per page; a full first page (len == 25)
        suggests more pages exist, so the remaining pages are fetched
        concurrently and parsed on the executor, then everything is stored
        in self.cache keyed by the Discord message id.

        NOTE(review): AttributeError from parsing an empty page is swallowed
        on purpose — whatever was gathered so far is still cached.
        """
        if authors is None:
            return
        try:
            if len(authors) == 25:
                # Enough pages to cover up to 8 reaction pages of results.
                max_pages = round_up(results_length, 25)
                tasks = []
                # print("Max pages = {}".format(max_pages))
                for page in range(1,max_pages):
                    # print("Page = {}".format(page))
                    # The site paginates via a trailing row-offset path segment.
                    length = page * 25
                    url = parse(search_terms)
                    url = url + "/" + str(length)
                    task = asyncio.ensure_future(get_response(url))
                    tasks.append(task)
                    # print("-----------APPENDING ONE TASK!")
                # Download all remaining pages concurrently.
                htmlSources = await asyncio.gather(*tasks)
                # for htmlSource in htmlSources:
                # authors_next_page, messages_next_page = get_info(htmlSource, mobile)
                # test_authors.extend(authors_next_page)
                # test_messages.extend(messages_next_page)
                # Parse on the executor; note future.result() blocks the event
                # loop here — acceptable since all downloads already finished.
                futures = []
                for htmlSource in htmlSources:
                    future = self.executor.submit(get_info, htmlSource, mobile)
                    futures.append(future)
                for future in futures:
                    authors_next_page, messages_next_page = future.result()
                    authors.extend(authors_next_page)
                    messages.extend(messages_next_page)
        except AttributeError:
            pass
        self.cache[message_id] = {}
        self.cache[message_id]["authors"] = authors
        self.cache[message_id]["messages"] = messages
        self.cache[message_id]["search_terms"] = search_terms
        # NOTE(review): last_message_id is never initialised in __init__ and
        # appears unused elsewhere in this view — confirm before removing.
        self.last_message_id = message_id
async def add_reactions(self, message):
if message.author.id == self.bot.user.id and message.content.startswith("Search results for:"):
message_to_react_to = message
for skill_number in digit.keys():
await message_to_react_to.add_reaction(digit[skill_number])
    async def pc_reaction_monitor(self, emoji, message_id, channel_id, user_id):
        """Handle a keycap reaction on a results message by paging it.

        Ignores the bot's own reactions and messages older than 30 days.
        The page number, search terms and results-per-page are re-parsed out
        of the message's own header text, so the header format emitted by
        format_results / update_results must stay in sync with this parser.
        """
        if user_id == self.bot.user.id:
            return
        # Message age gate: Discord snowflakes encode the creation time.
        time_of_message = discord.utils.snowflake_time(message_id)
        time_now = datetime.datetime.utcnow()
        time_limit = time_now - datetime.timedelta(days=30)
        if time_of_message < time_limit:
            return
        channel = discord.utils.get(self.bot.get_all_channels(), id=channel_id)
        # NOTE(review): channel.get_message() is the pre-1.0 discord.py API
        # (renamed fetch_message later) — confirm the pinned library version.
        message_to_edit = await channel.get_message(message_id)
        if message_to_edit.author.id == self.bot.user.id and message_to_edit.content.find("Search results") != -1:
            # Map the reaction emoji back to a page number via `digit`.
            # NOTE(review): if no keycap matches, page_number is left at 8
            # (the last key iterated) — presumably unreachable; verify.
            reaction_emoji = emoji.name
            for page_number, emoji in digit.items():
                if reaction_emoji == emoji:
                    break
            message_header = message_to_edit.content[:message_to_edit.content.find("\n```http\n")]
            # Recover results-per-page from "... >> N results per page".
            start_phrase = ") >> "
            start_pos = message_header.find(start_phrase)
            end_pos = message_header.find(" results per page")
            results_length = int(message_header[start_pos + len(start_phrase):end_pos])
            # Mobile-formatted messages had their code fences stripped.
            if message_to_edit.content.find("```") != -1:
                mobile = False
            else:
                mobile = True
            t0 = time.time()
            if message_to_edit.id in self.cache.keys():
                formatted_results = self.update_results(message_id, message_to_edit, page_number, results_length, t0, mobile)
            else:
                # Cache miss (e.g. after a restart): re-run the search from the
                # terms embedded in the header, then page from the fresh cache.
                start_phrase = "Search results for: **"
                start_pos = message_header.find(start_phrase)
                end_pos = message_header.find("** (page ")
                search_terms = message_header[start_pos + len(start_phrase):end_pos]
                url = parse(search_terms)
                htmlSource = await get_response(url)
                # authors, messages = get_info(htmlSource, mobile)
                authors, messages = await submit_to_executor(self.executor, htmlSource, mobile)
                await self.cache_more_results(message_id, authors, messages, search_terms, results_length, mobile)
                formatted_results = self.update_results(message_id, message_to_edit, page_number, results_length, t0, mobile)
            if mobile is True:
                # Two-step edit works around a mobile-client rendering bug.
                await message_to_edit.edit(content="Clearing previous message... This is necessary due to a bug with Discord's client.")
            await message_to_edit.edit(content=formatted_results)
            self.use_count += 1
@commands.group(invoke_without_command=True)
async def pc(self, ctx, *, search_terms: str):
"""Perform price checks using https://kamadan.decltype.org."""
results_length = 6
mobile = False
try:
formatted_results, authors, messages, search_terms = await self.price_check(ctx, search_terms, results_length, mobile)
except Exception as e:
e = str(e)
await ctx.send(e[1:-1])
return
message = await ctx.send(formatted_results)
await self.cache_more_results(message.id, authors, messages, search_terms, results_length, mobile)
await self.add_reactions(message)
@pc.command(name="author")
async def pc_author(self, ctx, *, search_terms: str):
"""Price check for a specific author."""
mobile = False
results_length = 6
search_terms = "author:\"{}\"".format(search_terms.title())
try:
formatted_results, authors, messages, search_terms = await self.price_check(ctx, search_terms, results_length, mobile)
except Exception as e:
e = str(e)
await ctx.send(e[1:-1])
return
message = await ctx.send(formatted_results)
await self.cache_more_results(message.id, authors, messages, search_terms, results_length, mobile)
await self.add_reactions(message)
@pc.command(name="m")
async def pc_mobile(self, ctx, *, search_terms: str):
"""Formatted for mobile devices."""
mobile = True
results_length = 6
try:
formatted_results, authors, messages, search_terms = await self.price_check(ctx, search_terms, results_length, mobile)
except Exception as e:
e = str(e)
await ctx.send(e[1:-1])
return
message = await ctx.send(formatted_results)
await self.cache_more_results(message.id, authors, messages, search_terms, results_length, mobile)
await self.add_reactions(message)
@pc.command(name="mauthor")
async def pc_mobile_author(self, ctx, *, search_terms: str):
"""Price check for a specific author on mobile."""
mobile = True
results_length = 6
search_terms = "author:\"{}\"".format(search_terms.title())
try:
formatted_results, authors, messages, search_terms = await self.price_check(ctx, search_terms, results_length, mobile)
except Exception as e:
e = str(e)
await ctx.send(e[1:-1])
return
message = await ctx.send(formatted_results)
await self.cache_more_results(message.id, authors, messages, search_terms, results_length, mobile)
await self.add_reactions(message)
async def pc_bot_spam_only(self, ctx, search_terms, results_length, mobile):
if ctx.channel.name == "bot-spam":
try:
formatted_results, authors, messages, search_terms = await self.price_check(ctx, | |
env = gym.wrappers.Monitor(env, monitor_path, resume=True)
return env
self.env = wrap(self.env)
def make_env(vector_index):
return wrap(
env_creator(
env_context.copy_with_overrides(
vector_index=vector_index, remote=remote_worker_envs)))
self.tf_sess = None
policy_dict = _validate_and_canonicalize(policy, self.env)
self.policies_to_train = policies_to_train or list(policy_dict.keys())
# set numpy and python seed
if seed is not None:
np.random.seed(seed)
random.seed(seed)
if not hasattr(self.env, "seed"):
raise ValueError("Env doesn't support env.seed(): {}".format(
self.env))
self.env.seed(seed)
try:
import torch
torch.manual_seed(seed)
except ImportError:
logger.info("Could not seed torch")
if _has_tensorflow_graph(policy_dict):
if (ray.is_initialized()
and ray.worker._mode() != ray.worker.LOCAL_MODE
and not ray.get_gpu_ids()):
logger.debug("Creating policy evaluation worker {}".format(
worker_index) +
" on CPU (please ignore any CUDA init errors)")
if not tf:
raise ImportError("Could not import tensorflow")
with tf.Graph().as_default():
if tf_session_creator:
self.tf_sess = tf_session_creator()
else:
self.tf_sess = tf.Session(
config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True)))
with self.tf_sess.as_default():
# set graph-level seed
if seed is not None:
tf.set_random_seed(seed)
self.policy_map, self.preprocessors = \
self._build_policy_map(policy_dict, policy_config)
else:
self.policy_map, self.preprocessors = self._build_policy_map(
policy_dict, policy_config)
self.multiagent = set(self.policy_map.keys()) != {DEFAULT_POLICY_ID}
if self.multiagent:
if not ((isinstance(self.env, MultiAgentEnv)
or isinstance(self.env, ExternalMultiAgentEnv))
or isinstance(self.env, BaseEnv)):
raise ValueError(
"Have multiple policies {}, but the env ".format(
self.policy_map) +
"{} is not a subclass of BaseEnv, MultiAgentEnv or "
"ExternalMultiAgentEnv?".format(self.env))
self.filters = {
policy_id: get_filter(observation_filter,
policy.observation_space.shape)
for (policy_id, policy) in self.policy_map.items()
}
if self.worker_index == 0:
logger.info("Built filter map: {}".format(self.filters))
# Always use vector env for consistency even if num_envs = 1
self.async_env = BaseEnv.to_base_env(
self.env,
make_env=make_env,
num_envs=num_envs,
remote_envs=remote_worker_envs,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
self.num_envs = num_envs
if self.batch_mode == "truncate_episodes":
unroll_length = batch_steps
pack_episodes = True
elif self.batch_mode == "complete_episodes":
unroll_length = float("inf") # never cut episodes
pack_episodes = False # sampler will return 1 episode per poll
else:
raise ValueError("Unsupported batch mode: {}".format(
self.batch_mode))
self.io_context = IOContext(log_dir, policy_config, worker_index, self)
self.reward_estimators = []
for method in input_evaluation:
if method == "simulation":
logger.warning(
"Requested 'simulation' input evaluation method: "
"will discard all sampler outputs and keep only metrics.")
sample_async = True
elif method == "is":
ise = ImportanceSamplingEstimator.create(self.io_context)
self.reward_estimators.append(ise)
elif method == "wis":
wise = WeightedImportanceSamplingEstimator.create(
self.io_context)
self.reward_estimators.append(wise)
else:
raise ValueError(
"Unknown evaluation method: {}".format(method))
if sample_async:
self.sampler = AsyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
blackhole_outputs="simulation" in input_evaluation,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.sampler.start()
else:
self.sampler = SyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.input_reader = input_creator(self.io_context)
assert isinstance(self.input_reader, InputReader), self.input_reader
self.output_writer = output_creator(self.io_context)
assert isinstance(self.output_writer, OutputWriter), self.output_writer
logger.debug(
"Created rollout worker with env {} ({}), policies {}".format(
self.async_env, self.env, self.policy_map))
    @override(EvaluatorInterface)
    def sample(self):
        """Evaluate the current policies and return a batch of experiences.
        Return:
            SampleBatch|MultiAgentBatch from evaluating the current policies.
        """
        # Fake-sampler mode replays the previous batch (used for testing).
        if self._fake_sampler and self.last_batch is not None:
            return self.last_batch
        if log_once("sample_start"):
            logger.info("Generating sample batch of size {}".format(
                self.sample_batch_size))
        # Pull batches until the target step count is reached.
        batches = [self.input_reader.next()]
        steps_so_far = batches[0].count
        # In truncate_episodes mode, never pull more than 1 batch per env.
        # This avoids over-running the target batch size.
        if self.batch_mode == "truncate_episodes":
            max_batches = self.num_envs
        else:
            max_batches = float("inf")
        while steps_so_far < self.sample_batch_size and len(
                batches) < max_batches:
            batch = self.input_reader.next()
            steps_so_far += batch.count
            batches.append(batch)
        batch = batches[0].concat_samples(batches)
        if self.callbacks.get("on_sample_end"):
            self.callbacks["on_sample_end"]({"worker": self, "samples": batch})
        # Always do writes prior to compression for consistency and to allow
        # for better compression inside the writer.
        self.output_writer.write(batch)
        # Do off-policy estimation if needed
        if self.reward_estimators:
            for sub_batch in batch.split_by_episode():
                for estimator in self.reward_estimators:
                    estimator.process(sub_batch)
        if log_once("sample_end"):
            logger.info("Completed sample batch:\n\n{}\n".format(
                summarize(batch)))
        # Optionally compress observations ("bulk" compresses whole columns).
        if self.compress_observations == "bulk":
            batch.compress(bulk=True)
        elif self.compress_observations:
            batch.compress()
        if self._fake_sampler:
            self.last_batch = batch
        return batch
@DeveloperAPI
@ray.method(num_return_vals=2)
def sample_with_count(self):
"""Same as sample() but returns the count as a separate future."""
batch = self.sample()
return batch, batch.count
@override(EvaluatorInterface)
def get_weights(self, policies=None):
if policies is None:
policies = self.policy_map.keys()
return {
pid: policy.get_weights()
for pid, policy in self.policy_map.items() if pid in policies
}
@override(EvaluatorInterface)
def set_weights(self, weights):
for pid, w in weights.items():
self.policy_map[pid].set_weights(w)
    @override(EvaluatorInterface)
    def compute_gradients(self, samples):
        """Compute gradients for `samples` without applying them.

        Returns (grad_out, info_out); for multi-agent input both are keyed by
        policy id, and info_out additionally carries "batch_count".
        """
        if log_once("compute_gradients"):
            logger.info("Compute gradients on:\n\n{}\n".format(
                summarize(samples)))
        if isinstance(samples, MultiAgentBatch):
            grad_out, info_out = {}, {}
            if self.tf_sess is not None:
                # Batch all per-policy TF ops into one session run via the
                # builder, then fetch the results afterwards.
                builder = TFRunBuilder(self.tf_sess, "compute_gradients")
                for pid, batch in samples.policy_batches.items():
                    if pid not in self.policies_to_train:
                        continue
                    grad_out[pid], info_out[pid] = (
                        self.policy_map[pid]._build_compute_gradients(
                            builder, batch))
                grad_out = {k: builder.get(v) for k, v in grad_out.items()}
                info_out = {k: builder.get(v) for k, v in info_out.items()}
            else:
                # Non-TF (or no shared session): compute eagerly per policy.
                for pid, batch in samples.policy_batches.items():
                    if pid not in self.policies_to_train:
                        continue
                    grad_out[pid], info_out[pid] = (
                        self.policy_map[pid].compute_gradients(batch))
        else:
            # Single-agent: delegate directly to the default policy.
            grad_out, info_out = (
                self.policy_map[DEFAULT_POLICY_ID].compute_gradients(samples))
        info_out["batch_count"] = samples.count
        if log_once("grad_out"):
            logger.info("Compute grad info:\n\n{}\n".format(
                summarize(info_out)))
        return grad_out, info_out
    @override(EvaluatorInterface)
    def apply_gradients(self, grads):
        """Apply previously computed gradients.

        `grads` is either a per-policy dict (multi-agent) or a single
        gradient blob for the default policy.
        """
        if log_once("apply_gradients"):
            logger.info("Apply gradients:\n\n{}\n".format(summarize(grads)))
        if isinstance(grads, dict):
            if self.tf_sess is not None:
                # Fuse all per-policy apply ops into one session run.
                builder = TFRunBuilder(self.tf_sess, "apply_gradients")
                outputs = {
                    pid: self.policy_map[pid]._build_apply_gradients(
                        builder, grad)
                    for pid, grad in grads.items()
                }
                return {k: builder.get(v) for k, v in outputs.items()}
            else:
                return {
                    pid: self.policy_map[pid].apply_gradients(g)
                    for pid, g in grads.items()
                }
        else:
            return self.policy_map[DEFAULT_POLICY_ID].apply_gradients(grads)
    @override(EvaluatorInterface)
    def learn_on_batch(self, samples):
        """Update policies on `samples` (compute AND apply gradients).

        Returns the per-policy learner info dict (or the default policy's
        info for single-agent input).
        """
        if log_once("learn_on_batch"):
            logger.info(
                "Training on concatenated sample batches:\n\n{}\n".format(
                    summarize(samples)))
        if isinstance(samples, MultiAgentBatch):
            info_out = {}
            to_fetch = {}
            if self.tf_sess is not None:
                builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
            else:
                builder = None
            for pid, batch in samples.policy_batches.items():
                if pid not in self.policies_to_train:
                    continue
                policy = self.policy_map[pid]
                # Prefer the fused-session path when the policy supports it;
                # those results are fetched together after the loop.
                if builder and hasattr(policy, "_build_learn_on_batch"):
                    to_fetch[pid] = policy._build_learn_on_batch(
                        builder, batch)
                else:
                    info_out[pid] = policy.learn_on_batch(batch)
            info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
        else:
            info_out = self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
                samples)
        if log_once("learn_out"):
            logger.info("Training output:\n\n{}\n".format(summarize(info_out)))
        return info_out
@DeveloperAPI
def get_metrics(self):
"""Returns a list of new RolloutMetric objects from evaluation."""
out = self.sampler.get_metrics()
for m in self.reward_estimators:
out.extend(m.get_metrics())
return out
@DeveloperAPI
def foreach_env(self, func):
"""Apply the given function to each underlying env instance."""
envs = self.async_env.get_unwrapped()
if not envs:
return [func(self.async_env)]
else:
return [func(e) for e in envs]
@DeveloperAPI
def get_policy(self, policy_id=DEFAULT_POLICY_ID):
"""Return policy for the specified id, or None.
Arguments:
policy_id (str): id of policy to return.
"""
return self.policy_map.get(policy_id)
@DeveloperAPI
def for_policy(self, func, policy_id=DEFAULT_POLICY_ID):
"""Apply the given function to the specified policy."""
return func(self.policy_map[policy_id])
@DeveloperAPI
def foreach_policy(self, func):
"""Apply the given function to each (policy, policy_id) tuple."""
return [func(policy, pid) for pid, policy in self.policy_map.items()]
@DeveloperAPI
def foreach_trainable_policy(self, func):
"""Apply the given function to each (policy, policy_id) tuple.
This only applies func to policies in `self.policies_to_train`."""
return [
func(policy, pid) for pid, policy in self.policy_map.items()
if pid in self.policies_to_train
]
@DeveloperAPI
def sync_filters(self, new_filters):
"""Changes self's filter to given and rebases any accumulated delta.
Args:
new_filters (dict): Filters with new state to update local copy.
"""
assert all(k in new_filters for k in self.filters)
for k in self.filters:
self.filters[k].sync(new_filters[k])
@DeveloperAPI
def get_filters(self, flush_after=False):
"""Returns a snapshot of filters.
Args:
flush_after (bool): Clears the filter buffer state.
Returns:
return_filters (dict): Dict for serializable filters
"""
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.clear_buffer()
return return_filters
@DeveloperAPI
def save(self):
filters = self.get_filters(flush_after=True)
state = {
pid: self.policy_map[pid].get_state()
for pid in self.policy_map
}
return pickle.dumps({"filters": filters, "state": state})
@DeveloperAPI
def restore(self, objs):
objs = pickle.loads(objs)
self.sync_filters(objs["filters"])
for pid, state in objs["state"].items():
self.policy_map[pid].set_state(state)
@DeveloperAPI
def set_global_vars(self, global_vars):
self.foreach_policy(lambda p, _: p.on_global_var_update(global_vars))
@DeveloperAPI
def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_model(export_dir)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir,
filename_prefix="model",
policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_checkpoint(export_dir,
filename_prefix)
@DeveloperAPI
def stop(self):
self.async_env.stop()
    def _build_policy_map(self, policy_dict, policy_config):
        """Instantiate policies (and their preprocessors) from `policy_dict`.

        Args:
            policy_dict (dict): {name: (policy_cls, obs_space, act_space,
                per-policy config overrides)}.
            policy_config (dict): Base config merged under each override.

        Returns:
            (policy_map, preprocessors), both keyed by policy name.
        """
        policy_map = {}
        preprocessors = {}
        # Sorted for deterministic construction order (stable TF scopes).
        for name, (cls, obs_space, act_space,
                   conf) in sorted(policy_dict.items()):
            logger.debug("Creating policy for {}".format(name))
            merged_conf = merge_dicts(policy_config, conf)
            if self.preprocessing_enabled:
                # The policy sees the preprocessor's (flattened) obs space.
                preprocessor = ModelCatalog.get_preprocessor_for_space(
                    obs_space, merged_conf.get("model"))
                preprocessors[name] = preprocessor
                obs_space = preprocessor.observation_space
            else:
                preprocessors[name] = NoPreprocessor(obs_space)
            if isinstance(obs_space, gym.spaces.Dict) or \
                    isinstance(obs_space, gym.spaces.Tuple):
                raise ValueError(
                    "Found raw Tuple|Dict space as input to policy. "
                    "Please preprocess these observations with a "
                    "Tuple|DictFlatteningPreprocessor.")
            # Scope TF variables per policy name to avoid collisions.
            if tf:
                with tf.variable_scope(name):
                    policy_map[name] = cls(obs_space, act_space, merged_conf)
            else:
                policy_map[name] = cls(obs_space, act_space, merged_conf)
        if self.worker_index == 0:
            logger.info("Built policy map: {}".format(policy_map))
            logger.info("Built preprocessor map: {}".format(preprocessors))
        return policy_map, preprocessors
def __del__(self):
if hasattr(self, "sampler") and isinstance(self.sampler, AsyncSampler):
self.sampler.shutdown = True
def _validate_and_canonicalize(policy, env):
if isinstance(policy, dict):
_validate_multiagent_config(policy)
return policy
elif not issubclass(policy, Policy):
raise ValueError("policy must be a rllib.Policy class")
else:
if (isinstance(env, MultiAgentEnv)
and not hasattr(env, "observation_space")):
raise ValueError(
"MultiAgentEnv must have observation_space defined if run "
"in a single-agent configuration.")
return {
DEFAULT_POLICY_ID: (policy, env.observation_space,
env.action_space, {})
| |
if not np.isnan(intensity_range[0]):
if fill_value is None:
voxel_grid[voxel_grid < intensity_range[0]] = intensity_range[0]
else:
voxel_grid[voxel_grid < intensity_range[0]] = fill_value[0]
# Upper boundary
if not np.isnan(intensity_range[1]):
if fill_value is None:
voxel_grid[voxel_grid > intensity_range[1]] = intensity_range[1]
else:
voxel_grid[voxel_grid > intensity_range[1]] = fill_value[1]
# Set the updated voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
def normalise_intensities(self, norm_method="none", intensity_range=None, saturation_range=None, mask=None):
    """
    Normalises image intensities.

    :param norm_method: string defining the normalisation method. Should be one of "none", "range",
        "relative_range", "quantile_range" or "standardisation".
    :param intensity_range: range of intensities (or relative fractions / quantiles, depending on
        norm_method) used for normalisation; NaN entries fall back to values derived from the image.
    :param saturation_range: intensity range used to saturate (clip) the image after normalisation.
    :param mask: boolean mask restricting which voxels are used to derive normalisation parameters;
        defaults to the whole image, and falls back to the whole image when empty.
    :return: None; the voxel grid is updated in place.
    :raises ValueError: if norm_method is not a recognised method.
    """
    # Skip for missing images
    if self.is_missing:
        return

    if intensity_range is None:
        intensity_range = [np.nan, np.nan]

    # np.bool was removed in NumPy 1.24; use the builtin bool dtype instead.
    if mask is None:
        mask = np.ones(self.size, dtype=bool)
    else:
        mask = mask.astype(bool)

    # An empty mask would make min/max/mean undefined; fall back to the full image.
    if np.sum(mask) == 0:
        mask = np.ones(self.size, dtype=bool)

    if saturation_range is None:
        saturation_range = [np.nan, np.nan]

    def _apply_range(grid, min_int, max_int):
        # Scale to [0, 1]; only shift when the range is degenerate, to avoid division by zero.
        if not max_int == min_int:
            return (grid - min_int) / (max_int - min_int)
        else:
            return grid - min_int

    if norm_method == "none":
        # No normalisation; note that saturation is also skipped in this case.
        return

    elif norm_method == "range":
        # Normalisation to [0, 1] range using fixed intensities.
        voxel_grid = self.get_voxel_grid()

        # Find maximum and minimum intensities; NaN entries are replaced by image extrema.
        min_int = np.min(voxel_grid[mask]) if np.isnan(intensity_range[0]) else intensity_range[0]
        max_int = np.max(voxel_grid[mask]) if np.isnan(intensity_range[1]) else intensity_range[1]

        self.set_voxel_grid(voxel_grid=_apply_range(voxel_grid, min_int, max_int))
        self.is_normalised = True

    elif norm_method == "relative_range":
        # Normalisation to [0, 1]-ish range using relative intensities.
        voxel_grid = self.get_voxel_grid()

        min_int_rel = intensity_range[0] if not np.isnan(intensity_range[0]) else 0.0
        max_int_rel = intensity_range[1] if not np.isnan(intensity_range[1]) else 1.0

        # Map the relative fractions onto the observed intensity range.
        value_range = [np.min(voxel_grid[mask]), np.max(voxel_grid[mask])]
        min_int = value_range[0] + min_int_rel * (value_range[1] - value_range[0])
        max_int = value_range[0] + max_int_rel * (value_range[1] - value_range[0])

        self.set_voxel_grid(voxel_grid=_apply_range(voxel_grid, min_int, max_int))
        self.is_normalised = True

    elif norm_method == "quantile_range":
        # Normalisation to [0, 1]-ish range based on quantiles.
        voxel_grid = self.get_voxel_grid()

        min_quantile = intensity_range[0] if not np.isnan(intensity_range[0]) else 0.0
        max_quantile = intensity_range[1] if not np.isnan(intensity_range[1]) else 1.0

        # Compute quantiles from the masked voxel grid.
        min_int = np.quantile(voxel_grid[mask], q=min_quantile)
        max_int = np.quantile(voxel_grid[mask], q=max_quantile)

        self.set_voxel_grid(voxel_grid=_apply_range(voxel_grid, min_int, max_int))
        self.is_normalised = True

    elif norm_method == "standardisation":
        # Normalisation to mean 0 and standard deviation 1.
        voxel_grid = self.get_voxel_grid()

        mean_int = np.mean(voxel_grid[mask])
        sd_int = np.std(voxel_grid[mask])

        # Protect against invariance.
        if sd_int == 0.0:
            sd_int = 1.0

        self.set_voxel_grid(voxel_grid=(voxel_grid - mean_int) / sd_int)
        self.is_normalised = True

    else:
        raise ValueError(f"{norm_method} is not a valid method for normalising intensity values.")

    self.saturate(intensity_range=saturation_range)
def rotate(self, angle):
    """Rotate the volume in the axial (y-x) plane, i.e. about the z-axis.

    :param angle: rotation angle in degrees.
    """
    # Skip for missing images
    if self.is_missing:
        return

    import scipy.ndimage as ndi
    from luna.radiology.mirp.featureSets.volumeMorphology import get_rotation_matrix

    # Find actual output size of x-y plane: rotate each axis-extent vector and sum
    # their absolute components to get the expected (floating-point) extent.
    new_z_dim = np.asmatrix([self.size[0], 0.0, 0.0]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
    new_y_dim = np.asmatrix([0.0, self.size[1], 0.0]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
    new_x_dim = np.asmatrix([0.0, 0.0, self.size[2]]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
    new_dim_flt = np.squeeze(np.array(np.abs(new_z_dim)) + np.array(np.abs(new_y_dim) + np.abs(new_x_dim)))

    # Get voxel grid
    voxel_grid = self.get_voxel_grid()

    # Rotate voxels along angle in the y-x plane (axes=(1, 2)) and find truncated output
    # size; reshape=True lets scipy grow the output array to contain the rotated volume.
    voxel_grid = ndi.rotate(voxel_grid.astype(np.float32), angle=angle, axes=(1, 2), reshape=True, order=1, mode="nearest")
    new_dim_int = np.array(np.shape(voxel_grid)) * 1.0

    # Base CT images carry integer-like intensities; round interpolated values back.
    if (self.modality == "CT") and (self.spat_transform == "base"):
        voxel_grid = np.round(voxel_grid)

    # Update spacing: compensate for the difference between the expected (float) and
    # actual (integer) dimensions of the rotated grid.
    self.spacing *= new_dim_int / new_dim_flt

    # Set rotation angle
    self.rotation_angle = angle

    # Update voxel grid with rotated voxels
    self.set_voxel_grid(voxel_grid=voxel_grid)
def crop(self, ind_ext_z=None, ind_ext_y=None, ind_ext_x=None,
         xy_only=False, z_only=False):
    """Crop image to the provided map extent.

    :param ind_ext_z: voxel indices spanning the crop extent along the z-axis.
    :param ind_ext_y: voxel indices spanning the crop extent along the y-axis.
    :param ind_ext_x: voxel indices spanning the crop extent along the x-axis.
    :param xy_only: crop only in-plane (y and x); the z extent is kept.
    :param z_only: crop only along z; the in-plane extent is kept.
    :return: None; voxel grid and origin are updated in place.
    """
    # Skip for missing images
    if self.is_missing:
        return

    # Determine corresponding voxel indices.
    # np.int was removed in NumPy 1.24; the builtin int is used instead.
    max_ind = np.ceil(np.array((np.max(ind_ext_z), np.max(ind_ext_y), np.max(ind_ext_x)))).astype(int)
    min_ind = np.floor(np.array((np.min(ind_ext_z), np.min(ind_ext_y), np.min(ind_ext_x)))).astype(int)

    # Set bounding indices, clamped to the image volume.
    max_bound_ind = np.minimum(max_ind, self.size).astype(int)
    min_bound_ind = np.maximum(min_ind, np.array([0, 0, 0])).astype(int)

    # Get voxel grid
    voxel_grid = self.get_voxel_grid()

    # Create corresponding image volumes by slicing original volume.
    if z_only:
        voxel_grid = voxel_grid[min_bound_ind[0]:max_bound_ind[0] + 1, :, :]
        # In-plane extent is untouched, so the in-plane origin offset is zero.
        min_bound_ind[1] = 0
        min_bound_ind[2] = 0
    elif xy_only:
        voxel_grid = voxel_grid[:,
                                min_bound_ind[1]:max_bound_ind[1] + 1,
                                min_bound_ind[2]:max_bound_ind[2] + 1]
        # z extent is untouched, so the z origin offset is zero.
        min_bound_ind[0] = 0
        max_bound_ind[0] = int(self.size[0])
    else:
        voxel_grid = voxel_grid[min_bound_ind[0]:max_bound_ind[0] + 1,
                                min_bound_ind[1]:max_bound_ind[1] + 1,
                                min_bound_ind[2]:max_bound_ind[2] + 1]

    # Update origin: shift it by the lower crop bound expressed in world coordinates.
    self.origin = self.origin + np.dot(self.m_affine, np.transpose(min_bound_ind))

    # Update voxel grid
    self.set_voxel_grid(voxel_grid=voxel_grid)
def crop_to_size(self, center, crop_size, xy_only=False):
    """Crop images to the exact size.

    The crop is centred on `center`. Axes whose `crop_size` entry is NaN keep their
    original extent. Voxels falling outside the original image are filled with the
    lowest intensity found in the copied region.

    :param center: index coordinates of the crop centre (z, y, x).
    :param crop_size: requested crop size per axis (z, y, x); NaN keeps that axis.
    :param xy_only: unused in this implementation; kept for interface compatibility.
    :return: None; voxel grid and origin are updated in place.
    """
    # Skip for missing images
    if self.is_missing:
        return

    # Make local copy so the caller's sequence is not modified.
    crop_size = np.array(copy.deepcopy(crop_size))

    # Determine the new grid origin in the original index space. Only the dimensions
    # with a number are updated. np.int was removed in NumPy 1.24; use builtin int.
    grid_origin = np.round(center - crop_size / 2.0).astype(int)

    # Update grid origin and crop_size for the remainder of the calculation.
    grid_origin[np.isnan(crop_size)] = 0
    crop_size[np.isnan(crop_size)] = self.size[np.isnan(crop_size)]

    # Determine coordinates of the box that can be copied in the original space.
    max_ind_orig = grid_origin + crop_size
    min_ind_orig = grid_origin

    # Update coordinates based on boundaries in the original images.
    max_ind_orig = np.minimum(max_ind_orig, self.size).astype(int)
    min_ind_orig = np.maximum(min_ind_orig, [0, 0, 0]).astype(int)

    # Determine coordinates where this box should land, i.e. perform the coordinate
    # transformation to grid index space.
    max_ind_grid = max_ind_orig - grid_origin
    min_ind_grid = min_ind_orig - grid_origin

    # Create an empty voxel_grid to copy to; NaN marks voxels not yet filled.
    cropped_grid = np.full(crop_size.astype(int), fill_value=np.nan)

    # Get slice of voxel grid
    voxel_grid = self.get_voxel_grid()[min_ind_orig[0]:max_ind_orig[0],
                                       min_ind_orig[1]:max_ind_orig[1],
                                       min_ind_orig[2]:max_ind_orig[2]]

    # Put the voxel grid slice into the cropped grid
    cropped_grid[min_ind_grid[0]:max_ind_grid[0], min_ind_grid[1]:max_ind_grid[1], min_ind_grid[2]:max_ind_grid[2]] = voxel_grid

    # Replace any remaining NaN values in the grid by the lowest intensity in voxel_grid
    cropped_grid[np.isnan(cropped_grid)] = np.min(voxel_grid)

    # Restore the original dtype in case it got lost
    cropped_grid = cropped_grid.astype(voxel_grid.dtype)

    # Update origin: shift by the new grid origin expressed in world coordinates.
    self.origin = self.origin + np.dot(self.m_affine, np.transpose(grid_origin))

    # Set voxel grid
    self.set_voxel_grid(voxel_grid=cropped_grid)
def set_spatial_transform(self, transform_method: str):
    """Record the spatial transform tag; any non-"base" transform marks the image as a parametric map."""
    self.spat_transform = transform_method
    if transform_method != "base":
        self.as_parametric_map = True
def compute_diagnostic_features(self, append_str: str = ""):
    """Create diagnostic features for the image stack.

    :param append_str: optional suffix appended to each feature column name.
    :return: single-row pandas DataFrame with diagnostic features (NaN for missing images).
    """
    feature_names = ["img_dim_x", "img_dim_y", "img_dim_z",
                     "vox_dim_x", "vox_dim_y", "vox_dim_z",
                     "mean_int", "min_int", "max_int"]

    # Start from an all-NaN row so that missing images yield NaN features.
    feature_table = pd.DataFrame(np.full(shape=(1, len(feature_names)), fill_value=np.nan))
    feature_table.columns = feature_names

    if not self.is_missing:
        voxel_grid = self.get_voxel_grid()

        # Dimensions and spacing are stored (z, y, x); report them per axis.
        feature_table["img_dim_x"] = self.size[2]
        feature_table["img_dim_y"] = self.size[1]
        feature_table["img_dim_z"] = self.size[0]
        feature_table["vox_dim_x"] = self.spacing[2]
        feature_table["vox_dim_y"] = self.spacing[1]
        feature_table["vox_dim_z"] = self.spacing[0]
        feature_table["mean_int"] = np.mean(voxel_grid)
        feature_table["min_int"] = np.min(voxel_grid)
        feature_table["max_int"] = np.max(voxel_grid)

    # Prefix every column with "diag" and append the optional suffix.
    feature_table.columns = ["_".join(["diag", feature, append_str]).rstrip("_")
                             for feature in feature_table.columns]

    return feature_table
def export(self, file_path):
    """
    Export the image to the requested directory as a NIfTI file.

    :param file_path: directory to write the image to.
    :return: result of the write operation, or None when the image is missing.
    """
    # Nothing to export for missing images.
    if self.is_missing:
        return None

    # Derive the file name from the image descriptor and write as NIfTI.
    return self.write(file_path=file_path, file_name=self.get_export_descriptor() + ".nii")
def get_export_descriptor(self):
    """
    Generate an image descriptor based on the parameters of the image.

    :return: underscore-joined descriptor string.
    """
    parts = ["volumes"]

    # Image name and modality.
    if self.name is not None:
        parts.append(self.name)
    if self.modality is not None:
        parts.append(self.modality)

    # Interpolation settings: algorithm plus per-axis spacing (truncated to 5 chars).
    if self.interpolated:
        parts.extend([self.interpolation_algorithm,
                      "x", str(self.spacing[2])[:5],
                      "y", str(self.spacing[1])[:5],
                      "z", str(self.spacing[0])[:5]])

    # Intensity binning.
    if self.binned:
        parts.extend(["bin", str(self.bin_width)])

    # Rotation angle.
    if self.rotation_angle != 0.0:
        parts.extend(["rot", str(self.rotation_angle)[:5]])

    # Translation fractions, only when any axis was translated.
    if not (self.transl_fraction_x == 0.0 and self.transl_fraction_y == 0.0 and self.transl_fraction_z == 0.0):
        parts.extend(["trans",
                      "x", str(self.transl_fraction_x)[:5],
                      "y", str(self.transl_fraction_y)[:5],
                      "z", str(self.transl_fraction_z)[:5]])

    # Added noise level and iteration.
    if self.noise != -1.0:
        parts.extend(["noise", str(self.noise)[:5], "iter", str(self.noise_iter)])

    # Spatial filter transform, when not the base image.
    if self.spat_transform != "base":
        parts.append(self.spat_transform)

    return "_".join(parts)
def convert2sitk(self):
"""Converts image object back to Simple ITK
This step may precede writing to file."""
import SimpleITK as sitk
sitk.ProcessObject.SetGlobalDefaultNumberOfThreads(4)
# Skip if the image is missing
if self.is_missing:
return None
# Get image data type and set a valid data type that can be read by simple itk
vox_dtype | |
custom math fonts.''').tag(config=True, kind='')
mathtext_default = Unicode('regular', help=r'''The default font to use for math. Can be any of the LaTeX font
names, including the special name "regular" for the same font used in regular text.''').tag(config=True, kind='')
#
# AXES
# default face and edge color, default tick sizes,
# default fontsizes for ticklabels, and so on. See
# http://matplotlib.org/api/axes_api.html#module-matplotlib.axes
#
axes_facecolor = Unicode('F0F0F0', help=r'''axes background color''').tag(config=True, kind='color')
axes_edgecolor = Unicode('black', help=r'''axes edge color''').tag(config=True, kind='color')
axes_linewidth = Float(0.8, help=r'''edge linewidth''').tag(config=True, kind='')
axes_grid = Bool(False, help=r'''display grid or not''').tag(config=True, kind='')
axes_grid_which = Unicode('major').tag(config=True, kind='')
axes_grid_axis = Unicode('both').tag(config=True, kind='')
axes_titlesize = Float(14.0, help=r'''fontsize of the axes title''').tag(config=True, kind='')
axes_titley = Float(1.0, help=r'''at the top, no autopositioning.''').tag(config=True, kind='')
axes_titlepad = Float(5.0, help=r'''pad between axes and title in points''').tag(config=True, kind='')
axes_titleweight = Unicode('normal', help=r'''font weight for axes title''').tag(config=True, kind='')
axes_labelsize = Float(10.0, help=r'''fontsize of the x any y labels''').tag(config=True, kind='')
axes_labelpad = Float(4.0, help=r'''space between label and axis''').tag(config=True, kind='')
axes_labelweight = Unicode('normal', help=r'''weight of the x and y labels''').tag(config=True, kind='')
axes_labelcolor = Unicode('black', help=r'''''').tag(config=True, kind='color')
axes_axisbelow = Bool(True, help=r'''whether axis gridlines and ticks are below
the axes elements (lines, text, etc)''').tag(config=True, kind='')
axes_formatter_limits = Tuple((-5, 6),
help=r'use scientific notation if log10 of the axis range is smaller than the '
r'first or larger than the second').tag(config=True, kind='')
axes_formatter_use_locale = Bool(False, help=r'''When True, format tick labels according to the user"s locale.
For example, use "," as a decimal separator in the fr_FR locale.''').tag(
config=True, kind='')
axes_formatter_use_mathtext = Bool(False, help=r'''When True, use mathtext for scientific notation.''').tag(
config=True, kind='')
axes_formatter_useoffset = Bool(True, help=r'''If True, the tick label formatter will default to labeling ticks
relative to an offset when the data range is small compared to the minimum
absolute value of the data.''').tag(config=True, kind='')
axes_formatter_offset_threshold = Integer(4,
help=r'''When useoffset is True, the offset will be used when it can
remove at least this number of significant digits from tick labels.'''
).tag(config=True, kind='')
axes_unicode_minus = Bool(True, help=r'''use unicode for the minus symbol rather than hyphen. See
http://en.wikipedia.org/wiki/Plus_and_minus_signs#Character_codes''').tag(config=True,
kind='')
axes_prop_cycle = Unicode("cycler('color', ['007200', '009E73', 'D55E00', 'CC79A7', 'F0E442', '56B4E9'])",
help=r'''color cycle for plot lines as list of string colorspecs: single letter,
long name, or web-style hex''').tag(config=True, kind='function')
axes_autolimit_mode = Unicode('data', help=r'''How to scale axes limits to the data. Use "data" to use data
limits,
plus some margin. Use "round_number" move to the nearest "round" number''').tag(
config=True, kind='')
axes_xmargin = Float(0.05, help=r'''x margin. See `axes.Axes.margins`''').tag(config=True, kind='')
axes_ymargin = Float(0.05, help=r'''y margin See `axes.Axes.margins`''').tag(config=True, kind='')
axes_spines_bottom = Bool(True).tag(config=True, kind='')
axes_spines_left = Bool(True).tag(config=True, kind='')
axes_spines_right = Bool(True).tag(config=True, kind='')
axes_spines_top = Bool(True).tag(config=True, kind='')
polaraxes_grid = Bool(True, help=r'''display grid on polar axes''').tag(config=True, kind='')
axes3d_grid = Bool(True, help=r'''display grid on 3d axes''').tag(config=True, kind='')
#
# DATE
#
timezone = Unicode('UTC', help=r'''a pytz timezone string, e.g., US/Central or Europe/Paris''').tag(config=True,
kind='')
date_autoformatter_year = Unicode('%Y').tag(config=True, kind='')
date_autoformatter_month = Unicode('%b %Y').tag(config=True, kind='')
date_autoformatter_day = Unicode('%b %d %Y').tag(config=True, kind='')
date_autoformatter_hour = Unicode('%H:%M:%S').tag(config=True, kind='')
date_autoformatter_minute = Unicode('%H:%M:%S.%f').tag(config=True, kind='')
date_autoformatter_second = Unicode('%H:%M:%S.%f').tag(config=True, kind='')
date_autoformatter_microsecond = Unicode('%H:%M:%S.%f').tag(config=True, kind='')
#
# TICKS
# see http://matplotlib.org/api/axis_api.html#matplotlib.axis.Tick
#
xtick_top = Bool(False, help=r'''draw ticks on the top side''').tag(config=True, kind='')
xtick_bottom = Bool(True, help=r'''draw ticks on the bottom side''').tag(config=True, kind='')
xtick_major_size = Float(3.5, help=r'''major tick size in points''').tag(config=True, kind='')
xtick_minor_size = Float(2.0, help=r'''minor tick size in points''').tag(config=True, kind='')
xtick_major_width = Float(0.8, help=r'''major tick width in points''').tag(config=True, kind='')
xtick_minor_width = Float(0.6, help=r'''minor tick width in points''').tag(config=True, kind='')
xtick_major_pad = Float(3.5, help=r'''distance to major tick label in points''').tag(config=True, kind='')
xtick_minor_pad = Float(3.4, help=r'''distance to the minor tick label in points''').tag(config=True, kind='')
xtick_color = Unicode('.15', help=r'''color of the tick labels''').tag(config=True, kind='color')
xtick_labelsize = Float(10.0, help=r'''fontsize of the tick labels''').tag(config=True, kind='')
xtick_direction = Unicode('out', help=r'''direction''').tag(config=True, kind='')
xtick_minor_visible = Bool(False, help=r'''visibility of minor ticks on x-axis''').tag(config=True, kind='')
xtick_major_top = Bool(True, help=r'''draw x axis top major ticks''').tag(config=True, kind='')
xtick_major_bottom = Bool(True, help=r'''draw x axis bottom major ticks''').tag(config=True, kind='')
xtick_minor_top = Bool(True, help=r'''draw x axis top minor ticks''').tag(config=True, kind='')
xtick_minor_bottom = Bool(True, help=r'''draw x axis bottom minor ticks''').tag(config=True, kind='')
ytick_left = Bool(True, help=r'''draw ticks on the left side''').tag(config=True, kind='')
ytick_right = Bool(False, help=r'''draw ticks on the right side''').tag(config=True, kind='')
ytick_major_size = Float(3.5, help=r'''major tick size in points''').tag(config=True, kind='')
ytick_minor_size = Float(2.0, help=r'''minor tick size in points''').tag(config=True, kind='')
ytick_major_width = Float(0.8, help=r'''major tick width in points''').tag(config=True, kind='')
ytick_minor_width = Float(0.6, help=r'''minor tick width in points''').tag(config=True, kind='')
ytick_major_pad = Float(3.5, help=r'''distance to major tick label in points''').tag(config=True, kind='')
ytick_minor_pad = Float(3.4, help=r'''distance to the minor tick label in points''').tag(config=True, kind='')
ytick_color = Unicode('.15', help=r'''color of the tick labels''').tag(config=True, kind='color')
ytick_labelsize = Float(10.0, help=r'''fontsize of the tick labels''').tag(config=True, kind='')
ytick_direction = Unicode('out', help=r'''direction''').tag(config=True, kind='')
ytick_minor_visible = Bool(False, help=r'''visibility of minor ticks on y-axis''').tag(config=True, kind='')
ytick_major_left = Bool(True, help=r'''draw y axis left major ticks''').tag(config=True, kind='')
ytick_major_right = Bool(True, help=r'''draw y axis right major ticks''').tag(config=True, kind='')
ytick_minor_left = Bool(True, help=r'''draw y axis left minor ticks''').tag(config=True, kind='')
ytick_minor_right = Bool(True, help=r'''draw y axis right minor ticks''').tag(config=True, kind='')
#
# GRIDS
#
grid_color = Unicode('.85', help=r'''grid color''').tag(config=True, kind='color')
grid_linestyle = Enum(list(Line2D.lineStyles.keys()), default_value='-', help=r'''solid''').tag(config=True,
kind='')
grid_linewidth = Float(0.85, help=r'''in points''').tag(config=True, kind='')
grid_alpha = Float(1.0, help=r'''transparency, between 0.0 and 1.0''').tag(config=True, kind='')
legend_loc = Unicode('best', help=r'''''').tag(config=True, kind='')
legend_frameon = Bool(False, help=r'''if True, draw the legend on a background patch''').tag(config=True, kind='')
#
# LEGEND
#
legend_framealpha = Union((Float(0.8), Unicode('None')), help=r'''legend patch transparency''').tag(config=True,
kind='',
default=0.0)
legend_facecolor = Unicode('inherit', help=r'''inherit from axes.facecolor; or color spec''').tag(config=True,
kind='color')
legend_edgecolor = Unicode('0.8', help=r'''background patch boundary color''').tag(config=True, kind='color')
legend_fancybox = Bool(True,
help=r'''if True, use a rounded box for the legend background, else a rectangle''').tag(
config=True, kind='')
legend_shadow = Bool(False, help=r'''if True, give background a shadow effect''').tag(config=True, kind='')
legend_numpoints = Integer(1, help=r'''the number of marker points in the legend line''').tag(config=True, kind='')
legend_scatterpoints = Integer(1, help=r'''number of scatter points''').tag(config=True, kind='')
legend_markerscale = Float(1.0, help=r'''the relative size of legend markers vs. original''').tag(config=True,
kind='')
legend_fontsize = Float(9.0, help=r'''''').tag(config=True, kind='')
legend_borderpad = Float(0.4, help=r'''border whitespace''').tag(config=True, kind='')
legend_labelspacing = Float(0.2, help=r'''the vertical space between the legend entries''').tag(config=True,
kind='')
legend_handlelength = Float(2.0, help=r'''the length of the legend lines''').tag(config=True, kind='')
legend_handleheight = Float(0.7, help=r'''the height of the legend handle''').tag(config=True, kind='')
legend_handletextpad = Float(0.1, help=r'''the space between the legend line and legend text''').tag(config=True,
kind='')
legend_borderaxespad = Float(0.5, help=r'''the border between the axes and legend edge''').tag(config=True,
kind='')
legend_columnspacing = Float(0.5, help=r'''column separation''').tag(config=True, kind='')
figure_titlesize = Float(12.0, help=r'''size of the figure title (Figure.suptitle())''').tag(config=True, kind='')
figure_titleweight = Unicode('normal', help=r'''weight of the figure title''').tag(config=True, kind='')
figure_figsize = Tuple((6.8, 4.4), help=r'''figure size in inches''').tag(config=True, kind='')
figure_dpi = Float(96.0, help=r'''figure dots per inch''').tag(config=True, kind='')
figure_facecolor = Unicode('white', help=r'''figure facecolor; 0.75 is scalar gray''').tag(config=True,
kind='color')
figure_edgecolor = Unicode('white', help=r'''figure edgecolor''').tag(config=True, kind='color')
figure_autolayout = Bool(True, help=r'''When True, automatically adjust subplot parameters to make the plot fit the
figure''').tag(config=True, kind='')
#
# FIGURE
# See http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure
#
figure_max_open_warning = Integer(30, help=r'''The maximum number of figures to open through the pyplot
interface before emitting a warning. If less than one this feature is disabled.''').tag(config=True, kind='')
figure_subplot_left = Float(0.15, help=r'''the left side of the subplots of the figure''').tag(config=True,
kind='')
figure_subplot_right = Float(0.95, help=r'''the right side of the subplots of the figure''').tag(config=True,
kind='')
figure_subplot_bottom = Float(0.12, help=r'''the bottom of the subplots of the figure''').tag(config=True, kind='')
figure_subplot_top = Float(0.98, help=r'''the top of the subplots of the figure''').tag(config=True, kind='')
figure_subplot_wspace = Float(0.0, help=r'''the amount of width reserved for blank space between subplots,
expressed as a fraction of the average axis width''').tag(config=True, kind='')
figure_subplot_hspace = Float(0.0, help=r'''the amount of height reserved for white space between subplots,
expressed as a fraction of the average axis height''').tag(config=True, kind='')
figure_frameon = Bool(True, help='Show figure frame').tag(config=True)
#
# IMAGES
#
image_aspect = Unicode('equal', help=r'''equal | auto | a number''').tag(config=True, kind='')
image_interpolation = Unicode('antialiased', help=r'''see help(imshow) for options''').tag(config=True, kind='')
image_cmap = Enum(plt.colormaps(), default_value='viridis', help=r'''A colormap name, gray etc...''').tag(
config=True, kind='')
image_lut = Integer(256, help=r'''the size of the colormap lookup table''').tag(config=True, kind='')
image_origin = Unicode('upper', help=r'''lower | upper''').tag(config=True, kind='')
image_resample = Bool(True, help=r'''''').tag(config=True, kind='')
image_composite_image = Bool(True, help=r'''When True, all the images on a set | |
# coding: utf-8
# # An Example Python Client for the demo-echo API
#
# When using [OAuth 2.0](https://tools.ietf.org/html/rfc6749) (no matter what the flow), the client app (this notebook) gets an *access token* (and optionally some other tokens like *refresh\_token* and even an OpenID Connect *id\_token* which identifies the end user to the client app -- when using the *openid* scope).
#
# The only token we are currently concerned with is the **access token**.
#
# Once the client app has the access token, it then adds it to the HTTP request that it sends to the Resource Server in one of two ways, either
# 1. in an HTTP header: `Authorization: Bearer <token>` (preferred) or,
# 2. as a query parameter: `https://example.com/resource?access_token=<token>`.
# But not both!
#
# You can get a valid access token by, for example, using Postman, and then jump down to where the token is added to the Authorization header a few lines below if you want to initially skip over the OAuth 2.0 dancing.
#
# Let's try out the [oauth2-client](https://github.com/antechrestos/OAuth2Client) Python library. You can install this with:
# ```sh
# pip install oauth2-client
# ```
# In[1]:
import requests
import webbrowser
from oauth2_client.credentials_manager import CredentialManager, ServiceInformation, OAuthError
import json
from pprint import pprint
# # Composing the OAuth 2.0 Authorization Code Request
#
# The request consists of a few pieces of information: service endpoints, credentials that identify this registered client app, a redirect_uri to be called back to, and a list of scopes that the Resource Server requires for the given resource and method.
#
# ## Endpoints
#
# The OAuth 2.0 service endpoints needed for Authorization Code flow are `authorize` and `token`. The particular required URLs for Columbia's PingFederate-based OAuth 2.0 services are show below and can be found in the RAML securitySchemes:
# ```yaml
# #%RAML 1.0 SecurityScheme
# type: OAuth 2.0
# description: |
# This API supports OAuth 2.0 for authorizing requests using PingFederate.
# Please note that MuleSoft will not actually implement any OAuth 2.0 scope enforcement
# as declared with a resource & method's `securedBy` unless you apply an one or more
# relevant API Manager Policies:
# - One of the `OAuth 2.0 protected` PingFederate policies.
# - The `OAuth 2.0 scope enforcement` custom policy.
# describedBy:
# headers:
# Authorization?:
# description: OAuth 2 access token. Use EITHER this or the access_token, not both.
# type: string
# queryParameters:
# access_token?:
# description: OAuth 2 access token. Use EITHER this or the Authorization header, not both.
# type: string
# responses:
# 401:
# description: Bad or expired token.
# 403:
# description: Bad OAuth request
# settings:
# authorizationUri: https://oauth.cc.columbia.edu/as/authorization.oauth2
# accessTokenUri: https://oauth.cc.columbia.edu/as/token.oauth2
# authorizationGrants:
# - authorization_code
# - client_credentials
# scopes:
# !include scopes.raml
# ```
#
# ## Client Credentials
#
# The `client_id` and `client_secret` were obtained from **Request API Access** in the AnyPoint API Developer console. These uniquely identify the client app to the Authorization Server (AS).
#
# <img src="requestAccess.png" width=500>
#
# ## Redirect URI
#
# The OAuth 2.0 Authorization Code protocol "returns" data to the requestor by a GET of it's request\_uri with some query parameter which communicate back the code. This is part of the OAuth 2.0 security "magic". Redirect URIs have to be:
# - Registered in advance with the Authorization Server
# - Specified by the client in the "authorize" request
# They must match or the request is denied.
#
# Redirect URIs for clients are established by AnyPoint API Manager as part of the setup of new client apps along with other settings such as which grant types are allowed for this client and whether it should return a refresh token:
#
# <img src="requestUris.png" width=450>
#
# ## Scopes
#
# The required scopes for a particular endpoint can be found in the API's RAML description. For this example, a GET of the /things endpoint, the RAML shows that scopes of auth-columbia, -google or -facebook and read are required. Let's also add "openid" so that we get an id_token back for this client app to identify who the authorizing user is.
#
# ```yaml
# /things:
# displayName: Things
# description: some things
# get:
# securedBy: # allow selection of one of columbia, facebook, or google login.
# - oauth_2_0: { scopes: [ auth-columbia, read ] }
# - oauth_2_0: { scopes: [ auth-google, read ] }
# - oauth_2_0: { scopes: [ auth-facebook, read ] }
# responses:
# 200:
# body:
# application/json:
# schema: Thing
# ...
# post:
# securedBy:
# - oauth_2_0: { scopes: [ auth-columbia, demo-netphone-admin, create ] }
# - oauth_2_0: { scopes: [ auth-google, create ] }
# - oauth_2_0: { scopes: [ auth-facebook, create ] }
# ```
#
# In[2]:
# OAuth 2.0 service endpoints, this client's registered credentials, and the
# scopes requested for the access token.
# NOTE(review): client_secret is a redacted placeholder — supply the real
# secret out of band before running this cell.
service_information = ServiceInformation(
    authorize_service='https://oauth.cc.columbia.edu/as/authorization.oauth2',
    token_service='https://oauth.cc.columbia.edu/as/token.oauth2',
    client_id='7da405f38cbc4be48fa9bcbc707afa5c',
    client_secret='<KEY>',
    scopes=['auth-google', 'read', 'openid'],
    skip_ssl_verifications=False)
# In[3]:
manager = CredentialManager(service_information) # initialize the OAuth 2.0 client
# The redirect\_uri must:
# 1. Match one of the redirect URIs that were registered in AnyPoint API Manager for this client ("External API Tester") w/client\_id and client\_secret, above.
# 2. Actually have a listener on that URI (which `manager.init_authorize_code_process()` launches for you).
# In[4]:
redirect_uri = 'http://127.0.0.1:5432/oauth2client'
# The Authorization Code flow does a bunch of browser redirects so that the Resource Owner (end user) credentials never flow through the client app itself. As you can see, you must click on the URL which opens another browser tab where the user login flow happens.
# In[5]:
authUrl = manager.init_authorize_code_process(redirect_uri, 'state_test')
# In[6]:
print('Opening this url in your browser: %s'%authUrl)
webbrowser.open_new(authUrl)
# Upon successfully authentication and authorizing in the new tab, you'll see this message:
#
# ```Response received ({"state": "state_test", "code": "<random string>"}). Result was transmitted to the original thread. You can close this window.```
# In[7]:
code = manager.wait_and_terminate_authorize_code_process()
# In[8]:
print('code = %s'%code)
# In[9]:
manager.init_with_authorize_code(redirect_uri, code)
# The Authorization Code flow gets the code via the request\_uri callback and then sends the code to the AS, which returns the access token. Since this library overloads the Python requests library, rather than exposing the access token to you, `init_with_authorize_code()` just sticks it straight into the Authorization HTTP header (which can be found in a "private" variable:
# In[10]:
manager._session.headers
# Finally, after this brief amount of basically one-time setup, you are now ready to actually issue the HTTP request to the Resource Server. This part is really easy (`manager.get()` is just `requests.get()` with the headers already set up for you):
# In[11]:
url = "https://columbia-demo-echo.cloudhub.io/v1/api/things"
response = manager.get(url)
print('status %s'%response.status_code)
print(response.headers)
# In[12]:
print(response.text)
# This is a weird API in that it is echoing back information that is not normally shared with the client app, namely, the result of validating the Bearer Token that the client provided. But let's crack open that JSON response just a little anyway. First, let's look at the definition of a Thing object from the RAML:
# ```
# #%RAML 1.0 DataType
# type: object
# properties:
# authorization:
# type: string
# access_token:
# type: string
# user:
# type: string
# tokenContext:
# type: string
# groups:
# type: string
# scopes:
# type: string
# client_id:
# type: string
# client_name:
# type: string
# http_method:
# type: string
# http_request_uri:
# type: string
# x_forwarded_for:
# type: string
# example:
# authorization: Bearer abcdefghi123456
# access_token: NONE
# user: <EMAIL>
# tokenContext: foo bar
# groups: g1 g2demo-echo
# scopes: a b c
# client_id: 64575d23b8504c9bb1e9e7ff558c3cd3
# client_name: another authz demo app
# http_method: GET
# http_request_uri: /v1/api/things
# x_forwarded_for: 172.16.17.32
# ```
# In[13]:
# Parse the echoed JSON body. The "tokenContext" field is itself a
# JSON-encoded string, so it needs a second json.loads pass before the
# "expires_in" value (seconds) can be read and reported in minutes.
j = json.loads(response.text)
if j and 'tokenContext' in j:
    tc = json.loads(j['tokenContext'])
    if tc and 'expires_in' in tc:
        print("Access token expires in %d minutes."%(tc['expires_in']/60))
# Let's try a request where we don't have the correct scope and see what errors look like:
# In[14]:
response = manager.post(url)
# In[15]:
print('status %s'%response.status_code)
# In[16]:
print(response.text)
# # Refresh Tokens
# See the [documentation](https://github.com/antechrestos/OAuth2Client) for how to make use of refresh tokens. If you persist the refresh token, you can continue accessing the resource server without having to bug the user, after the access token expires, by getting a new one.
# In[17]:
print(manager.refresh_token)
# In[18]:
rt = manager.refresh_token
newManager = CredentialManager(service_information)
# In[19]:
newManager.init_with_token(rt)
# In[20]:
print(newManager._session.headers)
# In[21]:
newResponse = newManager.get(url)
print('status %s'%newResponse.status_code)
print(newResponse.headers)
print(newResponse.text)
# In[22]:
print("We've refreshed and now the old access token (%s) is replaced by a new token (%s)"%
(manager._session.headers['Authorization'][len('Bearer '):],
newManager._session.headers['Authorization'][len('Bearer '):]))
# In[23]:
resp = manager.get(url)
print('see if the old token still works: %s'%resp.status_code)
# In[32]:
# force a rate-limiting error by trying to hit the API more than 10 times in a minute:
for i in range(100):
resp | |
<filename>tests/unit/test_font.py
import sys
from unittest import TestCase, skipUnless
from unittest.mock import call, patch
from path import Path
from dakara_player.font import (
FontLoaderLinux,
FontLoaderNotAvailableError,
FontLoaderWindows,
get_font_loader_class,
)
class GetFontLoaderClassTestCase(TestCase):
    """Test the platform-based font loader class selection."""

    def test(self):
        """Test that the loader class matching the platform is returned."""
        # a Linux platform maps to the Linux loader
        with patch("dakara_player.font.sys.platform", "linux"):
            loader_class = get_font_loader_class()
            self.assertEqual(loader_class, FontLoaderLinux)

        # a Windows platform maps to the Windows loader
        with patch("dakara_player.font.sys.platform", "win32"):
            loader_class = get_font_loader_class()
            self.assertEqual(loader_class, FontLoaderWindows)

        # any unimplemented platform raises an explicit error
        with patch("dakara_player.font.sys.platform", "other"):
            with self.assertRaisesRegex(
                NotImplementedError,
                r"This operating system \(other\) is not currently supported",
            ):
                get_font_loader_class()
class FontLoaderTestCase(TestCase):
    """Base class providing the shared fixtures for font loader tests."""

    def setUp(self):
        """Build the directory and font fixtures used by the tests."""
        # fonts directory fixture
        self.directory = Path("directory")
        # a single font file plus the derived path/list/mapping fixtures
        self.font_name = "font_file.ttf"
        self.font_path = self.directory / self.font_name
        self.font_name_list = [self.font_name]
        self.font_path_list = {self.font_name: self.font_path}
class FontLoaderCommonTestCase(FontLoaderTestCase):
    """Test common methods of the font loaders.

    The Linux implementation is used as the concrete subject, but the
    methods exercised here are loader behavior shared across platforms.
    """

    def get_font_loader(self):
        """Return an instance of the font loader."""
        # instantiation logs at DEBUG level; capture it to keep test output clean
        with self.assertLogs("dakara_player.font", "DEBUG"):
            return FontLoaderLinux("package")

    @patch.object(FontLoaderLinux, "unload")
    def test_context_manager(self, mocked_unload):
        """Test the font loader context manager."""
        # leaving the with-block must trigger unload()
        with FontLoaderLinux("package"):
            pass
        mocked_unload.assert_called_with()

    @patch("dakara_player.font.contents", autospec=True)
    def test_get_font_name_list(self, mocked_contents):
        """Test to get list of font names."""
        # mock system calls; "__init__.py" is expected to be filtered out,
        # as only one font is reported below
        mocked_contents.return_value = [self.font_name, "__init__.py"]
        font_loader = self.get_font_loader()
        # call the method
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_name_list = font_loader.get_font_name_list()
        # assert the result
        self.assertListEqual(font_name_list, self.font_name_list)
        # assert effect on logs
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.font:Scanning fonts directory",
                "DEBUG:dakara_player.font:Found 1 font(s) to load",
            ],
        )
        # call assertions
        mocked_contents.assert_called_once_with("package")

    @patch("dakara_player.font.path")
    @patch.object(FontLoaderLinux, "get_font_name_list", autospec=True)
    def test_get_font_path_iterator(self, mocked_get_font_name_list, mocked_path):
        """Test to get iterator of font paths."""
        mocked_get_font_name_list.return_value = self.font_name_list
        # path() is used as a context manager yielding the font file path
        mocked_path.return_value.__enter__.return_value = (
            self.directory / self.font_name
        )
        font_loader = self.get_font_loader()
        # call the method
        font_file_path_list = list(font_loader.get_font_path_iterator())
        self.assertListEqual(font_file_path_list, [self.font_path])
@skipUnless(sys.platform.startswith("linux"), "Can be tested on Linux only")
class FontLoaderLinuxTestCase(FontLoaderTestCase):
    """Test the Linux font loader.

    Fonts are loaded by copying them into the user's ``~/.fonts``
    directory, so most tests mock filesystem operations on ``Path``.
    """

    def setUp(self):
        super().setUp()
        # save user directory
        self.user_directory = Path("~").expanduser()

    def get_font_loader(self):
        """Return an instance of the font loader."""
        # instantiation logs at DEBUG level; capture it to keep test output clean
        with self.assertLogs("dakara_player.font", "DEBUG"):
            return FontLoaderLinux("package")

    @patch.object(FontLoaderLinux, "load_font", autospec=True)
    @patch.object(FontLoaderLinux, "get_font_path_iterator", autospec=True)
    @patch.object(Path, "walkfiles", autospec=True)
    @patch.object(Path, "mkdir_p", autospec=True)
    def test_load(
        self,
        mocked_mkdir_p,
        mocked_walkfiles,
        mocked_get_font_path_iterator,
        mocked_load_font,
    ):
        """Test to load fonts."""
        # prepare the mock: one walk of the system fonts, one of the user fonts
        mocked_get_font_path_iterator.return_value = (p for p in [self.font_path])
        mocked_walkfiles.side_effect = [
            (p for p in [Path("/") / "usr" / "share" / "fonts" / "font1"]),
            (p for p in [self.user_directory / ".fonts" / "font2"]),
        ]
        font_loader = self.get_font_loader()
        # call the method
        font_loader.load()
        # assert the call: the user fonts directory is created first
        mocked_mkdir_p.assert_called_once_with(self.user_directory / ".fonts")
        mocked_walkfiles.assert_has_calls(
            [
                call(Path("/") / "usr" / "share" / "fonts"),
                call(self.user_directory / ".fonts"),
            ]
        )
        mocked_get_font_path_iterator.assert_called_once_with(font_loader)
        # load_font receives the font plus both directory listings
        mocked_load_font.assert_called_once_with(
            font_loader,
            self.font_path,
            [Path("/") / "usr" / "share" / "fonts" / "font1"],
            [self.user_directory / ".fonts" / "font2"],
        )

    @patch.object(Path, "unlink", autospec=True)
    @patch.object(Path, "copy", autospec=True)
    @patch.object(Path, "islink", autospec=True)
    def test_load_font_system(self, mocked_islink, mocked_copy, mocked_unlink):
        """Test to load one font which is in system directory."""
        font_loader = self.get_font_loader()
        # pre assertions
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # call the method: the font already exists system-wide
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_loader.load_font(
                self.font_path,
                [Path("/") / "usr" / "share" / "fonts" / "truetype" / "font_file.ttf"],
                [],
            )
        # post assertions: nothing was installed, so nothing is tracked
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # assert effect on logs
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.font:Font 'font_file.ttf' "
                "found in system directory"
            ],
        )
        # assert the call
        mocked_islink.assert_not_called()
        mocked_unlink.assert_not_called()
        mocked_copy.assert_not_called()

    @patch.object(Path, "unlink", autospec=True)
    @patch.object(Path, "copy", autospec=True)
    @patch.object(Path, "islink", autospec=True)
    def test_load_font_user(self, mocked_islink, mocked_copy, mocked_unlink):
        """Test to load one font which is in user directory."""
        font_loader = self.get_font_loader()
        # pre assertions
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # call the method: the font already exists in the user directory
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_loader.load_font(
                self.font_path,
                [],
                [self.user_directory / "fonts" / "truetype" / "font_file.ttf"],
            )
        # post assertions: nothing was installed, so nothing is tracked
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # assert effect on logs
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.font:Font 'font_file.ttf' "
                "found in user directory"
            ],
        )
        # assert the call
        mocked_islink.assert_not_called()
        mocked_unlink.assert_not_called()
        mocked_copy.assert_not_called()

    @patch.object(Path, "unlink", autospec=True)
    @patch.object(Path, "copy", autospec=True)
    @patch.object(Path, "islink", autospec=True)
    def test_load_font_user_link_dead_install(
        self,
        mocked_islink,
        mocked_copy,
        mocked_unlink,
    ):
        """Test to load one font which is in user directory as dead link."""
        # prepare the mock: the destination exists but is a (dead) symlink
        mocked_islink.return_value = True
        font_loader = self.get_font_loader()
        # pre assertions
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # call the method
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_loader.load_font(self.font_path, [], [])
        # post assertions: the dead link was replaced, font is tracked
        self.assertEqual(len(font_loader.fonts_loaded), 1)
        # assert effect on logs
        font_path = self.user_directory / ".fonts/font_file.ttf"
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.font:Dead symbolic link found for "
                "font 'font_file.ttf' in user directory, removing it",
                "DEBUG:dakara_player.font:Font 'font_file.ttf' "
                "loaded in user directory: '{}'".format(font_path),
            ],
        )
        # assert the call
        mocked_islink.assert_called_with(font_path)
        mocked_unlink.assert_called_with(font_path)
        mocked_copy.assert_called_once_with("directory/font_file.ttf", font_path)

    @patch.object(Path, "unlink", autospec=True)
    @patch.object(Path, "copy", autospec=True)
    @patch.object(Path, "islink", autospec=True)
    def test_load_font_install(self, mocked_islink, mocked_copy, mocked_unlink):
        """Test to load one font which is not installed."""
        # prepare the mock: destination absent, normal install path
        mocked_islink.return_value = False
        font_loader = self.get_font_loader()
        # pre assertions
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # call the method
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_loader.load_font(self.font_path, [], [])
        # post assertions: the installed font is tracked for later unload
        self.assertEqual(len(font_loader.fonts_loaded), 1)
        self.assertEqual(
            font_loader.fonts_loaded[self.font_name],
            self.user_directory / ".fonts/font_file.ttf",
        )
        # assert effect on logs
        font_path = self.user_directory / ".fonts/font_file.ttf"
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.font:Font 'font_file.ttf' "
                "loaded in user directory: '{}'".format(font_path)
            ],
        )
        # assert the call
        mocked_islink.assert_called_with(font_path)
        mocked_unlink.assert_not_called()
        mocked_copy.assert_called_once_with(
            "directory/font_file.ttf", self.user_directory / ".fonts/font_file.ttf"
        )

    @patch.object(Path, "unlink", autospec=True)
    def test_unload(self, mocked_unlink):
        """Test to unload fonts."""
        font_loader = self.get_font_loader()
        # set a font as loaded
        font_loader.fonts_loaded = {"font1": Path("font1"), "font2": Path("font2")}
        # call the method
        font_loader.unload()
        # assert the call
        # especially check that the unload function does not alter the list of
        # elements we are iterating on
        mocked_unlink.assert_has_calls([call("font1"), call("font2")])

    @patch.object(Path, "unlink", autospec=True)
    def test_unload_font(self, mocked_unlink):
        """Test to unload one font."""
        font_loader = self.get_font_loader()
        # set a font as loaded
        font_loader.fonts_loaded = self.font_path_list
        # pre assert there is one font loaded
        self.assertEqual(len(font_loader.fonts_loaded), 1)
        # call the method
        with self.assertLogs("dakara_player.font", "DEBUG") as logger:
            font_loader.unload_font(self.font_name)
        # assert there are no font loaded anymore
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # assert effect of logs
        self.assertListEqual(
            logger.output,
            ["DEBUG:dakara_player.font:Font 'font_file.ttf' unloaded"],
        )
        # assert the call
        mocked_unlink.assert_called_once_with(self.font_path)

    @patch.object(Path, "unlink", autospec=True)
    def test_unload_font_error(self, mocked_unlink):
        """Test to unload one font when unable to remove font."""
        # the removal failure must be logged, not raised
        mocked_unlink.side_effect = OSError("error message")
        font_loader = self.get_font_loader()
        # set a font as loaded
        font_loader.fonts_loaded = self.font_path_list
        # pre assert there is one font loaded
        self.assertEqual(len(font_loader.fonts_loaded), 1)
        # call the method
        with self.assertLogs("dakara_player.font", "ERROR") as logger:
            font_loader.unload_font(self.font_name)
        # assert there are no font loaded anymore
        # (the entry is dropped even though the file removal failed)
        self.assertEqual(len(font_loader.fonts_loaded), 0)
        # assert effect of logs
        self.assertListEqual(
            logger.output,
            [
                "ERROR:dakara_player.font:Font 'font_file.ttf' in "
                "'directory/font_file.ttf' cannot be unloaded: error message"
            ],
        )
class FontLoaderWindowsTestCase(TestCase):
"""Test the Windows font loader."""
def setUp(self):
# create directory
self.directory = Path("directory")
# create font file list
self.font_name = "font_file.ttf"
self.font_name_list = [self.font_name]
self.font_path = self.directory / self.font_name
self.font_path_list = {self.font_name: self.font_path}
def get_font_loader(self):
"""Return an instance of the font loader."""
with self.assertLogs("dakara_player.font", "DEBUG"):
return FontLoaderWindows("package")
@patch("dakara_player.font.ctypes", spec=[])
def test_init_no_windll(self, mocked_ctypes):
"""Test to create font loader on a different OS."""
with self.assertRaisesRegex(
FontLoaderNotAvailableError, "FontLoaderWindows can only be used on Windows"
):
self.get_font_loader()
@patch("dakara_player.font.ctypes")
@patch.object(FontLoaderWindows, "load_font", autospec=True)
@patch.object(FontLoaderWindows, "get_font_path_iterator", autospec=True)
def test_load(self, mocked_get_font_path_iterator, mocked_load_font, mocked_ctypes):
"""Test to load fonts."""
# prepare the mock
mocked_get_font_path_iterator.return_value = (p for p in [self.font_path])
font_loader = self.get_font_loader()
# call the method
font_loader.load()
# assert the call
mocked_get_font_path_iterator.assert_called_once_with(font_loader)
mocked_load_font.assert_called_once_with(font_loader, self.font_path)
@patch("dakara_player.font.ctypes")
def test_load_font(self, mocked_ctypes):
"""Test to load one font."""
# setup mock
mocked_ctypes.windll.gdi32.AddFontResourceW.return_value = 1
font_loader = self.get_font_loader()
# pre assertions
self.assertEqual(len(font_loader.fonts_loaded), 0)
# call the method
with self.assertLogs("dakara_player.font", "DEBUG") as logger:
font_loader.load_font(self.font_path)
# post assertions
self.assertEqual(len(font_loader.fonts_loaded), 1)
# assert effect on logs
self.assertListEqual(
logger.output,
["DEBUG:dakara_player.font:Font 'font_file.ttf' loaded"],
)
# assert the call
mocked_ctypes.windll.gdi32.AddFontResourceW.assert_called_once_with(
self.font_path
)
@patch("dakara_player.font.ctypes")
def test_load_font_error(self, mocked_ctypes):
"""Test to fail to load one font."""
# setup mock
mocked_ctypes.windll.gdi32.AddFontResourceW.return_value = 0
font_loader = self.get_font_loader()
# pre assertions
self.assertEqual(len(font_loader.fonts_loaded), 0)
# call the method
with self.assertLogs("dakara_player.font", "DEBUG") as logger:
font_loader.load_font(self.font_path)
# post assertions
self.assertEqual(len(font_loader.fonts_loaded), 0)
# assert effect on logs
self.assertListEqual(
logger.output,
["WARNING:dakara_player.font:Font 'font_file.ttf' cannot be loaded"],
)
@patch("dakara_player.font.ctypes")
@patch.object(FontLoaderWindows, "unload_font")
def test_unload(self, mocked_unload_font, mocked_ctypes):
"""Test to unload fonts."""
font_loader = self.get_font_loader()
# set a font as loaded
font_loader.fonts_loaded = {"font1": Path("font1"), "font2": Path("font2")}
# call the method
font_loader.unload()
# assert the call
mocked_unload_font.assert_has_calls([call("font1"), call("font2")])
@patch("dakara_player.font.ctypes")
def test_unload_font(self, mocked_ctypes):
"""Test to unload one font."""
# setup mock
mocked_ctypes.windll.gdi32.RemoveFontResourceW.return_value = 1
font_loader = self.get_font_loader()
# set a font as loaded
font_loader.fonts_loaded = self.font_path_list
# pre assert there is one font loaded
self.assertEqual(len(font_loader.fonts_loaded), 1)
# call the method
with self.assertLogs("dakara_player.font", "DEBUG") | |
+ str(num)
student_circuits_sorted = sorted(check_pins)
good_circ_sorted = sorted(good_circ.input_pins)
sm = difflib.SequenceMatcher(None, student_circuits_sorted, good_circ_sorted)
res_ratio = sm.ratio()
if res_ratio > 0.99:
out_str += '<font color="green"> {} : PERFECT MATCH!<br> </font>'.format(circ_to_grade)
elif res_ratio > 0.15:
out_str += '{} :Great news : you match ratio is {:.1%} (>75%)<br>{} : <b>FOUND</b> {} <br>{} : <b>EXPECTED</b> {} <br>'\
.format(circ_to_grade, res_ratio, circ_to_grade, ' '.join(student_circuits_sorted), circ_to_grade, ' '.join(good_circ_sorted))
errors += 1
else:
out_str += '<font color="red">{} Bad news : you match ratio is only', \
'{:.1%} - this means that you have to significantly change your circuit. <br> ' \
'Please send me a message if you need some advice.<br> </font>'.format(circ_to_grade, res_ratio)
errors += 1
final_grade = math.ceil(10 * (len(self.what_to_grade) - errors) / len(self.what_to_grade))
# out_str += '<br> Bad grade confidence: ' + conf + ' (this is for Ivan)<br>' + '<br> Next part will be typed manually: <br>'
save_grade_and_report(self.grade_ids[stud_ind], final_grade, out_str, None, self.grader)
return final_grade, out_str
def get_stud_id(self):
"""
Just a simple getter.
:return:
"""
return self.stud_id
def log_update(self, log_event):
"""
Saves events into a string.
Later this string is displayed in a separate tab.
:param log_event: what happened.
:return: nothing
"""
self.global_log += str(self.stud_id) + ': ' + str(log_event) + '\n'
def get_parsed_pins(self):
"""
High level function that obtains in|out pins and check their facing.
:return: nothing.
"""
try:
input_pins, output_pins, other_pins = self.circ_obj_ref.get_parsed_pins()
if other_pins:
self.log_update('I was not able to recognize ' + str(len(other_pins)) + " pins.")
self.input_correct = True
self.output_correct = True
if not self.check_pins_facing(pins=input_pins, corr_facing='east'):
self.subtract += 1
self.input_correct = False
if not self.check_pins_facing(pins=output_pins, corr_facing='west'):
self.subtract += 1
self.output_correct = False
except Exception as e: # TODO check for FileNotFoundError and assign ZERO
print(e)
# self.log_update(sys.exc_info()[0])
# print(sys.exc_info()[0])
raise
# self.log_update('Done checking: ' + self.filename)
# noinspection PyMethodMayBeStatic
def check_pins_facing(self, pins, corr_facing):
"""
Low level pin facing checker.
:param pins: structured list of pins.
:param corr_facing: nothing to add.
:return: True if facing is correct, False otherwise
"""
for pin in pins:
if pin.facing != corr_facing and pin.facing != '':
return False
return True
def check_file(self):
"""
Opens circ file, tryes to parse it and to generate grade according to the pin facing.
This check is too simple and most likely will be updated later.
:return: nothing
"""
file = os.path.join(self.file_list[self.cur_idx], MAIN_FILE_NAME)
circ_obj = CircFile(file)
self.circ_obj_ref = circ_obj
self.subtract = 0
try:
self.get_parsed_pins()
self.log_update('Pins successfully parsed.')
self.final_grade = self.lab_max_grade - self.subtract
self.generate_response()
except Exception as e:
print(e)
self.log_update(sys.exc_info()[0])
def check_circ_exist(self):
"""
Checks whether file exists with specified name.
If not generates report which contains all submitted elements.
:return: True is file exists, False otherwise
"""
if not os.path.isfile(self.file_list[self.cur_idx] + '/' + self.circ_file_name):
self.resp_text = 'File was not found'
file_found = os.listdir(self.file_list[self.cur_idx])
potential_files = list()
for file in file_found:
if file not in ['grade.txt', 'penalty.txt', 'responce.txt', 'tech_info.txt', ]:
potential_files.append(file)
if potential_files:
self.resp_text += '\nNext files|folders were found:\n'
for file in potential_files:
if os.path.isdir(self.file_list[self.cur_idx] + '/' + file):
self.resp_text += file + ' - directory.\n'
else:
self.resp_text += file + ' - regular file.\n'
self.resp_len = len(self.resp_text)
self.final_grade = 0
return False
return True
def read_resp(self):
"""
Reads response generated by either import scripts or by grader.
Usually is stored in response.txt. Later may be transferred into DB.
:return: nothing.
"""
self.submitted = self.timestamps[self.cur_idx]
try:
with open(os.path.join(self.file_list[self.cur_idx], 'responce.txt'), 'r') as resp_file:
a = resp_file.readlines()
self.resp_text = ''.join(a)
self.resp_len = len(self.resp_text)
except Exception as e:
print(e)
self.log_update(sys.exc_info()[0])
try:
with open(os.path.join(self.file_list[self.cur_idx], 'grade.txt'), 'r') as grade_file:
self.final_grade = int(grade_file.readline())
except Exception as e:
print(e)
self.log_update(sys.exc_info()[0])
# self.read_prev_resp()
def read_resp2(self):
self.final_grade, self.resp_text, self.user_comment, graded = get_resp_and_grade(self.grade_ids[self.cur_idx])
if graded is None:
self.final_grade = self.lab_max_grade
self.resp_text = 'I did not find any errors. Good job!'
# self.resp_text = '' if self.resp_text is None else self.resp_text
self.resp_len = len(self.resp_text)
return graded
    def read_prev_resp2(self):
        """Load previous responses for the current student/lab from the DB."""
        self.previous_responses = get_prev_resp(self.grade_ids[self.cur_idx], self.stud_ids[self.cur_idx], self.lid)
    def read_prev_resp(self):
        """
        In case we are working with resubmission,
        this function will try to get previous responses.
        :return: nothing.
        """
        if self.attempt > 1:
            self.previous_responses = ''  # TODO find same name in folder name
            # NOTE(review): assumes working_dir ends with "<attempt digit>/",
            # so [-2:-1] is the attempt number — breaks for attempts >= 10;
            # confirm the directory naming convention.
            prev_att = int(self.working_dir[-2:-1])
            for i in range(prev_att-1, 0, -1):
                # rebuild the sibling directory of the earlier attempt
                prev_working_dir = self.working_dir[:-2] + str(i) + '/'
                for file in os.listdir(prev_working_dir):
                    # folder names are expected to contain the student id
                    if file.__contains__(self.stud_id):
                        try:
                            with open(prev_working_dir + file + '/responce.txt', 'r') as resp_file:
                                self.previous_responses += str(i) + 'th submission :\n\t' \
                                                           + '\n'.join(resp_file.readlines())
                        except Exception as e:
                            print('Error in read prev responce: ', e)
def next_circ(self):
"""
Opens next circuit
:return: current index
"""
self.cur_idx += 1
# self.check_file(self.cur_idx)
self.user_comment = ''
graded = self.read_resp2()
# if graded:
self.read_prev_resp2()
# if self.check_circ_exist():
# self.read_resp()
self.stud_id = self.stud_ids[self.cur_idx]
# try:
# self.read_prev_resp()
# except Exception as e:
# print('Error during attempt to read prev resp when opening next circuit: ', e)
# # TODO add handler
return self.cur_idx
def prev_circ(self):
"""
Opens previous circuit
:return: current index
"""
self.cur_idx -= 1
# self.check_file(self.cur_idx)
self.user_comment = ''
graded = self.read_resp2()
if graded:
self.read_prev_resp2()
# if self.check_circ_exist():
# self.read_resp()
self.stud_id = self.stud_ids[self.cur_idx]
# try:
# self.read_prev_resp()
# except Exception as e:
# print('Error during attempt to read prev resp when opening prev circuit: ', e)
# # TODO add handler
return self.cur_idx
def check_wrong(self):
"""
Funciton bound to 'Wrong' button(checkbox). Marks lab as 'wrong'.
:return: nothing
"""
self.final_grade = 0
self.resp_text = 'your lab was marked as wrong. You should fix errors listed below and resubmit it.'
self.resp_len = len(self.resp_text)
def save_grade(self):
"""
Function bound to 'Save grade' button. Saves grade into 'grade.txt' file
:return: nothing.
"""
file = os.path.join(self.lab_paths[self.cur_idx], 'grade.txt')
with open(file, 'w') as grade_file:
grade_file.write(str(self.final_grade))
self.log_update('Grade saved')
def save_responce(self):
"""
Function bound to 'Save responce' button.
Saves current (auto and manual) responce into 'responce.txt'.
:return: nothing.
"""
file = os.path.join(self.lab_paths[self.cur_idx], 'responce.txt')
with open(file, 'w') as resp_file:
resp_file.write(self.resp_text)
if self.user_comment:
resp_file.write('\nAdditional comment: ' + self.user_comment + '\n')
self.log_update('Responce saved')
def save_all(self):
"""
Function bound to 'Save all' button.
Saves both grade and response by calling appropriate functions.
:return: nothing.
"""
self.save_grade()
self.save_responce()
def save_all2(self):
"""
Same as save_all but uses db to save grade
:return:
"""
save_grade_and_report(self.grade_ids[self.cur_idx], self.final_grade, self.resp_text, self.user_comment, self.grader)
def generate_response(self):
"""
Regenerates the responce.
:return: nothing.
"""
self.resp_text = ''
self.user_comment = ''
if self.input_correct and self.output_correct:
self.resp_text = 'I did not find any errors. Good job!'
else:
if not self.input_correct:
self.resp_text += 'Your input pins have wrong orientation.\n'
if not self.output_correct:
self.resp_text += 'Your output pins have wrong orientation.\n'
self.resp_len = len(self.resp_text)
def add_to_common_answers(self, typed):
"""
Function bound to FocusOut input handler.
Adds whatever is typed into popular answers.
:param typed: Text from input field
:return: nothing.
"""
self.input_suggestion.add(typed)
class UiMainWindow1(Ui_mainWindow):
"""
"""
    def __init__(self):
        """Initialize the generated UI base and the window-level references."""
        Ui_mainWindow.__init__(self)
        # filled in later by the open/load handlers once a directory is chosen
        self.grader_ref = None
        self.cal_window = None
        self.working_dir = None
    def disable_fields(self):
        """
        disables UI elements. Usually followed by 'enable_fields'
        :return: nothing
        """
        self.checkB_input_pin_status.setDisabled(True)
        self.checkB_output_pin_status.setDisabled(True)
        # self.input_response_browser.setDisabled(True)
        self.checkB_wrong.setDisabled(True)
        # self.input_subtract.setDisabled(True)
        self.but_regrade.setDisabled(True)
        self.popular_answers.setDisabled(True)
        self.input_final_grade.setDisabled(True)
        # also reset the 'wrong' flag and clear the current id display
        self.checkB_wrong.setChecked(False)
        self.check_autosave.setDisabled(True)
        self.input_current_id.setText('')
    def enable_fields(self):
        """
        enables UI elements. Usually follows 'disable_fields'
        :return: nothing
        """
        self.checkB_input_pin_status.setEnabled(True)
        self.checkB_output_pin_status.setEnabled(True)
        # self.input_response_browser.setEnabled(True)
        self.checkB_wrong.setEnabled(True)
        self.input_final_grade.setEnabled(True)
        self.check_autosave.setEnabled(True)
        # self.input_subtract.setEnabled(True)
        # NOTE: but_regrade is intentionally left as-is (see commented line),
        # unlike in disable_fields where it is disabled
        # self.but_regrade.setEnabled(True)
        self.popular_answers.setEnabled(True)
    def load_dir(self):
        """
        Resets UI when directory to grade is loaded.
        :return:
        """
        # activate elements
        # NOTE(review): assumes working_dir looks like ".../<year>_<semester>/..."
        # (third path component from the end) — confirm the layout.
        cur_year, cur_sem = self.grader_ref.working_dir.split('/')[-3].split('_')
        self.class_id_to_id = get_ids_in_class_by_year_semester(cur_year, cur_sem)[1]
        self.but_begin.setDisabled(True)
        self.but_begin.repaint()
        self.progressBar.setEnabled(True)
        self.disable_fields()
        self.grader_ref.tot_elem = len(self.grader_ref.lab_paths)
        # navigation only makes sense with more than one lab to grade
        if self.grader_ref.tot_elem > 1:
            self.but_next.setEnabled(True)
        self.progressBar.setMaximum(self.grader_ref.tot_elem)
        self.progressBar.setValue(0)
        self.popular_answers.clear()
        # self.grader_ref.check_file(0)
        # self.grader_ref.stud_id = self.grader_ref.stud_ids[self.grader_ref.cur_idx]
        # start before the first lab; next_circ() advances to index 0
        self.grader_ref.cur_idx = -1
        # graded = self.grader_ref.read_resp2()
        # if graded:
        #     self.grader_ref.read_prev_resp2()
        self.next_circ()
        # self.grader_ref.read_resp()
        # self.grader_ref.read_prev_resp()
        # self.show_stat()
        # self.check_file()
        # self.input_current_id.setPlainText(self.grader_ref.get_stud_id())
        self.enable_fields()
        self.input_response_browser_user.setEnabled(True)
        self.but_regrade.setText('GRADE')
        self.but_save_all.setEnabled(True)
        self.but_save_response.setEnabled(True)
        self.check_autosave.setEnabled(True)
        self.but_reset.setEnabled(True)
def my_open_file(self):
"""
Creates Grader instance and stores it in local reference
Determines filename by selecting filename used by majority of students.
Displays selected filename in UI element, so grader can see it.
:return:
"""
working_dir = self.input_file_location.text()
# self.input_response_browser.clear()
# self.input_response_browser_user.clear()
self.input_response_browser.setPlainText('I did not find any errors. Good job!')
grader_name = settings_db_read_settings()[1][0]
self.current_tz = QDateTime.currentDateTime().timeZoneAbbreviation()
try:
my_grader = Grader(working_dir, grader_name)
my_grader.open_dir()
self.grader_ref = my_grader
self.input_max_pos_grade.setText(str(my_grader.lab_max_grade))
self.input_attempt.setText(str(my_grader.attempt))
self.dateTimeEdit_from.setDateTime(my_grader.time_from_qt)
self.dateTimeEdit_to.setDateTime(my_grader.time_to_qt)
self.grader_ref.add_to_common_answers('') # helps to remove all | |
<reponame>SquidRo/acc-bpf
#!/usr/bin/python3
#
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
from bcc import libbcc, table
import pyroute2, time, sys, argparse, ctypes, os
c_text = """
#include <uapi/linux/bpf.h>
#include <uapi/linux/gtp.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gtp.h>
#include <net/gre.h>
#include <net/geneve.h>
#include <bcc/proto.h>
// Counter index per tunnel type for the rem_total map.
enum cn_idx {
    CN_VXLAN,
    CN_GTP,
    CN_GRE,
    CN_GENEVE,
    CN_MAX
};
// Tail-call slots of the 'parser' program array.
// NOTE(review): the cb_* stages below tail-call CB_VLAN, CB_IP4, CB_IP6,
// CB_UDP, CB_GRE, CB_CUT_1 and index meta->hdr_len with CB_ETH etc.,
// none of which are declared in this enum — presumably they are injected
// elsewhere (e.g. a preprocessor define or generated text); confirm,
// otherwise hdr_len[CB_MAX] would be far too small.
enum cb_idx {
    CB_FUN_LST,
    CB_MAX,
};
// Runtime option switches stored in opt_tbl (0 = off, 1 = on).
enum op_idx {
    OP_DBG,
    OP_VXLAN,
    OP_GTP,
    OP_GRE,
    OP_GENEVE,
    OP_MAX,
};
// Per-packet scratch data carried in the xdp metadata area between
// tail calls: the header length found by each stage plus a running offset.
struct meta_info {
    uint8_t hdr_len[CB_MAX];
    uint8_t cur_ofs;
} __attribute__((aligned(4)));
// Overlay of the same metadata area used once a tunnel is recognized
// and the packet is handed to the "cut" stage.
struct meta_info_cut {
    uint16_t hdr_len[2]; // 0 - l2 hdr len (including ether type), 1 - cut len
    uint8_t is_inner_ip4; // to modify ethertype
    uint8_t is_outer_ip4;
    uint8_t cn_id; // refer to cn_idx
} __attribute__((aligned(4)));
BPF_ARRAY(opt_tbl, uint32_t, OP_MAX); // 0: off, 1: on
BPF_ARRAY(rem_total, uint64_t, CN_MAX); // per-tunnel removal counters
BPF_PROG_ARRAY(parser, CB_MAX); // tail-call table of parser stages
// Return non-zero when the option at opt_idx is enabled.
static inline int is_opt_on(uint32_t opt_idx)
{
    uint32_t *opt_flag;
    opt_flag = opt_tbl.lookup(&opt_idx);
    if (opt_flag)
    {
        return (*opt_flag != 0);
    }
    return 0;
}
// Bump the removal counter of tunnel type idx (creating the slot if absent).
// NOTE(review): not called from any stage visible here — presumably used
// by the cut stage further down; confirm.
static inline void update_rem_total(uint32_t idx)
{
    uint64_t *rem_c;
    rem_c = rem_total.lookup(&idx);
    if (rem_c)
    {
        *rem_c += 1;
    }
    else
    {
        rem_total.update(&idx, (uint64_t []) {1} );
    }
}
// ethtype (in network order)
// jump to next program or return -1
// A successful parser.call() is a bpf tail call and never returns here;
// falling through the switch means the ethertype is not handled.
static inline int dispatch_ethtype(struct CTXTYPE *ctx, uint16_t ethtype)
{
    switch (ethtype)
    {
        case htons(ETH_P_8021Q):
        case htons(ETH_P_8021AD):
            parser.call(ctx, CB_VLAN);
            break;
        case htons(ETH_P_IP):
            parser.call(ctx, CB_IP4);
            break;
        case htons(ETH_P_IPV6):
            parser.call(ctx, CB_IP6);
            break;
        default:
            break;
    }
    return -1;
}
// ethtype (in network order)
// jump to next program or return -1
// Variant used after VLAN tags have been consumed: no further VLAN nesting.
static inline int dispatch_ethtype_vlan(struct CTXTYPE *ctx, uint16_t ethtype)
{
    switch (ethtype)
    {
        case htons(ETH_P_IP):
            parser.call(ctx, CB_IP4);
            break;
        case htons(ETH_P_IPV6):
            parser.call(ctx, CB_IP6);
            break;
        default:
            break;
    }
    return -1;
}
// proto (in network order)
// jump to next program or return -1
// Dispatch on the IP protocol; GRE is gated on its runtime option here,
// UDP-based tunnels are gated later in dispatch_port.
static inline int dispatch_ippro(struct CTXTYPE *ctx, uint16_t proto)
{
    switch (proto)
    {
        case IPPROTO_UDP:
            parser.call(ctx, CB_UDP);
            break;
        case IPPROTO_TCP:
            parser.call(ctx, CB_TCP);
            break;
        case IPPROTO_GRE:
            if (is_opt_on(OP_GRE))
                parser.call(ctx, CB_GRE);
            break;
        default:
            break;
    }
    return -1;
}
// port (in network order)
// jump to next program or return -1
// UDP destination port selects the tunnel stage, each gated on its option.
static inline int dispatch_port(struct CTXTYPE *ctx, uint16_t port)
{
    switch (port)
    {
        case htons(4789): // IANA-assigned VXLAN port
            if (is_opt_on(OP_VXLAN))
                parser.call(ctx, CB_VXLAN);
            break;
        case htons(GTP1U_PORT):
            if (is_opt_on(OP_GTP))
                parser.call(ctx, CB_GTP);
            break;
        case htons(GENEVE_UDP_PORT):
            if (is_opt_on(OP_GENEVE))
                parser.call(ctx, CB_GENEVE);
            break;
        default:
            break;
    }
    return -1;
}
// Entry stage: reserve the metadata area, record the Ethernet header
// length and dispatch on the outer ethertype.
int cb_eth(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct ethhdr *eth;
    int ret;
    // nothing to do unless at least one decap option is enabled
    // NOTE(review): only VXLAN/GTP are checked although GRE and GENEVE
    // stages exist below — confirm whether this gate is stale.
    if (! (is_opt_on(OP_VXLAN) || (is_opt_on(OP_GTP))))
        return XDP_PASS;
    // grow the data_meta area to hold our scratch struct
    ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
    if (ret < 0)
        return XDP_PASS;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
#pragma unroll
    for (int i=0; i <sizeof(meta->hdr_len); i++)
    {
        meta->hdr_len[i] = 0;
    }
    eth = data;
    // FIX: was "(void *)ð[1]" — mojibake of the HTML entity "&eth;";
    // restore the intended bounds check on the Ethernet header.
    if ((void *)&eth[1] > data_end)
        return XDP_PASS;
    meta->hdr_len[CB_ETH] = sizeof(*eth);
    meta->cur_ofs = sizeof(*eth);
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("eth ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_ethtype(ctx, eth->h_proto);
    return XDP_PASS;
}
// VLAN stage: consume up to two stacked 802.1Q/802.1ad tags, record
// their total length and dispatch on the inner ethertype.
int cb_vlan(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct vlan_hdr *vhdr;
    int len = 0, cur_ofs;
    uint16_t next_proto;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    cur_ofs = meta->cur_ofs;
#pragma unroll
    for (int i=0; i<2; i++)
    {
        vhdr = data + cur_ofs + len;
        if ((void *)&vhdr[1] > data_end)
        {
            return XDP_PASS;
        }
        next_proto = vhdr->h_vlan_encapsulated_proto;
        len += sizeof(*vhdr);
        // stop unless another VLAN tag follows (QinQ)
        if (!(next_proto == htons(ETH_P_8021Q) || next_proto == htons(ETH_P_8021AD)))
        {
            break;
        }
    }
    meta->hdr_len[CB_VLAN] = len;
    meta->cur_ofs = cur_ofs + len;
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("vlan ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_ethtype_vlan(ctx, next_proto);
    return XDP_PASS;
}
// IPv4 stage: record the (variable, ihl-based) header length and
// dispatch on the L4 protocol.
int cb_ip4(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct iphdr *iph;
    uint16_t next_proto;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    iph = data + meta->cur_ofs;
    if ((void *)&iph[1] > data_end)
        return XDP_PASS;
    // ihl is in 32-bit words, hence the shift by 2
    meta->hdr_len[CB_IP4] = iph->ihl << 2;
    meta->cur_ofs += iph->ihl << 2;
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("ip4 ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_ippro(ctx, iph->protocol);
    return XDP_PASS;
}
// IPv6 stage: record the fixed 40-byte header length and dispatch on
// the next-header field.
// NOTE(review): extension headers are not walked — nexthdr is passed
// straight to dispatch_ippro; confirm this is the intended limitation.
int cb_ip6(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct ipv6hdr *ip6h;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    ip6h = data + meta->cur_ofs;
    if ((void *)&ip6h[1] > data_end)
        return XDP_PASS;
    meta->hdr_len[CB_IP6] = sizeof(*ip6h);
    meta->cur_ofs += sizeof(*ip6h);
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("ip6 ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_ippro(ctx, ip6h->nexthdr);
    return XDP_PASS;
}
//refer to gre_parse_header in linux kernel
/* Parse a GRE header, then hand off to the 'cut' stage which strips the
 * inserted (outer ip / gre) encapsulation from the packet. */
int cb_gre(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct gre_base_hdr *greh;
    int hdr_len;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    greh = data + meta->cur_ofs;
    if ((void *)&greh[1] > data_end)
        return XDP_PASS;
    /* GRE header length varies with the checksum/key/sequence flag bits. */
    hdr_len = gre_calc_hlen(greh->flags);
    meta->hdr_len[CB_GRE] = hdr_len;
    meta->cur_ofs += hdr_len;
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("gre ofs - %d" DBGLR, meta->cur_ofs);
    }
    {
        int cut_len, l2_hdr_len;
        uint8_t is_outer_ip4 = 0;
        uint8_t is_inner_ip4 = 0;
        struct meta_info_cut *meta_cut;
        // need to cut inserted (ip/gre) part
        // cut_len max: 60 + 16
        // l2_hdr_len max: 14 + 8
        cut_len = meta->hdr_len[CB_IP4] + meta->hdr_len[CB_IP6] +
                  meta->hdr_len[CB_GRE];
        l2_hdr_len = meta->hdr_len[CB_ETH] + meta->hdr_len[CB_VLAN];
        is_outer_ip4 = (meta->hdr_len[CB_IP4] > 0);
        is_inner_ip4 = (greh->protocol == htons(ETH_P_IP));
        /* Re-view the same metadata area with the 'cut' layout; a fresh
         * bounds check is required for the new pointer. */
        meta_cut = (void *)(unsigned long)ctx->data_meta;
        if ((void *)&meta_cut[1] > data)
            return XDP_PASS;
        meta_cut->hdr_len[0] = l2_hdr_len;
        meta_cut->hdr_len[1] = cut_len;
        meta_cut->is_outer_ip4 = is_outer_ip4;
        meta_cut->is_inner_ip4 = is_inner_ip4;
        meta_cut->cn_id = CN_GRE;
        /* Tail call into the cut program; if the call fails we fall
         * through to XDP_PASS below. */
        parser.call(ctx, CB_CUT_1);
    }
    return XDP_PASS;
}
/* Parse a UDP header (fixed 8 bytes): record its length in the metadata,
 * advance the running offset and dispatch on the destination port. */
int cb_udp(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct udphdr *udph;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    udph = data + meta->cur_ofs;
    if ((void *)&udph[1] > data_end)
        return XDP_PASS;
    meta->hdr_len[CB_UDP] = sizeof(*udph);
    meta->cur_ofs += sizeof(*udph);
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("udp ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_port(ctx, udph->dest);
    return XDP_PASS;
}
/* Parse a TCP header: record its byte length (doff is in 32-bit words, so
 * options are included) in the metadata, advance the running offset and
 * dispatch on the destination port.
 *
 * Bug fix: on a truncated header this used to 'return -1', which is not a
 * valid XDP action code (the kernel treats unknown action values as
 * XDP_ABORTED and drops the packet). Every sibling parser bails out with
 * XDP_PASS, so do the same here. */
int cb_tcp(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct tcphdr *tcph;
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    tcph = data + meta->cur_ofs;
    if ((void *)&tcph[1] > data_end)
        return XDP_PASS;
    meta->hdr_len[CB_TCP] = tcph->doff << 2;
    meta->cur_ofs += tcph->doff << 2;
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("tcp ofs - %d" DBGLR, meta->cur_ofs);
    }
    dispatch_port(ctx, tcph->dest);
    return XDP_PASS;
}
/* Parse a VXLAN header and hand off to the 'cut' stage which strips the
 * whole outer encapsulation up to and including the VXLAN header. */
int cb_vxlan(struct CTXTYPE *ctx)
{
    void *data_end;
    void *data;
    struct meta_info *meta;
    struct vxlan_t *vxlanh;
    uint8_t cut_len;   /* NOTE(review): unlike cb_gre, this holds the total
                        * offset from packet start (outer L2..vxlan), not just
                        * the encap headers; uint8_t caps it at 255, which
                        * appears sufficient for the stacks parsed here —
                        * confirm if deeper stacks become possible. */
    data_end = (void*)(long)ctx->data_end;
    data = (void*)(long)ctx->data;
    /* Check data_meta have room for meta_info struct */
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)&meta[1] > data)
        return XDP_PASS;
    vxlanh = data + meta->cur_ofs;
    if ((void *)&vxlanh[1] > data_end)
        return XDP_PASS;
    cut_len = meta->cur_ofs + sizeof(*vxlanh);
    if (is_opt_on(OP_DBG))
    {
        bpf_trace_printk("vxlan ofs - %d" DBGLR, cut_len);
    }
    {
        struct meta_info_cut *meta_cut;
        /* Re-view the metadata area with the 'cut' layout; a fresh bounds
         * check is required for the new pointer. */
        meta_cut = (void *)(unsigned long)ctx->data_meta;
        if ((void *)&meta_cut[1] > data)
            return XDP_PASS;
        meta_cut->hdr_len[1] = cut_len;
        meta_cut->cn_id = CN_VXLAN;
        /* Tail call into the cut program; if the call fails we fall
         * through to XDP_PASS below. */
        parser.call(ctx, CB_CUT_2);
    }
    return XDP_PASS;
}
// return 1 if ipv4 header exists at the specified offset
/* NOTE(review): returns -1 (not 0) when the header would overrun data_end.
 * -1 is truthy in C, so callers must compare against 1 / > 0 rather than
 * testing the result as a boolean — confirm call sites. */
static inline int is_ip4_hdr(
    void *data, void *data_end, uint32_t cur_ofs)
{
    struct iphdr *iph = data + cur_ofs;
    if ((void *)&iph[1] > data_end)
        return -1;
    return (iph->version == 4);
}
int cb_gtp(struct CTXTYPE *ctx)
{
void *data_end;
void *data;
struct meta_info *meta;
//refer to gtp1u_udp_encap_recv in linux kernel
struct gtp1_header *gtp1h;
int hdr_len = sizeof(*gtp1h);
data_end = (void*)(long)ctx->data_end;
data = (void*)(long)ctx->data;
/* Check data_meta have room for meta_info struct */
meta = (void *)(unsigned long)ctx->data_meta;
if ((void *)&meta[1] > data)
return XDP_PASS;
gtp1h = data + meta->cur_ofs;
if ((void *)>p1h[1] > data_end)
return XDP_PASS;
if ((gtp1h->flags >> 5) != GTP_V1)
return XDP_PASS;
if (gtp1h->type != GTP_TPDU)
return XDP_PASS;
if (gtp1h->flags & GTP1_F_MASK)
hdr_len += 4;
meta->cur_ofs += hdr_len;
meta->hdr_len[CB_GTP] = hdr_len;
if (is_opt_on(OP_DBG))
{
bpf_trace_printk("gtp1u ofs - %d" DBGLR, meta->cur_ofs);
}
{
int cut_len, l2_hdr_len;
uint8_t is_outer_ip4 = 0;
uint8_t is_inner_ip4 = 0;
struct meta_info_cut *meta_cut;
// need to cut inserted (ip/udp or tcp /gprs) part
// cut_len max: 60 + 60 + 12
// l2_hdr_len max: 14 + 8
cut_len = meta->hdr_len[CB_IP4] + meta->hdr_len[CB_IP6] +
meta->hdr_len[CB_TCP] + meta->hdr_len[CB_UDP] +
meta->hdr_len[CB_GTP];
l2_hdr_len = | |
str(step) + "_vox.ply")
write_ply_triangle(self.result_dir + "/" + str(step) + "_vox.ply", vertices, triangles)
# update base sample points for next iteration.
# base_mesh = Meshes(verts=[vertices])
# self.base_sample_points = sample_points_from_meshes(meshes=base_mesh, num_samples=int(1e5))
return z_base.grad
    def build_gram_matrix(self,
                          z_style: torch.Tensor,
                          z_target: torch.Tensor,
                          config):
        '''
        Evaluate the style and target shapes over the occupied voxel cells
        and return the gradient of the Gram-matrix style loss.

        Surface cells are found with a coarse-to-fine flood fill of the
        frame grid (same scheme as the other evaluation methods). For each
        batch of cell coordinates, the decoder is run once with z_style and
        once with z_target, recording the hooked encoder activations
        (self.gram_activation[0]) for each. The two activation records are
        turned into Gram matrices, an MSE loss between them is
        backpropagated, and the gradient with respect to the recorded
        target activations is returned.

        :param z_style: style model latent vector
        :param z_target: target (style-transfer) model latent vector
        :param config: configuration parameters (unused in this method)
        :return: torch.Tensor, gradient of the Gram loss w.r.t. the
            recorded target activations
        '''
        # model_float accumulates decoded occupancy values; only used for
        # the flood-fill bookkeeping here (it is not returned).
        model_float = np.zeros([self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32)
        dimc = self.cell_grid_size
        dimf = self.frame_grid_size  # coarse model evaluation
        frame_flag = np.zeros([dimf + 2, dimf + 2, dimf + 2], np.uint8)
        queue = []
        # Per-batch activation snapshots, concatenated at the end.
        style_gram_record = []
        target_gram_record = []
        frame_batch_num = int(dimf ** 3 / self.test_point_batch_size)
        assert frame_batch_num > 0
        volume_z = z_target
        # get frame grid values: this gets frame voxels that contain above threshold values
        for i in range(frame_batch_num):
            point_coord = self.frame_coords[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
            point_coord = np.expand_dims(point_coord, axis=0)
            point_coord = torch.from_numpy(point_coord)
            point_coord = point_coord.to(self.device)
            _, model_out_ = self.im_network(None, volume_z, point_coord, is_training=False)
            model_out = model_out_.detach().cpu().numpy()[0]
            # TODO: remove dummy data
            # model_out = np.random.random(size=[1, 4096, 1])
            x_coords = self.frame_x[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
            y_coords = self.frame_y[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
            z_coords = self.frame_z[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
            frame_flag[x_coords + 1, y_coords + 1, z_coords + 1] = np.reshape(
                (model_out > self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size])
        # get queue and fill up ones
        # This puts together a que of frame points to compute values.
        for i in range(1, dimf + 1):
            for j in range(1, dimf + 1):
                for k in range(1, dimf + 1):
                    maxv = np.max(frame_flag[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2])
                    minv = np.min(frame_flag[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2])
                    if maxv != minv:
                        # Boundary voxel (mixed neighborhood): needs fine evaluation.
                        queue.append((i, j, k))
                    elif maxv == 1:
                        # Fully interior voxel: mark occupied without evaluating.
                        x_coords = self.cell_x + (i - 1) * dimc
                        y_coords = self.cell_y + (j - 1) * dimc
                        z_coords = self.cell_z + (k - 1) * dimc
                        model_float[x_coords + 1, y_coords + 1, z_coords + 1] = 1.0
        print("running queue:", len(queue))
        cell_batch_size = dimc ** 3
        cell_batch_num = int(self.test_point_batch_size / cell_batch_size)
        assert cell_batch_num > 0
        # run queue
        iter_num = 0
        points_num = 0
        while len(queue) > 0:
            batch_num = min(len(queue), cell_batch_num)
            point_list = []
            cell_coords = []
            for i in range(batch_num):
                point = queue.pop(0)
                point_list.append(point)
                cell_coords.append(self.cell_coords[point[0] - 1, point[1] - 1, point[2] - 1])
            cell_coords = np.concatenate(cell_coords, axis=0)
            num_points = cell_coords.shape[0]  # record the number of points
            cell_coords = np.expand_dims(cell_coords, axis=0)
            cell_coords = torch.from_numpy(cell_coords)
            cell_coords = cell_coords.to(self.device)
            # Run the style latent; the forward hook fills self.gram_activation.
            _, model_out_batch_ = self.im_network(None, z_style, cell_coords, is_training=False)
            style_gram_activation = self.gram_activation[0].clone().detach().squeeze()
            style_gram_record.append(style_gram_activation)
            # print(style_gram_record.shape)
            # Run the target latent on the same coordinates.
            _, model_out_batch_ = self.im_network(None, z_target, cell_coords, is_training=False)
            target_gram_activation = self.gram_activation[0].clone().detach().squeeze()
            target_gram_record.append(target_gram_activation)
            model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
            for i in range(batch_num):
                point = point_list[i]
                model_out = model_out_batch[i * cell_batch_size:(i + 1) * cell_batch_size, 0]
                x_coords = self.cell_x + (point[0] - 1) * dimc
                y_coords = self.cell_y + (point[1] - 1) * dimc
                z_coords = self.cell_z + (point[2] - 1) * dimc
                model_float[x_coords + 1, y_coords + 1, z_coords + 1] = model_out
                if np.max(model_out) > self.sampling_threshold:
                    # Flood fill: enqueue untouched neighbors of occupied voxels.
                    # NOTE(review): the nested loops shadow the outer batch
                    # index `i`; harmless here because `i` is only read before
                    # this point in each outer iteration, but fragile.
                    for i in range(-1, 2):
                        pi = point[0] + i
                        if pi <= 0 or pi > dimf: continue
                        for j in range(-1, 2):
                            pj = point[1] + j
                            if pj <= 0 or pj > dimf: continue
                            for k in range(-1, 2):
                                pk = point[2] + k
                                if pk <= 0 or pk > dimf: continue
                                if (frame_flag[pi, pj, pk] == 0):
                                    frame_flag[pi, pj, pk] = 1
                                    queue.append((pi, pj, pk))
            # print('iteration {} of {} completed'.format(iter_num, total_iter))
            points_num += num_points
            # print(points_num)
            iter_num += 1
        # Now compute the global gram correlation matrix
        style_gram_record = torch.cat(style_gram_record, dim=0)
        #del style_gram_record
        #style_gram_record = style_gram_record_
        print(style_gram_record.shape)
        target_gram_record = torch.cat(target_gram_record, dim=0)
        # del target_gram_record
        # target_gram_record = target_gram_record_
        # Gradients are taken w.r.t. the concatenated target record only.
        target_gram_record.requires_grad_()
        target_gram_matrix = torch.matmul(torch.transpose(target_gram_record, 0, 1), target_gram_record)
        style_gram_matrix = torch.matmul(torch.transpose(style_gram_record, 0, 1), style_gram_record)
        # print(style_gram_matrix.shape)
        mse_loss = torch.nn.MSELoss()
        loss = mse_loss(target_gram_matrix, style_gram_matrix)
        loss.backward(retain_graph=False)
        # catch memory leak
        # if isinstance(self.target_gram_loss, torch.Tensor):
        # del self.target_gram_loss
        # self.target_gram_loss.data = target_gram_record.grad.detach().clone()
        # clean up memory explicitely
        # del loss, target_gram_matrix, style_gram_matrix, target_gram_record, style_gram_record
        print('gram_record grad:')
        print(target_gram_record.grad.norm())
        gram_out = target_gram_record.grad.detach().clone()
        # gram_out = gram_out / gram_out.norm()
        # print(gram_out)
        return gram_out
def latent_style_transfer(self,
z_base: torch.Tensor,
z_style: torch.Tensor,
z_target: torch.Tensor,
gram_loss: torch.Tensor,
step: int,
plot: bool,
flag: bool,
config):
'''
Computes the gram correlation matrix of z_style and z_transfer
or
accumulates the style loss of the loss layer and the gram matrix.
:param z_base: base model latent vector
:param z_style: style model latent vector
:param z_transfer: transfer style model
:param step: iteration step
:param plot: whether to plot
:param flag: whether to compute the content (true) or style (false) error
:param config: configuration parameters
:return:
'''
# scaling variables for style (alpha) and activation (beta) loss.
alpha = 1.0
beta = config.beta
model_float = np.zeros([self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32)
dimc = self.cell_grid_size
dimf = self.frame_grid_size # coarse model evaluation
frame_flag = np.zeros([dimf + 2, dimf + 2, dimf + 2], np.uint8)
queue = []
frame_batch_num = int(dimf ** 3 / self.test_point_batch_size)
assert frame_batch_num > 0
# get frame grid values: this gets frame voxels that contain above threshold values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_, model_out_ = self.im_network(None, z_target, point_coord, is_training=False)
model_out = model_out_.detach().cpu().numpy()[0]
# TODO: remove dummy data
# model_out = np.random.random(size=[1, 4096, 1])
x_coords = self.frame_x[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
y_coords = self.frame_y[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
z_coords = self.frame_z[i * self.test_point_batch_size:(i + 1) * self.test_point_batch_size]
frame_flag[x_coords + 1, y_coords + 1, z_coords + 1] = np.reshape(
(model_out > self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size])
# get queue and fill up ones
# This puts together a que of frame points to compute values.
for i in range(1, dimf + 1):
for j in range(1, dimf + 1):
for k in range(1, dimf + 1):
maxv = np.max(frame_flag[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2])
minv = np.min(frame_flag[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2])
if maxv != minv:
queue.append((i, j, k))
elif maxv == 1:
x_coords = self.cell_x + (i - 1) * dimc
y_coords = self.cell_y + (j - 1) * dimc
z_coords = self.cell_z + (k - 1) * dimc
model_float[x_coords + 1, y_coords + 1, z_coords + 1] = 1.0
print("running queue:", len(queue))
que_len = len(queue)
cell_batch_size = dimc ** 3
cell_batch_num = int(self.test_point_batch_size / cell_batch_size)
assert cell_batch_num > 0
# run queue
iter_num = 0
points_num = 0
while len(queue) > 0:
batch_num = min(len(queue), cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0] - 1, point[1] - 1, point[2] - 1])
cell_coords = np.concatenate(cell_coords, axis=0)
num_points = cell_coords.shape[0] # record the number of points
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
# Call the model on the base to get base encoder layer
_, model_out_batch_ = self.im_network(None, z_base, cell_coords, is_training=False)
base_activation = self.target_activation[0].clone().detach()
# Call the model on the base to get base encoder layer
_, model_out_batch_ = self.im_network(None, z_target, cell_coords, is_training=False)
target_activation = self.target_activation[0]
# Now compute the gradient
mse_loss = torch.nn.MSELoss()
L2_base = alpha * mse_loss(target_activation, base_activation)
L2_base.backward(retain_graph=False)
# Call the model on the target to get base target | |
<reponame>platforminternetstandaarden/dashboard-internet.nl<filename>dashboard/internet_nl_dashboard/logic/report_to_spreadsheet.py
# SPDX-License-Identifier: Apache-2.0
import itertools
import logging
from string import ascii_uppercase
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Union
import pyexcel as p
from django.utils.text import slugify
from openpyxl import load_workbook
from openpyxl.formatting.rule import CellIsRule
from openpyxl.styles import Font, PatternFill
from dashboard.internet_nl_dashboard.logic import ( # pylint: disable=duplicate-code
MAIL_AUTH_CATEGORY, MAIL_AUTH_FIELDS, MAIL_DNSSEC_CATEGORY, MAIL_DNSSEC_FIELDS,
MAIL_IPV6_CATEGORY, MAIL_IPV6_FIELDS, MAIL_LEGACY_CATEGORY, MAIL_LEGACY_FIELDS,
MAIL_OVERALL_FIELDS, MAIL_TLS_CATEGORY, MAIL_TLS_CERTIFICATE_FIELDS, MAIL_TLS_DANE_FIELDS,
MAIL_TLS_TLS_FIELDS, WEB_APPSECPRIV_CATEGORY, WEB_APPSECPRIV_FIELDS, WEB_DNSSEC_CATEGORY,
WEB_DNSSEC_FIELDS, WEB_IPV6_CATEGORY, WEB_IPV6_FIELDS, WEB_LEGACY_CATEGORY, WEB_LEGACY_FIELDS,
WEB_OVERALL_FIELDS, WEB_TLS_CATEGORY, WEB_TLS_CERTIFICATE_FIELDS, WEB_TLS_DANE_FIELDS,
WEB_TLS_HTTP_FIELDS, WEB_TLS_TLS_FIELDS)
from dashboard.internet_nl_dashboard.logic.internet_nl_translations import (get_po_as_dictionary_v2,
translate_field)
from dashboard.internet_nl_dashboard.models import Account, UrlListReport
log = logging.getLogger(__package__)
# todo: read the preferred language of the user and use the translations matching this user...
po_file_as_dictionary = get_po_as_dictionary_v2('en')
"""
Creates spreadsheets containing report data.
Done: make sure the columns are in a custom order. Columns are defined in scanners. The order of the columns isn't.
Done: make sure you can download the spreadsheet in a single click
Done: add a sane logic to the spreadsheet content
Done: support ods and xlsx.
Done: can we support sum rows at the top for boolean values(?), only for excel. Not for ods, which is too bad.
Done: get column translations. From internet.nl PO files? How do they map to these variables?
Done: write some tests for these methods, once they are more stable.
"""
# Column layout per protocol: 'dns_a_aaaa' covers web reports, 'dns_soa'
# covers mail reports. Each protocol maps group-name -> ordered list of
# report fields; the group name is translated for the category header row,
# and an empty spacer column is emitted after each group when building
# sheets (see category_headers() / headers()).
SANE_COLUMN_ORDER = {
    # scanner
    'dns_a_aaaa': {
        'overall': WEB_OVERALL_FIELDS,
        'ipv6': WEB_IPV6_CATEGORY + WEB_IPV6_FIELDS,
        'dnssec': WEB_DNSSEC_CATEGORY + WEB_DNSSEC_FIELDS,
        'tls': WEB_TLS_CATEGORY + WEB_TLS_HTTP_FIELDS + WEB_TLS_TLS_FIELDS + WEB_TLS_CERTIFICATE_FIELDS +
               WEB_TLS_DANE_FIELDS,
        # Added 24th of May 2019
        'appsecpriv': WEB_APPSECPRIV_CATEGORY + WEB_APPSECPRIV_FIELDS,
        'legacy': WEB_LEGACY_CATEGORY + WEB_LEGACY_FIELDS
    },
    'dns_soa': {
        # any grouping, every group has a empty column between them. The label is not used.
        'overall': MAIL_OVERALL_FIELDS,
        'ipv6': MAIL_IPV6_CATEGORY + MAIL_IPV6_FIELDS,
        'dnssec': MAIL_DNSSEC_CATEGORY + MAIL_DNSSEC_FIELDS,
        'auth': MAIL_AUTH_CATEGORY + MAIL_AUTH_FIELDS,
        # perhaps split these into multiple groups.
        'tls': MAIL_TLS_CATEGORY + MAIL_TLS_TLS_FIELDS + MAIL_TLS_CERTIFICATE_FIELDS + MAIL_TLS_DANE_FIELDS,
        'legacy': MAIL_LEGACY_CATEGORY + MAIL_LEGACY_FIELDS
    },
}
def iter_all_strings():
    """Yield spreadsheet-style column names endlessly: 'A'..'Z', 'AA', 'AB', ...

    https://stackoverflow.com/questions/29351492/how-to-make-a-continuous-alphabetic-list-python-from-a-z-then-from-aa-ab
    """
    size = 1
    while True:
        for letters in itertools.product(ascii_uppercase, repeat=size):
            yield ''.join(letters)
        size += 1
def create_spreadsheet(account: Account, report_id: int):
    """Build an in-memory spreadsheet (pyexcel book) for one report.

    Fails softly, without exceptions, when the report does not exist or is
    not owned by the given account.

    :param account: owner of the report's url list.
    :param report_id: primary key of the UrlListReport.
    :return: tuple of (filename, pyexcel book), or (None, None).
    """
    report = UrlListReport.objects.all().filter(
        urllist__account=account,
        pk=report_id).select_related('urllist').first()
    if not report:
        return None, None
    urls = report.calculation["urls"]
    protocol = 'dns_soa' if report.report_type == 'mail' else 'dns_a_aaaa'
    # rows is a matrix / 2-d array: one list per sheet row.
    # Formula summary rows were deliberately omitted here: functions are not
    # supported in ods and gave inconsistent results across formats.
    rows: List[List[Any]] = [[]]  # empty row for clarity
    rows.append(category_headers(protocol))
    rows.append(headers(protocol))
    rows.extend(urllistreport_to_spreadsheet_data(
        category_name=report.urllist.name, urls=urls, protocol=protocol))
    filename = (f"internet nl dashboard report {report.pk} {report.urllist.name} "
                f"{report.urllist.scan_type} {report.at_when.date()}")
    # The sheet is created in memory and handed to the caller (save/serve/...).
    # http://docs.pyexcel.org/en/latest/tutorial06.html?highlight=memory
    # An issue with Sheet prevents uneven row lengths; bookdicts work fine.
    # Sheet names are limited to 31 characters: most significant info first.
    tabname = f"{report.pk} {report.urllist.scan_type} {report.at_when.date()} {report.urllist.name}"[0:31]
    book = p.get_book(bookdict={slugify(tabname): rows})
    return filename, book
def upgrade_excel_spreadsheet(spreadsheet_data):
    """Rewrite a pyexcel book into a styled xlsx file with summary rows.

    The sheet is saved to a temporary xlsx file and re-opened with openpyxl
    to add: nine statistic rows at the top (live COUNTA/COUNTIF formulas per
    test outcome plus a percentage-passed row), bold headers, a frozen pane,
    and conditional background colors per outcome value.

    Args:
        spreadsheet_data: a pyexcel sheet/book (anything with ``save_as``).

    Returns:
        NamedTemporaryFile: the (delete=False) temp file holding the
        upgraded workbook; the caller owns and must clean up the file.
    """
    with NamedTemporaryFile(suffix=".xlsx", delete=False) as tmp:
        log.debug(f"Saving temp outout to {tmp.name}")
        spreadsheet_data.save_as(array=spreadsheet_data, filename=tmp.name)
        workbook = load_workbook(tmp.name)
        worksheet = workbook.active
        # nicer columns
        worksheet.column_dimensions["A"].width = "30"
        worksheet.column_dimensions["B"].width = "30"
        # Add statistic rows:
        # After inserting 9 rows, headers live on rows 11/12 and data starts
        # at row 13 (hence the 13:5050 formula ranges below).
        worksheet.insert_rows(0, amount=9)
        worksheet['B1'] = "Total"
        worksheet['B2'] = "Passed"
        worksheet['B3'] = "Info"
        worksheet['B4'] = "Warning"
        worksheet['B5'] = "Failed"
        worksheet['B6'] = "Not tested"
        worksheet['B7'] = "Error"
        worksheet['B8'] = "Test not applicable (mail only)"
        worksheet['B9'] = "Percentage passed"
        # bold totals:
        for i in range(1, 10):
            worksheet[f'B{i}'].font = Font(bold=True)
        # Columns that can contain measurement data (F..BK).
        data_columns = [
            'F', 'G', 'H', 'I', 'J', 'K', 'L', "M", "N", 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
            'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO',
            'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ', 'BA', 'BB', 'BC', 'BD',
            'BE', 'BF', 'BG', 'BH', 'BI', 'BJ', 'BK'
        ]
        # add some statistics
        for cell in data_columns:
            # if header, then aggregate
            if worksheet[f'{cell}12'].value:
                # There is a max of 5000 domains per scan. So we set this to something lower.
                # There is no good support of headers versus data, which makes working with excel a drama
                # If you ever read this code, and want a good spreadsheet editor: try Apple Numbers. It's fantastic.
                worksheet[f'{cell}1'] = f'=COUNTA({cell}13:{cell}5050)'
                # todo: also support other values
                worksheet[f'{cell}2'] = f'=COUNTIF({cell}13:{cell}5050, "passed")'
                worksheet[f'{cell}3'] = f'=COUNTIF({cell}13:{cell}5050, "info")'
                worksheet[f'{cell}4'] = f'=COUNTIF({cell}13:{cell}5050, "warning")'
                worksheet[f'{cell}5'] = f'=COUNTIF({cell}13:{cell}5050, "failed")'
                worksheet[f'{cell}6'] = f'=COUNTIF({cell}13:{cell}5050, "not_tested")'
                worksheet[f'{cell}7'] = f'=' \
                                        f'COUNTIF({cell}13:{cell}5050, "error")+' \
                                        f'COUNTIF({cell}13:{cell}5050, "unreachable")+' \
                                        f'COUNTIF({cell}13:{cell}5050, "untestable")+' \
                                        f'COUNTIF({cell}13:{cell}5050, "not_testable")'
                worksheet[f'{cell}8'] = f'=' \
                                        f'COUNTIF({cell}13:{cell}5050, "no_mx")+' \
                                        f'COUNTIF({cell}13:{cell}5050, "not_applicable")'
                # Not applicable and not testable are subtracted from the total.
                # See https://github.com/internetstandards/Internet.nl-dashboard/issues/68
                # Rounding's num digits is NOT the number of digits behind the comma, but the total number of digits.
                # todo: we should use the calculations in report.py. And there include the "missing" / empty stuff IF
                # that is missing.
                # IF( H1=0,0,ROUND( H2÷ H1, 4))
                worksheet[f'{cell}9'] = f'=IF({cell}1=0,0,ROUND({cell}2/{cell}1, 4))'
                worksheet[f'{cell}9'].number_format = '0.00%'
        # make headers bold
        worksheet['A12'].font = Font(bold=True)  # List
        worksheet['B12'].font = Font(bold=True)  # Url
        worksheet['C11'].font = Font(bold=True)  # overall
        worksheet['C12'].font = Font(bold=True)  # % Score
        worksheet['D12'].font = Font(bold=True)  # Report
        for cell in data_columns:
            worksheet[f'{cell}11'].font = Font(bold=True)
            worksheet[f'{cell}12'].font = Font(bold=True)
        # Freeze pane to make navigation easier.
        worksheet.freeze_panes = worksheet['E13']
        # there is probably a feature that puts this in a single conditional value.
        conditional_rules = {
            "passed": PatternFill(start_color='B7FFC8', end_color='B7FFC8', fill_type='solid'),
            "failed": PatternFill(start_color='FFB7B7', end_color='FFB7B7', fill_type='solid'),
            "warning": PatternFill(start_color='FFD9B7', end_color='FFD9B7', fill_type='solid'),
            "info": PatternFill(start_color='B7E3FF', end_color='B7E3FF', fill_type='solid'),
            "good_not_tested": PatternFill(start_color='99FFFF', end_color='C0C0C0', fill_type='solid'),
            "not_tested": PatternFill(start_color='99FFFF', end_color='DBDBDB', fill_type='solid'),
        }
        # Set the measurements to green/red depending on value using conditional formatting.
        # There is no true/false, but we can color based on value.
        for grade, pattern in conditional_rules.items():
            worksheet.conditional_formatting.add(
                'F13:CD5050',
                CellIsRule(operator='=', formula=[f'"{grade}"'], stopIfTrue=True, fill=pattern)
            )
        workbook.save(tmp.name)
        return tmp
def category_headers(protocol: str = 'dns_soa'):
    """Build the category header row for the given protocol.

    Two leading empty cells (for the List and Url columns), then each
    group's translated name padded with empty cells so the groups line up
    with their field columns, plus one spacer cell after every group.
    """
    row: List[str] = ['', '']
    for group_name, fields in SANE_COLUMN_ORDER[protocol].items():
        row.append(translate_field(group_name, translation_dictionary=po_file_as_dictionary))
        # pad the remainder of the group with empty cells
        row.extend([''] * (len(fields) - 1))
        # spacer column to make the distinction per group clearer
        row.append('')
    return row
def headers(protocol: str = 'dns_soa'):
    """Build the translated per-column header row for the given protocol.

    Starts with the List and Url columns, then every group's fields in
    SANE_COLUMN_ORDER, with an empty spacer cell after each group.
    """
    column_names = ['List', 'Url']
    for fields in SANE_COLUMN_ORDER[protocol].values():
        column_names.extend(fields)
        # spacer column to make the distinction per group clearer
        column_names.append('')
    # translate them:
    return [translate_field(name, translation_dictionary=po_file_as_dictionary) for name in column_names]
def formula_row(function: str, protocol: str = 'dns_soa'):
    """Build a row of spreadsheet formulas, one per non-empty header column.

    ``function`` is a %-format template receiving ``column_name`` (the
    spreadsheet column letter, 'A', 'B', ..., 'AA', ...). Columns whose
    header is empty (the group spacers) get an empty cell instead.
    """
    cells = []
    # zip against the infinite column-name generator truncates it to the
    # number of header columns.
    for title, column_name in zip(headers(protocol), iter_all_strings()):
        if title == '':
            cells.append('')
        else:
            cells.append(function % {'column_name': column_name})
    return cells
def urllistreport_to_spreadsheet_data(category_name: str, urls: List[Any], protocol: str = 'dns_soa'):
    """Convert report url data into spreadsheet rows.

    A url with exactly one endpoint becomes a single row (list, url,
    values). A url with multiple endpoints gets a (list, url) row followed
    by one value row per matching endpoint.
    """
    rows = []
    for url in urls:
        endpoints = url['endpoints']
        matching = [ep for ep in endpoints if ep['protocol'] == protocol]
        if len(endpoints) == 1:
            # whole result in one row, which is nicer to look at.
            for endpoint in matching:
                rows.append([category_name, url['url']] +
                            keyed_values_as_boolean(endpoint['ratings_by_type'], protocol))
        else:
            rows.append([category_name, url['url']])
            for endpoint in matching:
                rows.append(['', ''] +
                            keyed_values_as_boolean(endpoint['ratings_by_type'], protocol))
    return rows
def keyed_values_as_boolean(keyed_ratings: Dict[str, Dict[str, Union[str, int]]], protocol: str = 'dns_soa'):
"""
Keyed rating:
{'internet_nl_mail_auth_dkim_exist': {'comply_or_explain_explained_on': '',
'comply_or_explain_explanation': '',
'comply_or_explain_explanation_valid_until': '',
'comply_or_explain_valid_at_time_of_report': False,
'explanation': 'Test '
'internet_nl_mail_auth_dkim_exist '
'resulted in failed.',
'high': 1,
'is_explained': False,
'last_scan': '2019-07-09T11:07:43.510452+00:00',
'low': 0,
'medium': 0,
'not_applicable': False,
'not_testable': False,
'ok': 0,
'scan': 43945,
'scan_type': 'internet_nl_mail_auth_dkim_exist',
'since': '2019-07-09T11:07:43.510175+00:00',
'type': 'internet_nl_mail_auth_dkim_exist'},...
:param keyed_ratings:
:param protocol:
:return:
"""
values = []
for group in SANE_COLUMN_ORDER[protocol]:
for | |
ident, else ``False``.
"""
for c in ident:
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
return True
return False
def matchIdentBearingDistance(db, ov, station):
    """Match the ``/OV`` field against an ident with optional bearing/distance.

    Tries the 3 digit distance pattern first, then falls back to the 2 digit
    variant (items like ``IND05020`` as opposed to what should have been sent,
    ``IND050020``). When the field carries no ident, the reporting station is
    used instead; when it carries no bearing/distance, -1 and 0 are used.

    Args:
        db (object): ``fisb_location`` database containing location information.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station.

    Returns:
        list: Coordinates that match, or ``None`` if there is no match.
    """
    match = IDENT_BEARING_DISTANCE_RE.match(ov)
    if match is None:
        # Fall back to 2 digit distances.
        match = IDENT_BEARING_DISTANCE1_RE.match(ov)
    if match is None:
        return None
    _, _, identGroup, _, bearingGroup, distanceGroup = match.groups()
    ident = station if identGroup is None else identGroup
    if bearingGroup is not None:
        magneticBearing = int(bearingGroup)
        nm = int(distanceGroup)
    else:
        # No bearing/distance found: -1 bearing, zero distance.
        magneticBearing = -1
        nm = 0
    # Database match: either coordinates or None.
    return internalPirepLocation(db, ident, magneticBearing, nm)
def makeGeojson(itemType, coords, msg):
    """Create a geojson FeatureCollection dictionary for a PIREP location.

    Fixes the docstring, which documented a nonexistent ``id (str)``
    parameter and omitted ``msg``.

    Args:
        itemType (str): Geometry type, one of ``Point`` or ``LineString``.
        coords (list): For ``Point``: ``[longitude, latitude]``. For
            ``LineString``: a list of such coordinate pairs.
        msg (dict): Level2 PIREP message; its ``unique_name`` value becomes
            the feature's ``id`` property.

    Returns:
        dict: Dictionary which is a 'geojson' slot of the specified type
        and coordinates, holding a single feature.
    """
    return {'type': 'FeatureCollection',
            'features': [{'type': 'Feature',
                          'geometry': {'type': itemType,
                                       'coordinates': coords},
                          'properties': {'id': msg['unique_name']}}]}
def checkLatLong(db, msg, ov, station):
    """Handle messages whose ``/OV`` field carries latitude and longitude.

    Matches ``/OV`` fields of the form ``xxxxN xxxxxW`` and, on success,
    attaches a Point geojson slot to the message.

    Args:
        db (object): ``fisb_location`` database (unused here; kept for the
            common check* handler signature).
        msg (dict): Level2 PIREP message.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station (unused here).

    Returns:
        tuple: ``(True, augmented msg)`` if a location was found, else
        ``(False, original msg)``.
    """
    match = LAT_LONG_RE.match(ov)
    if match is None:
        return (False, msg)
    latStr, longStr = match.groups()
    latitude = round(float(latStr) / 100.0, 6)
    # The pattern implies a western longitude ('W'), so negate it.
    longitude = -1.0 * round(float(longStr) / 100.0, 6)
    msg['geojson'] = makeGeojson('Point', [longitude, latitude], msg)
    return (True, msg)
def checkIdentBearingDistance(db, msg, ov, station):
    """Handles PIREPS that contain an ident and optional bearing and distance.

    Handles messages with ``/OV`` values consisting of some combination of
    an identifier (station used if not present) and optional bearing and
    distance. This makes up the majority of all messages.

    A typical example of such an ``/OV`` segment is ``IND270020``: 20NM
    from ``IND`` at a magnetic bearing of 270.

    Args:
        db (object): ``fisb_location`` database containing location information.
        msg (dict): Level2 PIREP message.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station.

    Returns:
        tuple: Tuple:

        1. (bool) ``True`` if a location was found and the message
           augmented, or ``False`` if no location was found.
        2. (dict) Augmented ``msg`` with location information if
           available, or just the original ``msg`` if no changes made.
    """
    coords = matchIdentBearingDistance(db, ov, station)
    # PEP 8: None comparisons use identity, not equality ('== None' fixed).
    if coords is None:
        return (False, msg)
    msg['geojson'] = makeGeojson('Point', coords, msg)
    return (True, msg)
def checkRoute(db, msg, ov, station):
    """Create PIREP geometry for route data consisting of multiple points.

    Basically the same as :func:`checkIdentBearingDistance`, except the
    ``/OV`` field contains multiple locations separated by '``-``'
    (e.g. ``HUF-IND-TYQ170020``). Instead of a single point geometry, the
    route's coordinates form a ``LineString``.

    Args:
        db (object): ``fisb_location`` database containing location information.
        msg (dict): Level2 PIREP message.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station.

    Returns:
        tuple: Tuple of:

        1. (bool) ``True`` if every route point resolved and the message
           was augmented, ``False`` otherwise.
        2. (dict) ``msg`` augmented with location information if
           available, or the original ``msg`` if no changes were made.
    """
    legCoords = []
    for leg in ov.split('-'):
        legCoords.append(matchIdentBearingDistance(db, leg, station))
    # Every leg must resolve to coordinates; otherwise leave msg untouched.
    if any(c is None for c in legCoords):
        return (False, msg)
    msg['geojson'] = makeGeojson('LineString', legCoords, msg)
    return (True, msg)
def checkDistanceDirectionFromIdent(db, msg, ov, station):
    """Try to match distance and compass direction from a location.

    Parses ``/OV`` fields like ``10 S BOS``, where the direction is a
    compass direction. This is the second most common kind of ``/OV``
    field. If no ident is present, the reporting station is used.

    Args:
        db (object): ``fisb_location`` database containing location information.
        msg (dict): Level2 PIREP message.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station.

    Returns:
        tuple: Tuple of:

        1. (bool) ``True`` if a location was found and the message
           augmented, ``False`` otherwise.
        2. (dict) ``msg`` augmented with location information if
           available, or the original ``msg`` if no changes were made.
    """
    m = DIST_DIR_FM_IDENT_RE.match(ov)
    if m is None:
        return (False, msg)
    # Groups: 1 distance, 3 'NM'/'SM', 4 compass direction, 7 ident.
    distStr, _, units, direction, _, _, ident = m.groups()
    magneticBearing = BEARING_DICT[direction]
    nm = float(distStr)
    if (units is not None) and (units == 'SM'):
        # Convert statute miles to nautical miles.
        nm = nm * 0.86897624
    if ident is None:
        # No ident in the field means the reporting station is the origin.
        ident = station
    coords = internalPirepLocation(db, ident, magneticBearing, nm)
    if coords is None:
        return (False, msg)
    msg['geojson'] = makeGeojson('Point', coords, msg)
    return (True, msg)
def checkText(db, msg, ov, station):
    """Check PIREP for text based hints.

    Looks for text clues in the ``/OV`` field that indicate the origin of
    the PIREP is the reporting station itself (runway/final phrases, and
    ``DURD``/``DURC`` — presumably "during descent/climb"; confirm).

    Args:
        db (object): ``fisb_location`` database containing location information.
        msg (dict): Level2 PIREP message.
        ov (str): ``/OV`` field from PIREP message.
        station (str): PIREP station.

    Returns:
        tuple: Tuple of:

        1. (bool) ``True`` if a location was found and the message
           augmented, ``False`` otherwise.
        2. (dict) ``msg`` augmented with location information if
           available, or the original ``msg`` if no changes were made.
    """
    # All of these phrases imply the aircraft was at/over the station.
    atStation = ov.startswith(('RUNWAY', 'RWY', 'FINAL', 'ON FINAL',
                               'SHORT FINAL')) or ov in ('DURD', 'DURC')
    if not atStation:
        return (False, msg)
    coords = internalPirepLocation(db, station, -1, 0)
    if coords is None:
        return (False, msg)
    msg['geojson'] = makeGeojson('Point', coords, msg)
    return (True, msg)
def saveUnmatchedPirep(contents):
    """Save any unmatched PIREPs to a file for future study (or amusement).

    Appends ``contents`` plus a trailing newline to the file named by
    ``cfg.SAVE_UNMATCHED_PIREPS_FILE``. NOTE(review): the write here is
    unconditional; gating on ``cfg.SAVE_UNMATCHED_PIREPS`` is presumably
    done by the caller — confirm.

    Args:
        contents (str): Original text contents of a PIREP.
    """
    with open(cfg.SAVE_UNMATCHED_PIREPS_FILE, 'a') as unmatchedFile:
        unmatchedFile.write(contents + '\n')
def pirepLocation(db, msg):
"""Augment message with ``geometry`` information if location
can be decoded..
Take a PIREP message and try to determine its location. If so
the message is augmented with the appropriate ``geojson``
key.
If the location cannot be found, just returns the message.
Args:
db (object): Database connection to ``fisb_location`` database.
msg (dict): Level2 message to augment
Returns:
dict: ``msg``, either augmented or not.
"""
station = msg['station']
ov = msg['ov']
# Check for '-'. This indicates route.
if '-' in ov:
res, msg = checkRoute(db, msg, ov, station)
if res:
return msg
# Most common case. Ident and optional bearing/distance
res, msg = checkIdentBearingDistance(db, msg, ov, station)
if res:
return msg
# Next most coomon case is miles and direction from point in
# form like '10 E IND', '10E | |
traceback):
"""
Method used for with statement
"""
self.screen.close()
if sys.platform == "win32":
import win32con
import win32console
import win32event
import win32file
import pywintypes
class _WindowsScreen(Screen):
"""
Windows screen implementation.
"""
# Virtual key code mapping.
_KEY_MAP = {
win32con.VK_ESCAPE: Screen.KEY_ESCAPE,
win32con.VK_F1: Screen.KEY_F1,
win32con.VK_F2: Screen.KEY_F2,
win32con.VK_F3: Screen.KEY_F3,
win32con.VK_F4: Screen.KEY_F4,
win32con.VK_F5: Screen.KEY_F5,
win32con.VK_F6: Screen.KEY_F6,
win32con.VK_F7: Screen.KEY_F7,
win32con.VK_F8: Screen.KEY_F8,
win32con.VK_F9: Screen.KEY_F9,
win32con.VK_F10: Screen.KEY_F10,
win32con.VK_F11: Screen.KEY_F11,
win32con.VK_F12: Screen.KEY_F12,
win32con.VK_F13: Screen.KEY_F13,
win32con.VK_F14: Screen.KEY_F14,
win32con.VK_F15: Screen.KEY_F15,
win32con.VK_F16: Screen.KEY_F16,
win32con.VK_F17: Screen.KEY_F17,
win32con.VK_F18: Screen.KEY_F18,
win32con.VK_F19: Screen.KEY_F19,
win32con.VK_F20: Screen.KEY_F20,
win32con.VK_F21: Screen.KEY_F21,
win32con.VK_F22: Screen.KEY_F22,
win32con.VK_F23: Screen.KEY_F23,
win32con.VK_F24: Screen.KEY_F24,
win32con.VK_PRINT: Screen.KEY_PRINT_SCREEN,
win32con.VK_INSERT: Screen.KEY_INSERT,
win32con.VK_DELETE: Screen.KEY_DELETE,
win32con.VK_HOME: Screen.KEY_HOME,
win32con.VK_END: Screen.KEY_END,
win32con.VK_LEFT: Screen.KEY_LEFT,
win32con.VK_UP: Screen.KEY_UP,
win32con.VK_RIGHT: Screen.KEY_RIGHT,
win32con.VK_DOWN: Screen.KEY_DOWN,
win32con.VK_PRIOR: Screen.KEY_PAGE_UP,
win32con.VK_NEXT: Screen.KEY_PAGE_DOWN,
win32con.VK_BACK: Screen.KEY_BACK,
win32con.VK_TAB: Screen.KEY_TAB
}
_EXTRA_KEY_MAP = {
win32con.VK_NUMPAD0: Screen.KEY_NUMPAD0,
win32con.VK_NUMPAD1: Screen.KEY_NUMPAD1,
win32con.VK_NUMPAD2: Screen.KEY_NUMPAD2,
win32con.VK_NUMPAD3: Screen.KEY_NUMPAD3,
win32con.VK_NUMPAD4: Screen.KEY_NUMPAD4,
win32con.VK_NUMPAD5: Screen.KEY_NUMPAD5,
win32con.VK_NUMPAD6: Screen.KEY_NUMPAD6,
win32con.VK_NUMPAD7: Screen.KEY_NUMPAD7,
win32con.VK_NUMPAD8: Screen.KEY_NUMPAD8,
win32con.VK_NUMPAD9: Screen.KEY_NUMPAD9,
win32con.VK_MULTIPLY: Screen.KEY_MULTIPLY,
win32con.VK_ADD: Screen.KEY_ADD,
win32con.VK_SUBTRACT: Screen.KEY_SUBTRACT,
win32con.VK_DECIMAL: Screen.KEY_DECIMAL,
win32con.VK_DIVIDE: Screen.KEY_DIVIDE,
win32con.VK_CAPITAL: Screen.KEY_CAPS_LOCK,
win32con.VK_NUMLOCK: Screen.KEY_NUM_LOCK,
win32con.VK_SCROLL: Screen.KEY_SCROLL_LOCK,
win32con.VK_SHIFT: Screen.KEY_SHIFT,
win32con.VK_CONTROL: Screen.KEY_CONTROL,
win32con.VK_MENU: Screen.KEY_MENU,
}
# Foreground colour lookup table.
_COLOURS = {
Screen.COLOUR_BLACK: 0,
Screen.COLOUR_RED: win32console.FOREGROUND_RED,
Screen.COLOUR_GREEN: win32console.FOREGROUND_GREEN,
Screen.COLOUR_YELLOW: (win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN),
Screen.COLOUR_BLUE: win32console.FOREGROUND_BLUE,
Screen.COLOUR_MAGENTA: (win32console.FOREGROUND_RED |
win32console.FOREGROUND_BLUE),
Screen.COLOUR_CYAN: (win32console.FOREGROUND_BLUE |
win32console.FOREGROUND_GREEN),
Screen.COLOUR_WHITE: (win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
}
# Background colour lookup table.
_BG_COLOURS = {
Screen.COLOUR_BLACK: 0,
Screen.COLOUR_RED: win32console.BACKGROUND_RED,
Screen.COLOUR_GREEN: win32console.BACKGROUND_GREEN,
Screen.COLOUR_YELLOW: (win32console.BACKGROUND_RED |
win32console.BACKGROUND_GREEN),
Screen.COLOUR_BLUE: win32console.BACKGROUND_BLUE,
Screen.COLOUR_MAGENTA: (win32console.BACKGROUND_RED |
win32console.BACKGROUND_BLUE),
Screen.COLOUR_CYAN: (win32console.BACKGROUND_BLUE |
win32console.BACKGROUND_GREEN),
Screen.COLOUR_WHITE: (win32console.BACKGROUND_RED |
win32console.BACKGROUND_GREEN |
win32console.BACKGROUND_BLUE)
}
# Attribute lookup table
_ATTRIBUTES = {
0: lambda x: x,
Screen.A_BOLD: lambda x: x | win32console.FOREGROUND_INTENSITY,
Screen.A_NORMAL: lambda x: x,
# Windows console uses a bitmap where background is the top nibble,
# so we can reverse by swapping nibbles.
Screen.A_REVERSE: lambda x: ((x & 15) * 16) + ((x & 240) // 16),
Screen.A_UNDERLINE: lambda x: x
}
def __init__(self, stdout, stdin, buffer_height, old_out, old_in,
unicode_aware=False):
"""
:param stdout: The win32console PyConsoleScreenBufferType object for stdout.
:param stdin: The win32console PyConsoleScreenBufferType object for stdin.
:param buffer_height: The buffer height for this window (for testing only).
:param old_out: The original win32console PyConsoleScreenBufferType object for stdout
that should be restored on exit.
:param old_in: The original stdin state that should be restored on exit.
:param unicode_aware: Whether this Screen can use unicode or not.
"""
# Save off the screen details and set up the scrolling pad.
info = stdout.GetConsoleScreenBufferInfo()['Window']
width = info.Right - info.Left + 1
height = info.Bottom - info.Top + 1
# Detect UTF-8 if needed and then construct the Screen.
if unicode_aware is None:
# According to MSDN, 65001 is the Windows UTF-8 code page.
unicode_aware = win32console.GetConsoleCP() == 65001
super(_WindowsScreen, self).__init__(
height, width, buffer_height, unicode_aware)
# Save off the console details.
self._stdout = stdout
self._stdin = stdin
self._last_width = width
self._last_height = height
self._old_out = old_out
self._old_in = old_in
# Windows is limited to the ANSI colour set.
self.colours = 8
# Opt for compatibility with Linux by default
self._map_all = False
# Set of keys currently pressed.
self._keys = set()
def close(self, restore=True):
"""
Close down this Screen and tidy up the environment as required.
:param restore: whether to restore the environment or not.
"""
if restore:
# Reset the original screen settings.
self._old_out.SetConsoleActiveScreenBuffer()
self._stdin.SetConsoleMode(self._old_in)
def map_all_keys(self, state):
"""
Switch on extended keyboard mapping for this Screen.
:param state: Boolean flag where true means map all keys.
Enabling this setting will allow Windows to tell you when any key
is pressed, including metakeys (like shift and control) and whether
the numeric keypad keys have been used.
.. warning::
Using this means your application will not be compatible across
all platforms.
"""
self._map_all = state
        def get_event(self):
            """
            Check for any event without waiting.

            :returns: a KeyboardEvent or MouseEvent if anything useful was
                found on the console input queue, or None once the queue has
                been drained without producing an interesting event.
            """
            # Look for a new event and consume it if there is one.
            while len(self._stdin.PeekConsoleInput(1)) > 0:
                event = self._stdin.ReadConsoleInput(1)[0]
                if event.EventType == win32console.KEY_EVENT:
                    # Pasting unicode text appears to just generate key-up
                    # events (as if you had pressed the Alt keys plus the
                    # keypad code for the character), but the rest of the
                    # console input simply doesn't
                    # work with key up events - e.g. misses keyboard repeats.
                    #
                    # We therefore allow any key press (i.e. KeyDown) event and
                    # _any_ event that appears to have popped up from nowhere
                    # as long as the Alt key is present.
                    key_code = ord(event.Char)
                    logger.debug("Processing key: %x", key_code)
                    if (event.KeyDown or
                            (key_code > 0 and key_code not in self._keys and
                             event.VirtualKeyCode == win32con.VK_MENU)):
                        # Record any keys that were pressed.
                        if event.KeyDown:
                            self._keys.add(key_code)
                        # Translate keys into a KeyboardEvent object.
                        if event.VirtualKeyCode in self._KEY_MAP:
                            key_code = self._KEY_MAP[event.VirtualKeyCode]
                        # Sadly, we are limited to Linux terminal input and so
                        # can't return modifier states in a cross-platform way.
                        # If the user decided not to be cross-platform, so be
                        # it, otherwise map some standard bindings for extended
                        # keys.
                        if (self._map_all and
                                event.VirtualKeyCode in self._EXTRA_KEY_MAP):
                            key_code = self._EXTRA_KEY_MAP[event.VirtualKeyCode]
                        else:
                            # Shift-Tab is the one extended binding mapped even
                            # in cross-platform mode.
                            if (event.VirtualKeyCode == win32con.VK_TAB and
                                    event.ControlKeyState &
                                    win32con.SHIFT_PRESSED):
                                key_code = Screen.KEY_BACK_TAB
                        # Don't return anything if we didn't have a valid
                        # mapping.
                        if key_code:
                            return KeyboardEvent(key_code)
                    else:
                        # Tidy up any key that was previously pressed. At
                        # start-up, we may be mid-key, so can't assume this must
                        # always match up.
                        if key_code in self._keys:
                            self._keys.remove(key_code)
                elif event.EventType == win32console.MOUSE_EVENT:
                    # Translate into a MouseEvent object.
                    logger.debug("Processing mouse: %d, %d",
                                 event.MousePosition.X, event.MousePosition.Y)
                    button = 0
                    if event.EventFlags == 0:
                        # Button pressed - translate it.
                        if (event.ButtonState &
                                win32con.FROM_LEFT_1ST_BUTTON_PRESSED != 0):
                            button |= MouseEvent.LEFT_CLICK
                        if (event.ButtonState &
                                win32con.RIGHTMOST_BUTTON_PRESSED != 0):
                            button |= MouseEvent.RIGHT_CLICK
                    elif event.EventFlags & win32con.DOUBLE_CLICK != 0:
                        button |= MouseEvent.DOUBLE_CLICK
                    return MouseEvent(event.MousePosition.X,
                                      event.MousePosition.Y,
                                      button)
            # If we get here, we've fully processed the event queue and found
            # nothing interesting.
            return None
def has_resized(self):
"""
Check whether the screen has been re-sized.
"""
# Get the current Window dimensions and check them against last
# time.
re_sized = False
info = self._stdout.GetConsoleScreenBufferInfo()['Window']
width = info.Right - info.Left + 1
height = info.Bottom - info.Top + 1
if width != self._last_width or height != self._last_height:
re_sized = True
return re_sized
def _change_colours(self, colour, attr, bg):
"""
Change current colour if required.
:param colour: New colour to use.
:param attr: New attributes to use.
:param bg: New background colour to use.
"""
# Change attribute first as this will reset colours when swapping
# modes.
if colour != self._colour or attr != self._attr or self._bg != bg:
new_attr = self._ATTRIBUTES[attr](
self._COLOURS[colour] + self._BG_COLOURS[bg])
self._stdout.SetConsoleTextAttribute(new_attr)
self._attr = attr
self._colour = colour
self._bg = bg
def _print_at(self, text, x, y, width):
"""
Print string at the required location.
:param text: The text string to print.
:param x: The x coordinate
:param y: The Y coordinate
:param width: The width of the character (for dual-width glyphs in CJK languages).
"""
# We can throw temporary errors on resizing, so catch and ignore
# them on the assumption that we'll resize shortly.
try:
# Move the cursor if necessary
if x != self._cur_x or y != self._cur_y:
self._stdout.SetConsoleCursorPosition(
win32console.PyCOORDType(x, y))
# Print the text at the required location and update the current
# position.
self._stdout.WriteConsole(text)
self._cur_x = x + width
self._cur_y = y
except pywintypes.error:
pass
def wait_for_input(self, timeout):
"""
Wait until there is some input or the timeout is hit.
:param timeout: Time to wait for input in seconds (floating point).
"""
rc = win32event.WaitForSingleObject(self._stdin, int(timeout * 1000))
if rc not in [0, 258]:
raise RuntimeError(rc)
def _scroll(self, lines):
"""
Scroll the window up or down.
:param lines: Number of lines to scroll. Negative numbers scroll
down.
"""
# Scroll the visible screen up by one line
info = self._stdout.GetConsoleScreenBufferInfo()['Window']
rectangle = win32console.PySMALL_RECTType(
info.Left, info.Top + lines, info.Right, info.Bottom)
new_pos = win32console.PyCOORDType(0, info.Top)
self._stdout.ScrollConsoleScreenBuffer(
rectangle, None, new_pos, " ", 0)
def _clear(self):
"""
Clear the terminal.
"""
info = self._stdout.GetConsoleScreenBufferInfo()['Window']
| |
:param usage_date:
:type usage_date: ~datetime.date
:param user_principal_name:
:type user_principal_name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'completed_black_and_white_job_count': {'key': 'completedBlackAndWhiteJobCount', 'type': 'long'},
'completed_color_job_count': {'key': 'completedColorJobCount', 'type': 'long'},
'incomplete_job_count': {'key': 'incompleteJobCount', 'type': 'long'},
'usage_date': {'key': 'usageDate', 'type': 'date'},
'user_principal_name': {'key': 'userPrincipalName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPrintUsageSummaryByUser, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.completed_black_and_white_job_count = kwargs.get('completed_black_and_white_job_count', None)
self.completed_color_job_count = kwargs.get('completed_color_job_count', None)
self.incomplete_job_count = kwargs.get('incomplete_job_count', None)
self.usage_date = kwargs.get('usage_date', None)
self.user_principal_name = kwargs.get('user_principal_name', None)
class MicrosoftGraphProvisionedIdentity(msrest.serialization.Model):
    """provisionedIdentity.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param details: detailsInfo.
    :type details: dict[str, object]
    :param display_name:
    :type display_name: str
    :param id:
    :type id: str
    :param identity_type:
    :type identity_type: str
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'details': {'key': 'details', 'type': '{object}'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'identity_type': {'key': 'identityType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProvisionedIdentity, self).__init__(**kwargs)
        # Each declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'details', 'display_name',
                          'id', 'identity_type'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphProvisioningObjectSummary(MicrosoftGraphEntity):
    """provisioningObjectSummary.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param action:
    :type action: str
    :param activity_date_time:
    :type activity_date_time: ~datetime.datetime
    :param change_id:
    :type change_id: str
    :param cycle_id:
    :type cycle_id: str
    :param duration_in_milliseconds:
    :type duration_in_milliseconds: int
    :param initiated_by: initiator.
    :type initiated_by: ~reports.models.MicrosoftGraphInitiator
    :param job_id:
    :type job_id: str
    :param modified_properties:
    :type modified_properties: list[~reports.models.MicrosoftGraphModifiedProperty]
    :param provisioning_steps:
    :type provisioning_steps: list[~reports.models.MicrosoftGraphProvisioningStep]
    :param service_principal: provisioningServicePrincipal.
    :type service_principal: ~reports.models.MicrosoftGraphProvisioningServicePrincipal
    :param source_identity: provisionedIdentity.
    :type source_identity: ~reports.models.MicrosoftGraphProvisionedIdentity
    :param source_system: provisioningSystemDetails.
    :type source_system: ~reports.models.MicrosoftGraphProvisioningSystemDetails
    :param status_info: statusBase.
    :type status_info: ~reports.models.MicrosoftGraphStatusBase
    :param target_identity: provisionedIdentity.
    :type target_identity: ~reports.models.MicrosoftGraphProvisionedIdentity
    :param target_system: provisioningSystemDetails.
    :type target_system: ~reports.models.MicrosoftGraphProvisioningSystemDetails
    :param tenant_id:
    :type tenant_id: str
    """

    # durationInMilliseconds is constrained to a signed 32-bit range.
    _validation = {
        'duration_in_milliseconds': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'action': {'key': 'action', 'type': 'str'},
        'activity_date_time': {'key': 'activityDateTime', 'type': 'iso-8601'},
        'change_id': {'key': 'changeId', 'type': 'str'},
        'cycle_id': {'key': 'cycleId', 'type': 'str'},
        'duration_in_milliseconds': {'key': 'durationInMilliseconds', 'type': 'int'},
        'initiated_by': {'key': 'initiatedBy', 'type': 'MicrosoftGraphInitiator'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'modified_properties': {'key': 'modifiedProperties', 'type': '[MicrosoftGraphModifiedProperty]'},
        'provisioning_steps': {'key': 'provisioningSteps', 'type': '[MicrosoftGraphProvisioningStep]'},
        'service_principal': {'key': 'servicePrincipal', 'type': 'MicrosoftGraphProvisioningServicePrincipal'},
        'source_identity': {'key': 'sourceIdentity', 'type': 'MicrosoftGraphProvisionedIdentity'},
        'source_system': {'key': 'sourceSystem', 'type': 'MicrosoftGraphProvisioningSystemDetails'},
        'status_info': {'key': 'statusInfo', 'type': 'MicrosoftGraphStatusBase'},
        'target_identity': {'key': 'targetIdentity', 'type': 'MicrosoftGraphProvisionedIdentity'},
        'target_system': {'key': 'targetSystem', 'type': 'MicrosoftGraphProvisioningSystemDetails'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProvisioningObjectSummary, self).__init__(**kwargs)
        # 'id' is handled by the MicrosoftGraphEntity base class; every other
        # declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'action',
                          'activity_date_time', 'change_id', 'cycle_id',
                          'duration_in_milliseconds', 'initiated_by', 'job_id',
                          'modified_properties', 'provisioning_steps',
                          'service_principal', 'source_identity',
                          'source_system', 'status_info', 'target_identity',
                          'target_system', 'tenant_id'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphProvisioningServicePrincipal(MicrosoftGraphIdentity):
    """provisioningServicePrincipal.

    :param display_name: The identity's display name. Note that this may not always be available
     or up to date. For example, if a user changes their display name, the API may show the new
     value in a future response, but the items associated with the user won't show up as having
     changed when using delta.
    :type display_name: str
    :param id: Unique identifier for the identity.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProvisioningServicePrincipal, self).__init__(**kwargs)
        # display_name/id are handled by the MicrosoftGraphIdentity base class.
        self.additional_properties = kwargs.get('additional_properties', None)
class MicrosoftGraphProvisioningStep(msrest.serialization.Model):
    """provisioningStep.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param description:
    :type description: str
    :param details: detailsInfo.
    :type details: dict[str, object]
    :param name:
    :type name: str
    :param provisioning_step_type: Possible values include: "import", "scoping", "matching",
     "processing", "referenceResolution", "export", "unknownFutureValue".
    :type provisioning_step_type: str or ~reports.models.MicrosoftGraphProvisioningStepType
    :param status: Possible values include: "success", "failure", "skipped", "unknownFutureValue".
    :type status: str or ~reports.models.MicrosoftGraphProvisioningResult
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'description': {'key': 'description', 'type': 'str'},
        'details': {'key': 'details', 'type': '{object}'},
        'name': {'key': 'name', 'type': 'str'},
        'provisioning_step_type': {'key': 'provisioningStepType', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProvisioningStep, self).__init__(**kwargs)
        # Each declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'description', 'details',
                          'name', 'provisioning_step_type', 'status'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphProvisioningSystemDetails(msrest.serialization.Model):
    """provisioningSystemDetails.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param details: detailsInfo.
    :type details: dict[str, object]
    :param display_name:
    :type display_name: str
    :param id:
    :type id: str
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'details': {'key': 'details', 'type': '{object}'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProvisioningSystemDetails, self).__init__(**kwargs)
        # Each declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'details', 'display_name',
                          'id'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphRelyingPartyDetailedSummary(MicrosoftGraphEntity):
    """relyingPartyDetailedSummary.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param failed_sign_in_count:
    :type failed_sign_in_count: long
    :param migration_status: Possible values include: "ready", "needsReview",
     "additionalStepsRequired", "unknownFutureValue".
    :type migration_status: str or ~reports.models.MicrosoftGraphMigrationStatus
    :param migration_validation_details:
    :type migration_validation_details: list[~reports.models.MicrosoftGraphKeyValuePair]
    :param relying_party_id:
    :type relying_party_id: str
    :param relying_party_name:
    :type relying_party_name: str
    :param reply_urls:
    :type reply_urls: list[str]
    :param service_id:
    :type service_id: str
    :param sign_in_success_rate:
    :type sign_in_success_rate: float
    :param successful_sign_in_count:
    :type successful_sign_in_count: long
    :param total_sign_in_count:
    :type total_sign_in_count: long
    :param unique_user_count:
    :type unique_user_count: long
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'failed_sign_in_count': {'key': 'failedSignInCount', 'type': 'long'},
        'migration_status': {'key': 'migrationStatus', 'type': 'str'},
        'migration_validation_details': {'key': 'migrationValidationDetails', 'type': '[MicrosoftGraphKeyValuePair]'},
        'relying_party_id': {'key': 'relyingPartyId', 'type': 'str'},
        'relying_party_name': {'key': 'relyingPartyName', 'type': 'str'},
        'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
        'service_id': {'key': 'serviceId', 'type': 'str'},
        'sign_in_success_rate': {'key': 'signInSuccessRate', 'type': 'float'},
        'successful_sign_in_count': {'key': 'successfulSignInCount', 'type': 'long'},
        'total_sign_in_count': {'key': 'totalSignInCount', 'type': 'long'},
        'unique_user_count': {'key': 'uniqueUserCount', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphRelyingPartyDetailedSummary, self).__init__(**kwargs)
        # 'id' is handled by the MicrosoftGraphEntity base class; every other
        # declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'failed_sign_in_count',
                          'migration_status', 'migration_validation_details',
                          'relying_party_id', 'relying_party_name',
                          'reply_urls', 'service_id', 'sign_in_success_rate',
                          'successful_sign_in_count', 'total_sign_in_count',
                          'unique_user_count'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphReport(msrest.serialization.Model):
    """Device Configuration profile History reports.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param content: Not yet documented.
    :type content: bytes
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types;
    # 'content' is carried base64-encoded on the wire.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'content': {'key': 'content', 'type': 'base64'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphReport, self).__init__(**kwargs)
        # Each declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties', 'content'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphReportRoot(MicrosoftGraphEntity):
    """The resource that represents an instance of History Reports.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param application_sign_in_detailed_summary:
    :type application_sign_in_detailed_summary:
     list[~reports.models.MicrosoftGraphApplicationSignInDetailedSummary]
    :param credential_user_registration_details:
    :type credential_user_registration_details:
     list[~reports.models.MicrosoftGraphCredentialUserRegistrationDetails]
    :param user_credential_usage_details:
    :type user_credential_usage_details:
     list[~reports.models.MicrosoftGraphUserCredentialUsageDetails]
    :param daily_print_usage_summaries_by_printer:
    :type daily_print_usage_summaries_by_printer:
     list[~reports.models.MicrosoftGraphPrintUsageSummaryByPrinter]
    :param daily_print_usage_summaries_by_user:
    :type daily_print_usage_summaries_by_user:
     list[~reports.models.MicrosoftGraphPrintUsageSummaryByUser]
    :param monthly_print_usage_summaries_by_printer:
    :type monthly_print_usage_summaries_by_printer:
     list[~reports.models.MicrosoftGraphPrintUsageSummaryByPrinter]
    :param monthly_print_usage_summaries_by_user:
    :type monthly_print_usage_summaries_by_user:
     list[~reports.models.MicrosoftGraphPrintUsageSummaryByUser]
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'application_sign_in_detailed_summary': {'key': 'applicationSignInDetailedSummary', 'type': '[MicrosoftGraphApplicationSignInDetailedSummary]'},
        'credential_user_registration_details': {'key': 'credentialUserRegistrationDetails', 'type': '[MicrosoftGraphCredentialUserRegistrationDetails]'},
        'user_credential_usage_details': {'key': 'userCredentialUsageDetails', 'type': '[MicrosoftGraphUserCredentialUsageDetails]'},
        'daily_print_usage_summaries_by_printer': {'key': 'dailyPrintUsageSummariesByPrinter', 'type': '[MicrosoftGraphPrintUsageSummaryByPrinter]'},
        'daily_print_usage_summaries_by_user': {'key': 'dailyPrintUsageSummariesByUser', 'type': '[MicrosoftGraphPrintUsageSummaryByUser]'},
        'monthly_print_usage_summaries_by_printer': {'key': 'monthlyPrintUsageSummariesByPrinter', 'type': '[MicrosoftGraphPrintUsageSummaryByPrinter]'},
        'monthly_print_usage_summaries_by_user': {'key': 'monthlyPrintUsageSummariesByUser', 'type': '[MicrosoftGraphPrintUsageSummaryByUser]'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphReportRoot, self).__init__(**kwargs)
        # 'id' is handled by the MicrosoftGraphEntity base class; every other
        # declared attribute defaults to None when absent from kwargs.
        for attr_name in ('additional_properties',
                          'application_sign_in_detailed_summary',
                          'credential_user_registration_details',
                          'user_credential_usage_details',
                          'daily_print_usage_summaries_by_printer',
                          'daily_print_usage_summaries_by_user',
                          'monthly_print_usage_summaries_by_printer',
                          'monthly_print_usage_summaries_by_user'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
class MicrosoftGraphSignIn(MicrosoftGraphEntity):
"""signIn.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param alternate_sign_in_name:
:type alternate_sign_in_name: str
:param app_display_name: App name displayed in the Azure Portal.
:type app_display_name: str
:param app_id: Unique GUID representing the app ID in the Azure | |
self.__innerwire.GetEndpoint()
@property
def Direction(self):
    """
    The direction of the wire.

    Wires may be declared "readonly" or "writeonly" in the service definition
    file; if neither is specified the wire is full duplex. "readonly" wires
    only send packets from service to client, "writeonly" wires only from
    client to service. Compare the result against the
    ``MemberDefinition_Direction`` constants.

    :rtype: int
    """
    direction = self.__innerwire.Direction()
    return direction
def AsyncClose(self, handler, timeout=2):
    """
    Asynchronously close the wire connection.

    Same as Close() but returns asynchronously.

    :param handler: Handler called on completion, possibly with an exception
    :type handler: Callable[[Exception],None]
    :param timeout: Timeout in seconds, or -1 for infinite
    :type timeout: float
    """
    close_args = (adjust_timeout(timeout),)
    return async_call(self.__innerwire.AsyncClose, close_args, AsyncVoidReturnDirectorImpl, handler)
@property
def InValue(self):
    """
    Get the current InValue.

    Returns the most recent value transmitted from the peer. Raises
    ValueNotSetException if no value has been received, or if the most
    recent value's lifespan has expired.
    """
    packed = self.__innerwire.GetInValue()
    node = self.__innerwire.GetNode()
    return UnpackMessageElement(packed, self.__type, self.__obj, node)
@property
def OutValue(self):
    """
    Read or set the OutValue.

    Assigning to this property transmits the value to the peer connection,
    where it becomes the peer's InValue. Transmission is unreliable: older
    values may be dropped when newer values arrive. Reading returns the
    most recent OutValue that was set.
    """
    packed = self.__innerwire.GetOutValue()
    node = self.__innerwire.GetNode()
    return UnpackMessageElement(packed, self.__type, self.__obj, node)
@OutValue.setter
def OutValue(self, value):
    # Pack into a message element and hand off to the inner wire for transmission
    packed = PackMessageElement(value, self.__type, self.__obj, self.__innerwire.GetNode())
    return self.__innerwire.SetOutValue(packed)
@property
def LastValueReceivedTime(self):
    """
    Timestamp of the last received value, expressed in the *sender's* clock.

    :rtype: RobotRaconteur.TimeSpec
    """
    received_ts = self.__innerwire.GetLastValueReceivedTime()
    return received_ts
@property
def LastValueSentTime(self):
    """
    Timestamp of the last sent value, expressed in the *local* clock.

    :rtype: RobotRaconteur.TimeSpec
    """
    sent_ts = self.__innerwire.GetLastValueSentTime()
    return sent_ts
@property
def InValueValid(self):
    """
    Whether the InValue is valid.

    The InValue is valid when a value has been received and it has not
    yet expired.

    :rtype: bool
    """
    valid = self.__innerwire.GetInValueValid()
    return valid
@property
def OutValueValid(self):
    """
    Whether the OutValue is valid.

    The OutValue becomes valid once a value has been set through the
    OutValue property.

    :rtype: bool
    """
    valid = self.__innerwire.GetOutValueValid()
    return valid
@property
def IgnoreInValue(self):
    """
    Whether this wire connection ignores incoming values.

    A connection used only for sending may choose to discard incoming
    values, since accumulating unread values is a potential memory leak.
    When True, incoming values are discarded.

    :rtype: bool
    """
    ignoring = self.__innerwire.GetIgnoreInValue()
    return ignoring
@IgnoreInValue.setter
def IgnoreInValue(self, value):
    # Forward the flag straight to the inner wire connection
    self.__innerwire.SetIgnoreInValue(value)
def TryGetInValue(self):
    """
    Try getting the InValue without raising.

    Returns the current InValue and its timestamp, reporting success or
    failure with a flag instead of an exception.

    :return: Tuple of success, in value, and timespec
    :rtype: Tuple[bool,T,RobotRaconteur.TimeSpec]
    """
    attempt = self.__innerwire.TryGetInValue()
    if attempt.res:
        unpacked = UnpackMessageElement(attempt.value, self.__type, self.__obj, self.__innerwire.GetNode())
        return (True, unpacked, attempt.ts)
    return (False, None, None)
def TryGetOutValue(self):
    """
    Try getting the OutValue without raising.

    Returns the current OutValue and its timestamp, reporting success or
    failure with a flag instead of an exception.

    :return: Tuple of success, out value, and timespec
    :rtype: Tuple[bool,T,RobotRaconteur.TimeSpec]
    """
    attempt = self.__innerwire.TryGetOutValue()
    if attempt.res:
        unpacked = UnpackMessageElement(attempt.value, self.__type, self.__obj, self.__innerwire.GetNode())
        return (True, unpacked, attempt.ts)
    return (False, None, None)
@property
def WireValueChanged(self):
    """
    Event hook for InValue changes. Attach handlers with ``+=``.

    .. code-block:: python

        def my_handler(con, value, ts):
            # Handle new value
            pass

        my_wire_connection.WireValueChanged += my_handler

    Handlers must have signature
    ``Callable[[RobotRaconteur.WireConnection,T,RobotRaconteur.TimeSpec],None]``

    :rtype: RobotRaconteur.EventHook
    """
    return self._WireValueChanged
@WireValueChanged.setter
def WireValueChanged(self, evt):
    # Only ``+=``/``-=`` on the existing hook are allowed; rebinding the
    # hook object itself is an error.
    if evt is not self._WireValueChanged:
        raise RuntimeError("Invalid operation")
@property
def InValueLifespan(self):
    """
    Lifespan of the InValue in seconds, or -1 for infinite.

    When a finite lifespan is set, the InValue expires that many seconds
    after reception; it is then cleared, and accessing it raises
    ValueNotSetException. Use a lifespan to avoid acting on stale values:
    without one, the last received value is returned indefinitely.

    :rtype: float
    """
    # Inner API reports milliseconds; negative means infinite.
    millis = self.__innerwire.GetInValueLifespan()
    return millis if millis < 0 else float(millis) / 1000.0
@InValueLifespan.setter
def InValueLifespan(self, secs):
    # Inner API expects milliseconds; any negative input means infinite.
    millis = -1 if secs < 0 else int(secs * 1000.0)
    self.__innerwire.SetInValueLifespan(millis)
@property
def OutValueLifespan(self):
    """
    Lifespan of the OutValue in seconds, or -1 for infinite.

    When a finite lifespan is set, the OutValue expires that many seconds
    after sending; it is then cleared, and accessing it raises
    ValueNotSetException. Use a lifespan to avoid reading back a stale
    value: without one, the last sent value is returned indefinitely.

    :rtype: float
    """
    # Inner API reports milliseconds; negative means infinite.
    millis = self.__innerwire.GetOutValueLifespan()
    return millis if millis < 0 else float(millis) / 1000.0
@OutValueLifespan.setter
def OutValueLifespan(self, secs):
    # Inner API expects milliseconds; any negative input means infinite.
    millis = -1 if secs < 0 else int(secs * 1000.0)
    self.__innerwire.SetOutValueLifespan(millis)
def GetNode(self):
    """Return the node associated with this wire connection."""
    node = self.__innerwire.GetNode()
    return node
class WireConnectionDirector(RobotRaconteurPython.WrappedWireConnectionDirector):
    """Director that forwards inner wire connection events to the Python-side
    WireConnection (value-changed notifications and close callbacks)."""
    def __init__(self, endpoint, type, obj=None, innerep=None):
        self.__endpoint = endpoint
        self.__type = type
        self.__obj = obj
        self.__innerep = innerep
        super(WireConnectionDirector, self).__init__()
    def WireValueChanged(self, value, time):
        # Unpack the wire packet and fire the public event hook
        unpacked = UnpackMessageElement(value, self.__type, self.__obj, self.__innerep.GetNode())
        self.__endpoint.WireValueChanged.fire(self.__endpoint, unpacked, time)
    def WireConnectionClosedCallback(self):
        callback = self.__endpoint.WireConnectionClosedCallback
        if callback is not None:
            callback(self.__endpoint)
class WireAsyncConnectHandlerImpl(RobotRaconteurPython.AsyncWireConnectionReturnDirector):
    """Adapter that completes an asynchronous wire connect: wraps the inner
    endpoint in a WireConnection and invokes the user handler."""
    def __init__(self, handler, innerpipe, obj):
        super(WireAsyncConnectHandlerImpl, self).__init__()
        self._handler = handler
        self.__innerpipe = innerpipe
        self.__obj = obj
    def handler(self, innerendpoint, error_info):
        if error_info.error_code != 0:
            # Translate the inner error info into a Python exception
            err = RobotRaconteurPythonError.RobotRaconteurExceptionUtil.ErrorInfoToException(error_info)
            self._handler(None, err)
            return
        try:
            connection = WireConnection(innerendpoint, self.__innerpipe.Type, self.__obj)
            director = WireConnectionDirector(connection, self.__innerpipe.Type, self.__obj, innerendpoint)
            innerendpoint.SetRRDirector(director, 0)
            # Ownership of the director transfers to the C++ side
            director.__disown__()
        except Exception as setup_err:
            self._handler(None, setup_err)
            return
        self._handler(connection, None)
class WireAsyncPeekReturnDirectorImpl(RobotRaconteurPython.AsyncWirePeekReturnDirector):
    """Adapter that delivers the result of an asynchronous wire peek, as a
    (value, timestamp) tuple, to the user handler."""
    def __init__(self, handler, innerpipe, obj):
        super(WireAsyncPeekReturnDirectorImpl, self).__init__()
        self._handler = handler
        self.__innerpipe = innerpipe
        self.__obj = obj
    def handler(self, m, ts, error_info):
        if error_info.error_code != 0:
            # Translate the inner error info into a Python exception
            err = RobotRaconteurPythonError.RobotRaconteurExceptionUtil.ErrorInfoToException(error_info)
            self._handler((None, None), err)
            return
        unpacked = UnpackMessageElement(m, self.__innerpipe.Type, self.__obj, self.__innerpipe.GetNode())
        self._handler((unpacked, ts), None)
class Wire(object):
"""
Wire()
\"wire\" member type interface
The Wire class implements the \"wire\" member type. Wires are declared in service definition files
using the \"wire\" keyword within object declarations. Wires provide "most recent" value streaming
between clients and services. They work by creating "connection" pairs between the client and service.
The wire streams the current value between the wire connection pairs using packets. Wires
are unreliable; only the most recent value is of interest, and any older values
will be dropped. Wire connections have an InValue and an OutValue. Users set the OutValue on the
connection. The new OutValue is transmitted to the peer wire connection, and becomes the peer's
InValue. The peer can then read the InValue. The client and service have their own InValue
and OutValue, meaning that each direction, client to service or service to client, has its own
value.
Wire connections are created using the Connect() or AsyncConnect() functions. Services receive
incoming connection requests through a callback function. Thes callback is configured using
the SetWireConnectCallback() function. Services may also use the WireBroadcaster class
or WireUnicastReceiver class to automate managing wire connection lifecycles. WireBroadcaster
is used to send values to all connected clients. WireUnicastReceiver is used to receive the
value from the most recent wire connection. See WireConnection for details on sending
and receiving streaming values.
Wire clients may also optionally "peek" and "poke" the wire without forming a streaming
connection. This is useful if the client needs to read the InValue or set the OutValue
instantaniously, but does not need continuous updating. PeekInValue() or
AsyncPeekInValue() will retrieve the client's current InValue. PokeOutValue() or
AsyncPokeOutValue() will send a new client OutValue to the service.
PeekOutValue() or AsyncPeekOutValue() will retrieve the last client OutValue received by
the service.
"Peek" and "poke" operations initiated by the client are received on the service using
callbacks. Use PeekInValueCallback, PeekOutValueCallback,
and PokeOutValueCallback to configure the callbacks to handle these requests.
WireBroadcaster and WireUnicastReceiver configure these callbacks automatically, so
the user does not need to configure the callbacks when these classes are used.
Wires can be declared *readonly* or *writeonly*. If neither is specified, the wire is assumed
to be full duplex. *readonly* pipes may only send values from service to client, ie OutValue
on service side and InValue on client side. *writeonly* pipes may only send values from
client to service, ie OutValue on client side and InValue on service side. Use Direction()
to determine the direction of the | |
= Linear([rf], [output_dim], output_dim, random_state=random_state,
name=proj_o_name, init=init, scale=scale, biases=biases, bias_offset=bias_offset,
strict=strict)
return out, att
def TransformerBlock(value, key, query_and_passthrough, output_dim, n_heads=8, mask=False, random_state=None, name=None, debug=False):
    """Single transformer block: optional masked self-attention, multi-head
    attention over (value, key), then a position-wise feed-forward network.
    Each stage is followed by a residual connection and layer norm.

    Returns a tuple of (block output, attention weights of the main
    attention stage).
    """
    if random_state is None:
        raise ValueError("Must pass instance of np.random.RandomState!")
    if name is None:
        name = _get_name()
    query = query_and_passthrough
    if mask:
        # Masked self-attention over the query stream (decoder-style)
        masked_proj, _masked_att = MultiheadAttention(query, query, query, output_dim, n_heads=n_heads, mask=True, random_state=random_state, name=name + "transformerblock_maskmhatt")
        query = LayerNorm(query + masked_proj, name=name + "transformerblock_maskmhln")
    attn_proj, attn_weights = MultiheadAttention(value, key, query, output_dim, n_heads=n_heads, mask=False, random_state=random_state, name=name + "transformerblock_mhatt")
    attn_out = LayerNorm(query + attn_proj, name=name + "transformerblock_mhln")
    # Position-wise feed-forward with 4x expansion, as in the original transformer
    hidden = Linear([attn_out], [output_dim], 4 * output_dim, random_state=random_state, name=name + "transformerblock_iff")
    hidden = ReLU(hidden)
    ff_out = Linear([hidden], [4 * output_dim], output_dim, random_state=random_state, name=name + "transformerblock_off")
    return LayerNorm(attn_out + ff_out, name=name + "transformerblock_ffln"), attn_weights
def Linear(list_of_inputs, list_of_input_dims, output_dim, random_state=None,
           name=None, init=None, scale="default", biases=True, bias_offset=0.,
           dropout_flag_prob_keep=None, strict=None):
    """Fully-connected layer applied to the concatenation of the inputs.

    Inputs are concatenated on their last axis; variables are created (or
    reused from the shared-parameter registry) under ``name``.

    :param list_of_inputs: tensors to concatenate on the last axis
    :param list_of_input_dims: last-axis dimension of each input
    :param output_dim: size of the output feature dimension
    :param random_state: np.random.RandomState used for initialization (required)
    :param init: None/str for a named initializer, or (weights, biases) arrays
    :param biases: whether to add a bias term
    :param dropout_flag_prob_keep: if given, keep-probability for input dropout
    :param strict: if True, raise when the variable names already exist
    :raises ValueError: on missing random_state or duplicate names in strict mode
    """
    if random_state is None:
        raise ValueError("Must pass instance of np.random.RandomState!")
    nd = _ndim(list_of_inputs[0])
    input_var = tf.concat(list_of_inputs, axis=nd - 1)
    input_dim = sum(list_of_input_dims)
    if name is None:
        name = _get_name()
    name_w = name + "_linear_w"
    name_b = name + "_linear_b"
    name_out = name + "_linear_out"
    if init is None or type(init) is str:
        weight_values, = make_numpy_weights(input_dim, [output_dim],
                                            random_state=random_state,
                                            init=init, scale=scale, name=name_w)
    else:
        # explicit (weights, biases) arrays supplied by the caller
        weight_values = init[0]
    if strict is None:
        strict = get_strict_mode_default()
    if strict:
        cur_defs = get_params_dict()
        if name_w in cur_defs:
            raise ValueError("Name {} already created in params dict!".format(name_w))
        if name_b in cur_defs:
            raise ValueError("Name {} already created in params dict!".format(name_b))
    try:
        # reuse an existing shared variable when one was already registered
        weight = _get_shared(name_w)
    except NameError:
        weight = tf.Variable(weight_values, trainable=True, name=name_w)
        _set_shared(name_w, weight)
    if dropout_flag_prob_keep is not None:
        input_var = tf.nn.dropout(input_var, dropout_flag_prob_keep, seed=random_state.randint(10000))
    out = dot(input_var, weight)
    if biases:
        if (init is None) or (type(init) is str):
            b, = make_numpy_biases([output_dim], name=name_b)
        else:
            b = init[1]
        b = b + bias_offset
        try:
            # fix: use a distinct local name instead of shadowing the
            # `biases` boolean parameter
            bias_var = _get_shared(name_b)
        except NameError:
            bias_var = tf.Variable(b, trainable=True, name=name_b)
            _set_shared(name_b, bias_var)
        out = out + bias_var
    out = tf.identity(out, name=name_out)
    return out
def Conv2d(list_of_inputs, list_of_input_dims, num_feature_maps,
           kernel_size=(3, 3),
           dilation=[1, 1, 1, 1],
           strides=[1, 1, 1, 1],
           border_mode="same",
           custom_weight_mask=None,
           init=None, scale="default",
           biases=True, bias_offset=0.,
           name=None, random_state=None, strict=None):
    """2D convolution over the channel-wise concatenation of the inputs.

    Inputs (NHWC) are concatenated on the channel axis. ``kernel_size`` is
    (H, W). ``border_mode`` may be "same", "valid", an int (symmetric pad),
    a 4-list of ints, or a 4-list of (before, after) pairs for custom
    padding. Variables are created (or reused from the shared-parameter
    registry) under ``name``.

    :param custom_weight_mask: optional constant mask multiplied into the
        kernel (used e.g. for masked/causal convolutions)
    :raises ValueError: on missing random_state, unsupported strides/dilation,
        unknown border_mode, or duplicate names in strict mode
    """
    # kernel is H, W
    if name is None:
        name = _get_name()
    if random_state is None:
        raise ValueError("Must pass instance of np.random.RandomState!")
    if strides != [1, 1, 1, 1]:
        if hasattr(strides, "__len__") and len(strides) == 4:
            pass
        else:
            try:
                int(strides)
                strides = [1, int(strides), int(strides), 1]
            except (TypeError, ValueError):
                raise ValueError("Changing strides by non-int not yet supported")
    if dilation != [1, 1, 1, 1]:
        raise ValueError("Changing dilation not yet supported")
    input_t = tf.concat(list_of_inputs, axis=-1)
    input_channels = sum(list_of_input_dims)
    input_height = _shape(input_t)[1]
    input_width = _shape(input_t)[2]
    if type(name) is str:
        name_w = name + "_conv2d_w"
        name_b = name + "_conv2d_b"
        name_out = name + "_conv2d_out"
        name_mask = name + "_conv2d_mask"
    if strict is None:
        strict = get_strict_mode_default()
    if strict:
        cur_defs = get_params_dict()
        if name_w in cur_defs:
            raise ValueError("Name {} already created in params dict!".format(name_w))
        if name_b in cur_defs:
            raise ValueError("Name {} already created in params dict!".format(name_b))
    if init is None or type(init) is str:
        weight_values, = make_numpy_weights((input_channels, input_width, input_height),
                                            [(num_feature_maps, kernel_size[0], kernel_size[1])],
                                            init=init,
                                            scale=scale,
                                            random_state=random_state, name=name_w)
    else:
        # explicit arrays supplied; `name` is then a (w, b, out) name triple
        weight_values = init[0]
        name_w = name[0]
    try:
        # reuse an existing shared variable when one was already registered
        weight = _get_shared(name_w)
    except NameError:
        weight = tf.Variable(weight_values, trainable=True, name=name_w)
        _set_shared(name_w, weight)
    if custom_weight_mask is not None:
        """
        try:
            mask = _get_shared(name_mask)
        except NameError:
            mask = tf.Variable(custom_weight_mask, trainable=False, name=name_mask)
            _set_shared(name_mask, mask)
        """
        weight = tf.constant(custom_weight_mask) * weight
    if border_mode == "same":
        pad = "SAME"
    elif border_mode == "valid":
        pad = "VALID"
    else:
        try:
            # single int: symmetric spatial padding
            int(border_mode)
            new_pad = [0, int(border_mode), int(border_mode), 0]
            input_t = tf.pad(input_t, [[new_pad[0]] * 2,
                                       [new_pad[1]] * 2,
                                       [new_pad[2]] * 2,
                                       [new_pad[3]] * 2], "CONSTANT")
        except (TypeError, ValueError):
            try:
                # assume it is a custom list border pad
                # https://stackoverflow.com/questions/37659538/custom-padding-for-convolutions-in-tensorflow
                new_pad = [int(bi) for bi in border_mode]
                input_t = tf.pad(input_t, [[new_pad[0]] * 2,
                                           [new_pad[1]] * 2,
                                           [new_pad[2]] * 2,
                                           [new_pad[3]] * 2], "CONSTANT")
            except (TypeError, ValueError):
                try:
                    # custom (before, after) padded border mode
                    len(border_mode[0])
                    new_pad = border_mode
                    assert len(new_pad) == 4
                    # fix: loop variable renamed from `np`, which shadowed
                    # the numpy module inside this function
                    for pad_pair in new_pad:
                        assert len(pad_pair) == 2
                    input_t = tf.pad(input_t, [[new_pad[0][0], new_pad[0][1]],
                                               [new_pad[1][0], new_pad[1][1]],
                                               [new_pad[2][0], new_pad[2][1]],
                                               [new_pad[3][0], new_pad[3][1]]], "CONSTANT")
                except (TypeError, ValueError, AssertionError, IndexError):
                    raise ValueError("Unknown border_mode {} specified".format(border_mode))
        pad = "VALID"
    out = tf.nn.conv2d(input_t, weight, strides, padding=pad)
    if biases:
        if (init is None) or (type(init) is str):
            b, = make_numpy_biases([num_feature_maps], name=name_b)
        else:
            b = init[1]
            name_b = name[1]
            name_out = name[2]
        b = b + bias_offset
        try:
            # fix: use a distinct local name instead of shadowing the
            # `biases` boolean parameter
            bias_var = _get_shared(name_b)
        except NameError:
            bias_var = tf.Variable(b, trainable=True, name=name_b)
            _set_shared(name_b, bias_var)
        out = out + bias_var[None, None, None]
    out = tf.identity(out, name=name_out)
    return out
def GatedMaskedConv2d(list_of_v_inputs, list_of_v_input_dims,
list_of_h_inputs, list_of_h_input_dims,
num_feature_maps,
residual=True,
conditioning_class_input=None,
conditioning_num_classes=None,
conditioning_spatial_map=None,
conditioning_spatial_map_kernel_size=None,
kernel_size=(3, 3),
dilation=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
mask_type="img_B",
border_mode="same",
init=None, scale="default",
biases=True, bias_offset=0.,
name=None, random_state=None, strict=None):
# Special thanks to <NAME> for example code
# https://github.com/ritheshkumar95/pytorch-vqvae/blob/master/modules.py#L136
# do it with nonsquare conv
# kernel is H, W
if random_state is None:
raise ValueError("Must pass instance of np.random.RandomState!")
if name is None:
name = _get_name()
if kernel_size[0] != kernel_size[1] or kernel_size[0] % 2 != 1:
raise ValueError("Kernel size must be odd, and square e.g. (3, 3)")
name_vert = name + "_gated_masked_vert"
name_horiz = name + "_gated_masked_horiz"
name_vert2horiz = name + "_gated_masked_vert2horiz"
name_horiz_res = name + "_gated_masked_conv2d_horiz_res"
name_embed = name + "_gated_masked_class_embed"
name_spatial = name + "_gated_masked_spatial_cond"
name_m_v = name + "_gated_masked_conv2d_mask_vert"
name_m_h = name + "_gated_masked_conv2d_mask_horiz"
if conditioning_class_input != None:
if conditioning_num_classes is None:
raise ValueError("If passing conditioning_class_input, must pass conditioning_num_classes")
n_embeds = _shape(conditioning_class_input)[-1]
if n_embeds == 1:
c_e, emb = Embedding(conditioning_class_input, conditioning_num_classes,
2 * num_feature_maps, random_state=random_state, name=name_embed)
else:
logger.info("GatedMaskedConv2d embedding input has dimension {} on last axis, creating {} embeddings".format(n_embeds, n_embeds))
c_e = None
for ii in range(n_embeds):
c_ei, embi = Embedding(conditioning_class_input[:, ii][:, None], conditioning_num_classes,
2 * num_feature_maps, random_state=random_state, name=name_embed + "_{}".format(ii))
if c_e is None:
c_e = c_ei
else:
c_e += c_ei
shp = _shape(c_e)
if len(shp) != 2:
raise ValueError("conditioning_embed result should be 2D (input (N, 1)), got {}".format(shp))
if conditioning_spatial_map != None:
shp = _shape(conditioning_spatial_map)
if conditioning_spatial_map_kernel_size is None:
conditioning_spatial_map_kernel_size = kernel_size
spatial_c_e = Conv2d([conditioning_spatial_map], [shp[-1]], 2 * num_feature_maps,
kernel_size=conditioning_spatial_map_kernel_size,
dilation=dilation,
strides=strides,
border_mode="same",
init=init, scale=scale,
biases=biases, bias_offset=bias_offset,
name=name_spatial, random_state=random_state, strict=strict)
if strict is None:
strict = get_strict_mode_default()
if strict:
cur_defs = get_params_dict()
if name_m_v in cur_defs:
raise ValueError("Name {} already created in params dict!".format(name_m_v))
if name_m_h in cur_defs:
raise ValueError("Name {} already created in params dict!".format(name_m_h))
input_t = tf.concat(list_of_v_inputs, axis=-1)
input_channels = sum(list_of_v_input_dims)
input_height = _shape(input_t)[1]
input_width = _shape(input_t)[2]
output_channels = num_feature_maps
# left pad by the exact correct amount...
vert_kernel = (kernel_size[0] // 2 + 1, kernel_size[1])
bpad_v = ((0, 0), (kernel_size[0] // 2, 0), (kernel_size[1] // 2, kernel_size[1] // 2), (0, 0))
mask_v = np.ones((kernel_size[0] // 2 + 1, kernel_size[1], input_channels, 2 * output_channels)).astype("float32")
"""
vert_kernel = (kernel_size[0], kernel_size[1])
bpad_v = ((0, 0), (kernel_size[0] // 2, kernel_size[0] // 2), (kernel_size[1] // 2, kernel_size[1] // 2), (0, 0))
mask_v = np.ones((kernel_size[0], kernel_size[1], input_channels, 2 * output_channels)).astype("float32")
"""
# https://github.com/kkleidal/GatedPixelCNNPyTorch/blob/master/note-on-conv-masking.ipynb
if mask_type == "img_A":
mask_v[-1] = 0.
# only need to mask last element of weights (self row) on vert
vert = Conv2d(list_of_v_inputs, list_of_v_input_dims,
2 * num_feature_maps,
kernel_size=vert_kernel,
dilation=dilation,
strides=strides,
custom_weight_mask=mask_v,
border_mode=bpad_v,
init=init, scale=scale,
biases=biases, bias_offset=bias_offset,
name=name_vert, random_state=random_state, strict=strict)
elif mask_type == "img_B":
vert = Conv2d(list_of_v_inputs, list_of_v_input_dims, 2 * num_feature_maps,
kernel_size=vert_kernel,
dilation=dilation,
strides=strides,
border_mode=bpad_v,
init=init, scale=scale,
biases=biases, bias_offset=bias_offset,
name=name_vert, random_state=random_state, strict=strict)
else:
raise ValueError("Unknown mask_type argument {}".format(mask_type))
horiz_kernel = (1, kernel_size[1] // 2 + 1)
bpad_h = ((0, 0), (0, 0), (kernel_size[1] // 2, 0), (0, 0))
mask_h = np.ones((1, kernel_size[1] // 2 + 1, input_channels, 2 * output_channels)).astype("float32")
if mask_type == "img_A":
mask_h[:, -1] = 0.
# only need to mask last element of weights (self col) on horiz
horiz = Conv2d(list_of_h_inputs, list_of_h_input_dims, 2 * num_feature_maps,
kernel_size=horiz_kernel,
dilation=dilation,
strides=strides,
custom_weight_mask=mask_h,
border_mode=bpad_h,
init=init, scale=scale,
biases=biases, bias_offset=bias_offset,
name=name_horiz, random_state=random_state, strict=strict)
else:
horiz = Conv2d(list_of_h_inputs, list_of_h_input_dims, 2 * num_feature_maps,
kernel_size=horiz_kernel,
dilation=dilation,
strides=strides,
border_mode=bpad_h,
init=init, scale=scale,
biases=biases, bias_offset=bias_offset,
name=name_horiz, random_state=random_state, | |
<filename>src/picolo/shapes/shapes.py<gh_stars>1-10
"""
@package shapes
@author <NAME>
@version 0.1
@brief Contains classes for Shape, FourierShape, ZernikeShape, UnitCellShape,
and factory methods shape_factory_from_values and shape_factory_from_coords
"""
# import from standard library
import math
import copy
import warnings
import logging
# import external packages
import numpy as np
import matplotlib.pyplot as plt
# import modules in this package
from config import BravaisLattice
class Shape:
    """ Base class for shape descriptors.

    Stores an ordered list of named variables that specify the
    components of the shape descriptor vector,
    and the value associated with each variable.

    Implement build_from_coords() and _postprocessing()
    to provide shape-specific functionality.
    """
    def __init__(self, var_names=None, vals=None, **kwargs):
        """ Constructor

        @param var_names List of variable names
        @param vals List of numbers for the value of each variable
        @param **kwargs Optional arguments, to be set as attributes
        """
        # fix: avoid mutable default arguments
        var_names = [] if var_names is None else var_names
        vals = [] if vals is None else vals
        # test
        if len(var_names) != len(vals):
            msg = 'Variables and vals must have same length. '
            msg += 'Got lengths %d and %d' % (len(var_names), len(vals))
            raise ValueError(msg)
        # set up variables and vals
        self._var_names = []
        self._vals = np.asarray([])
        for iv in range(len(vals)):
            self.put_component(var_names[iv], vals[iv])
        # store any keyword-args
        if kwargs:
            self._params = kwargs
        else:
            self._params = dict()
        # clean up
        self._postprocessing()
    def _postprocessing(self):
        """ Implement this method to provide class-specific functionality
        after construction. """
        # (in)validate
        if len(self._var_names) == 0:
            self.invalidate()
        else:
            self.put_param('is_valid', True)
        # set type
        self.put_param('type', 'Generic')
    def __len__(self):
        return len(self._vals)
    def get(self, var_name):
        """ Returns named variable or parameter called var_name.
        If variable not found, raises KeyError. """
        if var_name in self._var_names:
            iv = self._var_names.index(var_name)
            return self._vals[iv]
        elif var_name in self._params:
            return self._params[var_name]
        else:
            raise KeyError("Nothing found for %s in vars (%s) or params (%s)" % (str(var_name),
                                                                                 ', '.join(self._var_names),
                                                                                 ', '.join(self._params.keys())))
    def get_vals(self, norm=False):
        """ Returns ndarray of values, normalized only if requested. """
        if norm:
            return self._vals / self.mag()
        else:
            return self._vals
    def get_components(self, norm=False):
        """ Returns list of all variable names.
        (norm is accepted for interface compatibility and ignored.) """
        return self._var_names
    def iter_components(self):
        """ Iterate over (var_name, val) pairs of feature vector components. """
        for iv in range(len(self._var_names)):
            yield self._var_names[iv], self._vals[iv]
    def iter_params(self):
        """ Iterate over (var_name, val) pairs of non-feature vector parameters. """
        # fix: items() works on both Python 2 and 3 (iteritems() is py2-only)
        for var, val in self._params.items():
            yield var, val
    def copy(self):
        """ Deep copy of self. """
        return copy.deepcopy(self)
    def mag(self):
        """ Get magnitude of vector of vals. """
        return np.linalg.norm(self._vals)
    def subset(self, new_var_names):
        """ Returns a copy of the shape with some components removed.

        @param new_var_names List of variable names to keep
        @retval Shape object of the same type as self
        """
        # make copy of self
        new_shape = self.copy()
        # check that new names are a subset of old names
        # fix: compare as sets; list <= list is lexicographic, not subset
        if not set(new_var_names) <= set(new_shape._var_names):
            extra_vars = set(new_var_names) - set(new_shape._var_names)
            extra_var_strings = [str(var) for var in extra_vars]
            msg = 'New variables must be a subset of existing variables. '
            msg += 'Got extra variables %s' % ', '.join(extra_var_strings)
            raise ValueError(msg)
        # drop unneeded vals
        for name in self._var_names:
            if name not in new_var_names:
                new_shape.drop_component(name)
        # return
        return new_shape
    def has_component(self, var_name):
        """ Returns True if there is a variable that matches var_name,
        False otherwise.
        """
        if var_name in self._var_names:
            return True
        else:
            return False
    def drop_component(self, var_name):
        """ Removes variable called var_name if it is present;
        does nothing otherwise.
        """
        if self.has_component(var_name):
            iv = self._var_names.index(var_name)
            del self._var_names[iv]
            # fix: delete by position iv, not by the stored value
            self._vals = np.delete(self._vals, iv)
    def put_component(self, var_name, val):
        """ Sets variable called var_name with value val;
        overwrites value if the variable already exists.
        """
        if self.has_component(var_name):
            iv = self._var_names.index(var_name)
            self._vals[iv] = val
        else:
            self._var_names.append(var_name)
            self._vals = np.append(self._vals, val)
    def put_param(self, attr_name, val):
        """ Sets parameter attribute with value val;
        overwrites value if the parameter already exists.
        """
        self._params[attr_name] = val
    def invalidate(self):
        """ Mark this shape as invalid. """
        self.put_param('is_valid', False)
        self._vals = np.empty_like(self._vals)
    def build_from_coords(self, neighbor_coords):
        """ Implement this method to provide class-specific functionality
        for calculating the shape descriptor variables from a set of
        points.
        """
        pass
class FourierShape(Shape):
    """ Shape object where component variables are rotation-invariant
    Fourier descriptors, indexed by ints l.
    """
    def _postprocessing(self):
        # if no var names set, set some by default
        if len(self._var_names) == 0:
            # fix: materialize as a list; py3 range does not support item
            # assignment, which the int-coercion loop below performs
            self._var_names = list(range(2, 25, 2))
            self._vals = np.zeros(len(self._var_names))
            self.invalidate()
        else:
            self.put_param('is_valid', True)
        # convert variable names to ints if needed
        # assuming they are FourierShape-like l variable names
        for iv in range(len(self._var_names)):
            self._var_names[iv] = int(self._var_names[iv])
        # set type
        self.put_param('type', 'Fourier')
    def build_from_coords(self, neighbor_coords):
        """ Update with rotation invariant Fourier descriptors of neighboring
        points about reference position, indexed by ints l.

        @param self The object pointer
        @param neighbor_coords List of Coord objects for neighboring points,
            with the origin at the reference position
        """
        # set up storage
        self.put_param('n_neighbors', len(neighbor_coords))
        complexvals = np.zeros(len(self._var_names), dtype=complex)
        # loop over neighbors
        for coord in neighbor_coords:
            # add component to running sum
            # fix: enumerate instead of repeated list.index() lookups
            # (O(n) per component, and wrong for duplicate indices)
            for index, order in enumerate(self._var_names):
                complexvals.real[index] += math.cos(order * coord.theta)
                complexvals.imag[index] += math.sin(order * coord.theta)
        # normalize sums
        complexvals /= float(max(self.get('n_neighbors'), 1))
        # set vals
        for index, iv in enumerate(self._var_names):
            val = np.abs(complexvals[index])
            self.put_component(iv, val)
        # validate
        self.put_param('is_valid', True)
class ZernikeShape(Shape):
    """ Shape object where component variables are rotation-invariant
    Zernike moments, indexed by tuples of ints (n,m).

    Required parameters to set with put_param:
        neighbor_dist = radius of the disc on which radial Zernike
            polynomials are defined
    """
    def _postprocessing(self):
        # if no var names set, set some by default
        if len(self._var_names) == 0:
            # fix: materialize as a list; py3 range is an immutable sequence
            # and the conversion below replaces the whole name list anyway
            self._var_names = list(range(2, 25))
            self.invalidate()
        else:
            self.put_param('is_valid', True)
        # convert variable names to tuples (n,m) if needed
        # assuming they are FourierShape-like l variable names
        if len(self._var_names) > 0:
            if not isinstance(self._var_names[0], tuple):
                warnings.warn('dropping all values from ZernikeShape',
                              RuntimeWarning)
                nms = self._ls2nms(self._var_names)
                self._var_names = []
                self._vals = np.asarray([])
                for nm in nms:
                    self.put_component(nm, 0)
        # set type
        self.put_param('type', 'Zernike')
    def build_from_coords(self, neighbor_coords):
        """ Update with Zernike rotation invariant moments corresponding to
        positions of neighboring points about reference position.

        @param self The object pointer
        @param neighbor_coords List of Coord objects for neighboring points,
            with the origin at the reference position
        """
        # notation follows "Invariant Image Recognition by Zernike Moments"
        # by Khotanzad and Hong, 1990 IEEE.
        # set up storage
        self.put_param('n_neighbors', len(neighbor_coords))
        complexvals = np.zeros(len(self._var_names), dtype=complex)
        ns = [self._nm2n(nm) for nm in self._var_names]
        ms = [self._nm2m(nm) for nm in self._var_names]
        # loop over neighbors
        for coord in neighbor_coords:
            # add component to running sum
            rscaled = coord.r / self.get('neighbor_dist')
            for iv in range(len(self._var_names)):
                rnm = self._rnm(ns[iv], ms[iv], rscaled)
                coeff = rnm * (ns[iv]+1.0)/math.pi
                complexvals.real[iv] += math.cos(ms[iv]*coord.theta) * coeff
                complexvals.imag[iv] += math.sin(ms[iv]*coord.theta) * coeff
        # normalize sum
        complexvals /= float(max(self.get('n_neighbors'), 1))
        # set vals
        for iv in range(len(self._var_names)):
            val = np.abs(complexvals[iv])
            self.put_component(self._var_names[iv], val)
        # validate
        self.put_param('is_valid', True)
    def _ls2nms(self, ls):
        """ Convert list of Fourier indices to (n,m) Zernike index pairs

        @param self The object pointer
        @param ls List for Fourier index
        @retval nms List of 2-tuples of ints
        """
        nms = []
        for l in ls:
            if l % 2 == 1:
                # max(m) < l
                nms += [(l, m) for m in range(0, l, 2)]
            else:
                # max(m) == l
                nms += [(l, m) for m in range(0, l+1, 2)]
        return nms
    def _nm2n(self, nm):
        """ Extract the radial order n from an (n,m) pair. """
        return nm[0]
    def _nm2m(self, nm):
        """ Extract the azimuthal order m from an (n,m) pair. """
        return nm[1]
    def _rnm(self, n, m, r):
        """ Compute radial polynomial part of Zernike polynomial """
        r_sum = 0
        m = int(abs(m))
        u = int((n-m)/2)
        v = int((n+m)/2)
        for s in range(0, u+1):
            numerator = pow(-1, s) * math.factorial(int(n-s)) * pow(r, n-2*s)
            try:
                denominator = math.factorial(s) * math.factorial(v-s) * math.factorial(u-s)
            except ValueError:
                raise ValueError('(s,n,m,u,v) = (%d,%d,%d,%d,%d)' % (s, n, m, u, v))
            r_sum += numerator / denominator
        return r_sum
class UnitCellShape(Shape):
""" Shape object where component variables are 2d unit cell parameters
(a, b, angle).
a | |
#! /usr/bin/env python
"""
Script to download sitemaps and check if they have obvious text in them.
Reports success and failures and number of docs that have text
"""
import asyncio
import json
import os
import re
from asyncio import Semaphore
from collections import defaultdict, Counter
from datetime import datetime
from typing import Sequence, Dict, Generator, Iterable, List, Optional, Tuple
import aiohttp
import click
import json5
from aiohttp import ClientSession, ClientResponseError
from attr import attrs, attrib
from bs4 import BeautifulSoup, Tag
from lxml import etree, objectify
from lxml.etree import XMLSyntaxError
from pymongo import MongoClient, DESCENDING, HASHED
from pymongo.collection import Collection
from extraction.downloadsitemaps import Sitemap
from extraction.utils import get_sitemap_collection
VOA_CORPUS = "voa_corpus"
VAR_UTAG_PATTERN = re.compile(r"var\s+utag_data\s*=\s*({.*})")
@attrs(frozen=True, auto_attribs=True)
class SitemapFile:
    """A downloaded sitemap together with the local filename it was saved to."""

    # name of the sitemap XML file on disk
    filename: str
    # provenance record (url, iso, language, site_name, timestamp, region)
    sitemap: Sitemap

    @classmethod
    def from_fields(cls, fields: Sequence[str]) -> "SitemapFile":
        """Build from one TSV row: filename first, then the Sitemap fields."""
        return SitemapFile(fields[0], Sitemap(*fields[1:]))

    def to_dict(self):
        """Return a JSON/Mongo-serializable dict form of this record."""
        ret = {}
        ret["filename"] = self.filename
        ret["sitemap"] = self.sitemap.to_dict()
        return ret
@attrs(frozen=True, auto_attribs=True)
class Page:
    """One <url> entry parsed from a sitemap, plus optional news/video metadata."""

    # sitemap this page was listed in (provenance)
    sitemap_prov: SitemapFile
    url: Optional[str]
    sitemap_last_modified: Optional[datetime]
    changefreq: Optional[str]
    priority: Optional[str]
    metadata: Dict = attrib(factory=dict)
    archive_url: Optional[str] = attrib(default=None)

    @classmethod
    def from_node(cls, node, sitemap: SitemapFile) -> "Page":
        """Build a Page from an lxml <url> node belonging to *sitemap*.

        Unrecognized child tags are reported on stdout and skipped.
        """
        timestamp: Optional[datetime] = None
        url: Optional[str] = None
        changefreq: Optional[str] = None
        priority: Optional[str] = None
        video = {}
        news = {}
        metadata = {}
        for tag in node:
            if etree.QName(tag).localname == "loc":
                url = tag.text
            elif etree.QName(tag).localname == "lastmod":
                timestamp = cls.parse_timestamp(tag.text)
            elif etree.QName(tag).localname == "priority":
                priority = tag.text
            elif etree.QName(tag).localname == "changefreq":
                changefreq = tag.text
            elif etree.QName(tag).localname == "news":
                news = gather_subtags(tag)
            elif etree.QName(tag).localname == "video":
                video = gather_subtags(tag)
            else:
                print("Name not handled:")
                print(etree.QName(tag).localname)
                print()
        if news:
            metadata["news"] = news
        if video:
            metadata["video"] = video
        return Page(sitemap, url, timestamp, changefreq, priority, metadata)

    @classmethod
    def parse_timestamp(cls, s: Optional[str]) -> Optional[datetime]:
        """
        Fix timestamps of format 2021-06-08T16:03:48.75Z
        Copied from fix timestamps code, not yet tested.

        Returns None for missing/placeholder values; otherwise tries two
        strptime formats before falling back to datetime.fromisoformat.
        """
        # In the case that the sitemaps didn't have a timestamp just pass along what they had
        if not s or s == "None":
            return None
        # Hacking to get format correct. Trim the millisecond decimals if needed
        if len(s) >= len("2021-06-13T02:05:46.170195Z"):
            s = s[: len("2021-06-13T02:05:46.170195")] + "Z"
        try:
            return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%fZ")
        except ValueError:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; strptime and fromisoformat signal
            # bad formats with ValueError only.
            try:
                return datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
            except ValueError:
                return datetime.fromisoformat(s)

    def to_dict(self):
        """Return a Mongo-serializable dict; metadata keys are flattened in."""
        ret = {}
        ret.update(self.metadata)
        ret["url"] = self.url
        ret["sitemap_last_modified"] = self.sitemap_last_modified
        ret["changefreq"] = self.changefreq
        ret["priority"] = self.priority
        ret["sitemap_prov"] = self.sitemap_prov.to_dict()
        ret["archive_url"] = self.archive_url
        return ret

    @classmethod
    def from_json(cls, page_json: Dict):
        """Rebuild a Page from a dict produced by to_dict (or an older schema)."""
        sitemap_prov = page_json["sitemap_prov"]
        sitemap = SitemapFile(
            sitemap_prov["filename"], Sitemap.from_json(sitemap_prov["sitemap"])
        )
        url = page_json["url"]
        # Handle old version with timestamp or new version with sitemap_last_modified
        if "timestamp" in page_json:
            timestamp = cls.parse_timestamp(page_json["timestamp"])
        else:
            timestamp = cls.parse_timestamp(page_json["sitemap_last_modified"])
        changefreq = page_json["changefreq"]
        priority = page_json["priority"]
        metadata = {}
        if page_json.get("news"):
            metadata["news"] = page_json.get("news")
        if page_json.get("video"):
            metadata["video"] = page_json.get("video")
        archive_url = page_json.get("archive_url")
        return Page(
            sitemap,
            url,
            timestamp,
            changefreq,
            priority,
            metadata,
            archive_url=archive_url,
        )
def read_filemap(filemap: str) -> Dict[str, List[SitemapFile]]:
    """Parse the TSV filemap, grouping sitemaps by their ISO language code.

    The first line is a header and is skipped. Columns are:
    filename, url, iso, language, site_name, timestamp, region.
    NOTE(review): lines are not stripped, so the last column may retain a
    trailing newline — confirm downstream tolerates this.
    """
    grouped: Dict[str, List[SitemapFile]] = defaultdict(list)
    with open(filemap, "r", encoding="utf8") as handle:
        header_seen = False
        for line in handle:
            if not header_seen:
                # Skip header row
                header_seen = True
                continue
            columns = line.split("\t")
            # key by ISO code (column 2); keep full provenance per sitemap
            grouped[columns[2]].append(SitemapFile.from_fields(columns[:7]))
    return grouped
def gather_subtags(tag):
    """There's a bunch of subtags on video and news, so just throw those in a dict for now"""
    collected = {}
    for child in tag:
        collected[etree.QName(child).localname] = child.text
    return collected
def pages_from_sitemaps(
    sitemap_list: Sequence[SitemapFile], sitemap_dir: str
) -> Generator[Page, None, None]:
    """Yield unique Pages parsed from each sitemap file in *sitemap_dir*.

    Files that fail XML parsing are reported and skipped; duplicate URLs
    (across all sitemaps) are yielded only once, first occurrence wins.
    """
    seen_urls = set()
    for sitemap in sitemap_list:
        path = os.path.join(sitemap_dir, sitemap.filename)
        with open(path, "rb") as site_file:
            raw = site_file.read()
        try:
            tree = etree.XML(raw)
        except XMLSyntaxError as e:
            print(f"Couldn't parse {sitemap.filename}. {e}")
            continue
        # strip pytype annotations/namespaces so localname matching works
        objectify.deannotate(tree, cleanup_namespaces=True)
        for node in tree:
            page = Page.from_node(node, sitemap)
            if not page.url or page.url in seen_urls:
                continue
            seen_urls.add(page.url)
            yield page
def is_valid(text: str) -> bool:
    """
    Simple check to eliminate obviously bad text in paragraph tags.

    Returns False for empty/whitespace-only text and for text beginning with
    known site boilerplate (login/signup prompts, media placeholders).
    """
    # Known boilerplate prefixes scraped from the site's navigation chrome.
    boilerplate_prefixes = (
        "No media source currently available",
        "Already have an account?",
        "Log in",
        "Sign up",
        "Not a registered user?",
    )
    text = text.strip()
    if not text:
        return False
    # str.startswith accepts a tuple of prefixes: one call replaces the
    # previous five-branch elif chain with identical semantics.
    return not text.startswith(boilerplate_prefixes)
@attrs(frozen=True, auto_attribs=True)
class PageResult:
    """Outcome of fetching a single page over HTTP."""

    # True only when the GET completed and raise_for_status passed
    success: bool
    # wall-clock time the fetch was attempted (naive local time)
    time_retrieved: datetime
    # response body text, or None on any failure
    content: Optional[str]
    # stringified exception on failure, else None
    error_message: Optional[str]
async def request_page(url: Optional[str], session: ClientSession) -> PageResult:
    """GET *url* using *session* and wrap the outcome in a PageResult.

    Never raises: any failure (missing url, HTTP error status, transport
    error) is captured in the returned PageResult's error_message.
    """
    success = False
    timestamp = datetime.now()
    # timestamp = datetimenow.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    if not url:
        return PageResult(success, timestamp, None, "Missing url")
    try:
        resp = await session.request(method="GET", url=url)
        resp.raise_for_status()
        print(f"Got response [{resp.status}] for URL: {url}")
        success = True
        content = await resp.text()
        return PageResult(success, timestamp, content, None)
    except Exception as e:
        # The previous explicit ClientResponseError branch was byte-identical
        # to this generic handler, so the two were collapsed into one.
        print(e)
        return PageResult(success, timestamp, None, str(e))
def extract_utag_data(scripts: Optional[Sequence[Tag]], url: Optional[str]) -> Dict:
    """Find and parse the `var utag_data = {...}` object in any script tag.

    Returns the parsed dict, or {} when no script matches or parsing fails.
    Parse failures are appended to utag_data.log along with the page url.
    """
    if scripts is None:
        # Has no scripts so return empty
        return {}
    try:
        for script in scripts:
            # text = script.getText()
            if script:
                match = VAR_UTAG_PATTERN.search(str(script))
                if match:
                    # json5 tolerates the relaxed JS object-literal syntax
                    return json5.loads(match.group(1))
    except ValueError as e:
        with open("utag_data.log", "a", encoding="utf8") as logfile:
            print(url, file=logfile)
            print(e, file=logfile)
            # NOTE(review): this blank line goes to stdout, not the logfile —
            # presumably `print(file=logfile)` was intended; confirm.
            print()
        return {}
    # Couldn't find a match so return empty
    return {}
def extract_ld_json(scripts: Optional[Sequence[Tag]]) -> Dict:
    """Parse the first non-empty application/ld+json script into a dict.

    Returns {} when there are no scripts or none contains parseable content.
    """
    if scripts is None:
        # No scripts at all: nothing to parse
        return {}
    for script in scripts:
        # text = script.getText()
        if not script or not script.contents:
            continue
        json_text = script.contents[0]
        if json_text:
            # json5 handles trailing commas / relaxed quoting
            return json5.loads(json_text)
    # Couldn't find a match so return empty
    return {}
async def scrape_page(
    page: Page, session: ClientSession, mongo_collection: Collection, sem: Semaphore
):
    """Fetch one page, extract HTML metadata, and insert the result into Mongo.

    Concurrency is bounded by *sem*. Fetch failures still produce a Mongo
    document (via insert_page) with success=False and no tag metadata.
    """
    # # Bare except is bad, but not clear what error is thrown
    # failures += 1
    async with sem:
        page_result = await request_page(page.url, session)
        has_ptags = False
        html_tag_metadata = {}
        # text = ""
        if page_result.content is not None:
            soup = BeautifulSoup(page_result.content, features="lxml")
            paragraphs = soup.find_all("p")
            if paragraphs:
                # page "has text" only if at least one <p> survives the
                # boilerplate filter in is_valid
                texts = [p.getText() for p in paragraphs if is_valid(p.getText())]
                if texts:
                    # text = "\n".join(texts)
                    has_ptags = True
            title = soup.find("meta", {"name": "title"})
            description = soup.find("meta", {"name": "description"})
            canonical_link = soup.find("link", {"rel": "canonical"})
            keywords = soup.find("meta", {"name": "keywords"})
            authors = soup.find_all("meta", {"name": "Author"})
            author_list = [author["content"] for author in authors]
            title = await get_content(title)
            description = await get_content(description)
            canonical_link = await get_content(canonical_link, "href")
            keywords = await get_content(keywords)
            if keywords:
                keywords = [keyword.strip() for keyword in keywords.split(",")]
            else:
                keywords = []
            scripts = soup.find_all("script", {"type": "text/javascript"})
            utag_data = extract_utag_data(scripts, page.url)
            ld_scripts = soup.find_all("script", {"type": "application/ld+json"})
            ld_json = extract_ld_json(ld_scripts)
            date_published = ld_json.get("datePublished")
            date_modified = ld_json.get("dateModified")
            if date_published:
                date_published = datetime.fromisoformat(date_published)
            if date_modified:
                # NOTE(review): dateModified uses a fixed strptime format while
                # datePublished uses fromisoformat — presumably the site emits
                # different formats for the two fields; confirm.
                date_modified = datetime.strptime(date_modified, "%Y-%m-%d %H:%M:%SZ")
            html_tag_metadata = {
                "title": title,
                "description": description,
                "keywords": keywords,
                "canonical_link": canonical_link,
                "utag_data": utag_data,
                "content_type": utag_data.get("content_type"),
                "has_ptags": has_ptags,
                "date_published": date_published,
                "date_modified": date_modified,
                "application_ld_json": ld_json,
                "authors": author_list,
            }
        await insert_page(
            mongo_collection, page_result, html_tag_metadata=html_tag_metadata, page=page
        )
async def get_content(tag, att_name: str = "content") -> Optional[str]:
    """Return *tag*'s attribute *att_name*, or the tag itself when falsy.

    A falsy input (None / missing tag) is passed straight through unchanged.
    """
    if not tag:
        return tag
    return tag.get(att_name, None)
async def insert_page(
    collection: Collection,
    page_result: PageResult,
    *,
    html_tag_metadata: Dict,
    page: Page,
):
    """Insert the scraped page into Mongo, maintaining a 'latest' flag.

    When a successful fetch shares a canonical link (or url) with existing
    documents, the new document becomes latest=True and the older ones are
    flipped to latest=False. On UnicodeEncodeError a stripped fallback
    document (no HTML) is stored with success=False instead.
    """
    document = page.to_dict()
    document.update(html_tag_metadata)
    document["original_html"] = str(page_result.content)
    document["success"] = page_result.success
    document["language"] = page.sitemap_prov.sitemap.language
    document["iso"] = page.sitemap_prov.sitemap.iso
    document["time_retrieved"] = page_result.time_retrieved
    document["error_message"] = page_result.error_message
    if page_result.success and document.get("canonical_link"):
        # gather prior versions of this page by canonical link and by url
        existing_docids = [
            doc["_id"]
            for doc in collection.find({"canonical_link": document["canonical_link"]})
        ]
        existing_docids.extend(
            [doc["_id"] for doc in collection.find({"url": document["url"]})]
        )
        existing_docids = list(set(existing_docids))
        if existing_docids and document["success"]:
            # Collision of canonical links, check and set latest if not an error
            document["latest"] = True
            try:
                collection.insert_one(document)
                # demote every previously-stored version
                collection.update_many(
                    {"_id": {"$in": existing_docids}}, {"$set": {"latest": False}}
                )
            except UnicodeEncodeError as e:
                print(f"Unicode error on {page.url}")
                # rebuild a minimal document without the offending HTML
                document = page.to_dict()
                # Set latest flag to false so we can still grab latest error-free version of
                # canonical link
                document["latest"] = False
                document.update(
                    {
                        "error_message": str(e),
                        "success": False,
                        "iso": page.sitemap_prov.sitemap.iso,
                        "language": page.sitemap_prov.sitemap.language,
                        "time_retrieved": page_result.time_retrieved,
                    }
                )
                collection.insert_one(document)
        else:
            # Insert document normally
            try:
                document["latest"] = True
                collection.insert_one(document)
            except UnicodeEncodeError as e:
                print(f"Unicode error on {page.url}")
                # same minimal-fallback path as above
                document = page.to_dict()
                document["latest"] = False
                document.update(
                    {
                        "error_message": str(e),
                        "success": False,
                        "iso": page.sitemap_prov.sitemap.iso,
                        "language": page.sitemap_prov.sitemap.language,
                        "time_retrieved": page_result.time_retrieved,
                    }
                )
                collection.insert_one(document)
async def scrape_and_insert(
    sitemap_collection: Collection,
    pages_generator: Iterable[Page],
    num_connections: int = 8,
):
    """Scrape every page from *pages_generator* concurrently into Mongo.

    :param sitemap_collection: Mongo collection receiving the documents
    :param pages_generator: pages to fetch (consumed eagerly into tasks)
    :param num_connections: semaphore bound on concurrent scrape_page calls
    """
    # semaphore limits in-flight scrapes; the connector separately caps
    # sockets per host
    sem = asyncio.Semaphore(num_connections)
    connector = aiohttp.TCPConnector(limit_per_host=50)
    async with ClientSession(connector=connector) as session:
        tasks = []
        for page in pages_generator:
            tasks.append(
                asyncio.ensure_future(
                    scrape_page(
                        page,
                        session=session,
                        mongo_collection=sitemap_collection,
                        sem=sem,
                    )
                )
            )
            # await asyncio.sleep(1)
        await asyncio.gather(*tasks)
# Click command group: subcommands (e.g. `scrape`) attach to this entry point.
@click.group()
def scraper_cli():
    pass
@scraper_cli.command()
@click.argument("filemap")
@click.argument("sitemap_dir")
@click.option("--languages", "-l", multiple=True)
@click.option("--exclude-languages", "-e", multiple=True)
@click.option("--doc-limit", type=int)
@click.option("--drop-database", type=bool, default=False)
@click.option("--port", default=27200, type=int)
@click.option("--num-connections", default=8, type=int)
def scrape(
filemap: str,
sitemap_dir: str,
languages: Tuple[str, | |
<filename>aiida/orm/computers.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for Computer entities"""
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
from aiida.common import exceptions
from aiida.common.lang import classproperty
from aiida.manage.manager import get_manager
from aiida.plugins import SchedulerFactory, TransportFactory
from . import entities, users
if TYPE_CHECKING:
from aiida.orm import AuthInfo, User
from aiida.orm.implementation import Backend, BackendComputer
from aiida.schedulers import Scheduler
from aiida.transports import Transport
__all__ = ('Computer',)
class ComputerCollection(entities.Collection['Computer']):
    """The collection of Computer entries."""

    @staticmethod
    def _entity_base_cls() -> Type['Computer']:
        # tells the generic Collection machinery which entity class it manages
        return Computer

    def get_or_create(self, label: Optional[str] = None, **kwargs) -> Tuple[bool, 'Computer']:
        """
        Try to retrieve a Computer from the DB with the given arguments;
        create (and store) a new Computer if such a Computer was not present yet.

        :param label: computer label
        :return: (created, computer) where ``created`` is a boolean saying
            whether the computer had to be created and ``computer`` is the
            existing computer or the newly instantiated one (the new one is
            presumably not yet stored — confirm callers store it).
        """
        if not label:
            raise ValueError('Computer label must be provided')

        try:
            return False, self.get(label=label)
        except exceptions.NotExistent:
            return True, Computer(backend=self.backend, label=label, **kwargs)

    def list_labels(self) -> List[str]:
        """Return a list with all the labels of the computers in the DB."""
        return self._backend.computers.list_names()

    def delete(self, pk: int) -> None:
        """Delete the computer with the given id"""
        return self._backend.computers.delete(pk)
class Computer(entities.Entity['BackendComputer']):
"""
Computer entity.
"""
# pylint: disable=too-many-public-methods
_logger = logging.getLogger(__name__)
PROPERTY_MINIMUM_SCHEDULER_POLL_INTERVAL = 'minimum_scheduler_poll_interval' # pylint: disable=invalid-name
PROPERTY_MINIMUM_SCHEDULER_POLL_INTERVAL__DEFAULT = 10. # pylint: disable=invalid-name
PROPERTY_WORKDIR = 'workdir'
PROPERTY_SHEBANG = 'shebang'
Collection = ComputerCollection
    @classproperty
    def objects(cls) -> ComputerCollection:  # pylint: disable=no-self-argument
        # collection bound to the current default backend, cached per class
        return ComputerCollection.get_cached(cls, get_manager().get_backend())
    def __init__(  # pylint: disable=too-many-arguments
        self,
        label: str = None,
        hostname: str = None,
        description: str = '',
        transport_type: str = '',
        scheduler_type: str = '',
        workdir: str = None,
        backend: Optional['Backend'] = None,
    ) -> None:
        """Construct a new computer.

        :param label: computer label
        :param hostname: hostname to connect to
        :param description: human-readable description
        :param transport_type: transport plugin entry point name
        :param scheduler_type: scheduler plugin entry point name
        :param workdir: remote working directory (set via set_workdir)
        :param backend: storage backend; defaults to the current profile's
        """
        backend = backend or get_manager().get_backend()
        model = backend.computers.create(
            label=label,
            hostname=hostname,
            description=description,
            transport_type=transport_type,
            scheduler_type=scheduler_type
        )
        super().__init__(model)
        # workdir is not part of the backend create call, so apply it afterwards
        if workdir is not None:
            self.set_workdir(workdir)
def __repr__(self):
return f'<{self.__class__.__name__}: {str(self)}>'
def __str__(self):
return f'{self.label} ({self.hostname}), pk: {self.pk}'
    @property
    def uuid(self) -> str:
        """Return the UUID for this computer.

        This identifier is unique across all entities types and backend instances.

        :return: the entity uuid
        """
        return self._backend_entity.uuid
    @property
    def logger(self) -> logging.Logger:
        """Return the class-level logger used for computer-related messages."""
        return self._logger
@classmethod
def _label_validator(cls, label: str) -> None:
"""
Validates the label.
"""
if not label.strip():
raise exceptions.ValidationError('No label specified')
@classmethod
def _hostname_validator(cls, hostname: str) -> None:
"""
Validates the hostname.
"""
if not hostname.strip():
raise exceptions.ValidationError('No hostname specified')
    @classmethod
    def _description_validator(cls, description: str) -> None:
        """
        Validates the description. Any string (including empty) is accepted.
        """
        # The description is always valid
@classmethod
def _transport_type_validator(cls, transport_type: str) -> None:
"""
Validates the transport string.
"""
from aiida.plugins.entry_point import get_entry_point_names
if transport_type not in get_entry_point_names('aiida.transports'):
raise exceptions.ValidationError('The specified transport is not a valid one')
@classmethod
def _scheduler_type_validator(cls, scheduler_type: str) -> None:
"""
Validates the transport string.
"""
from aiida.plugins.entry_point import get_entry_point_names
if scheduler_type not in get_entry_point_names('aiida.schedulers'):
raise exceptions.ValidationError(f'The specified scheduler `{scheduler_type}` is not a valid one')
    @classmethod
    def _prepend_text_validator(cls, prepend_text: str) -> None:
        """
        Validates the prepend text string. Any string is accepted.
        """
        # no validation done
    @classmethod
    def _append_text_validator(cls, append_text: str) -> None:
        """
        Validates the append text string. Any string is accepted.
        """
        # no validation done
    @classmethod
    def _workdir_validator(cls, workdir: str) -> None:
        """
        Validates the workdir string: it must be non-empty, contain only the
        known `{username}` replacement field, and be an absolute path.
        """
        if not workdir.strip():
            raise exceptions.ValidationError('No workdir specified')

        try:
            # substitute a dummy username to check the format fields resolve
            convertedwd = workdir.format(username='test')
        except KeyError as exc:
            raise exceptions.ValidationError(f'In workdir there is an unknown replacement field {exc.args[0]}')
        except ValueError as exc:
            raise exceptions.ValidationError(f"Error in the string: '{exc}'")

        if not os.path.isabs(convertedwd):
            raise exceptions.ValidationError('The workdir must be an absolute path')
def _mpirun_command_validator(self, mpirun_cmd: Union[List[str], Tuple[str, ...]]) -> None:
"""
Validates the mpirun_command variable. MUST be called after properly
checking for a valid scheduler.
"""
if not isinstance(mpirun_cmd, (tuple, list)) or not all(isinstance(i, str) for i in mpirun_cmd):
raise exceptions.ValidationError('the mpirun_command must be a list of strings')
try:
job_resource_keys = self.get_scheduler().job_resource_class.get_valid_keys()
except exceptions.EntryPointError:
raise exceptions.ValidationError('Unable to load the scheduler for this computer')
subst = {i: 'value' for i in job_resource_keys}
subst['tot_num_mpiprocs'] = 'value'
try:
for arg in mpirun_cmd:
arg.format(**subst)
except KeyError as exc:
raise exceptions.ValidationError(f'In workdir there is an unknown replacement field {exc.args[0]}')
except ValueError as exc:
raise exceptions.ValidationError(f"Error in the string: '{exc}'")
    def validate(self) -> None:
        """
        Check if the attributes and files retrieved from the DB are valid.
        Raise a ValidationError if something is wrong.

        Must be able to work even before storing: therefore, use the get_attr and similar methods
        that automatically read either from the DB or from the internal attribute cache.

        For the base class, this is always valid. Subclasses will reimplement this.
        In the subclass, always call the super().validate() method first!
        """
        # NOTE(review): this strip check duplicates _label_validator below
        if not self.label.strip():
            raise exceptions.ValidationError('No name specified')

        self._label_validator(self.label)
        self._hostname_validator(self.hostname)
        self._description_validator(self.description)
        self._transport_type_validator(self.transport_type)
        self._scheduler_type_validator(self.scheduler_type)
        self._workdir_validator(self.get_workdir())

        try:
            mpirun_cmd = self.get_mpirun_command()
        except exceptions.DbContentError:
            raise exceptions.ValidationError('Error in the DB content of the metadata')

        # To be called AFTER the validation of the scheduler
        self._mpirun_command_validator(mpirun_cmd)
@classmethod
def _default_mpiprocs_per_machine_validator(cls, def_cpus_per_machine: Optional[int]) -> None:
"""
Validates the default number of CPUs per machine (node)
"""
if def_cpus_per_machine is None:
return
if not isinstance(def_cpus_per_machine, int) or def_cpus_per_machine <= 0:
raise exceptions.ValidationError(
'Invalid value for default_mpiprocs_per_machine, must be a positive integer, or an empty string if you '
'do not want to provide a default value.'
)
    def copy(self) -> 'Computer':
        """
        Return a copy of the current object to work with, not stored yet.
        """
        # copies the backend model; the clone is unstored until .store()
        return Computer.from_backend_entity(self._backend_entity.copy())
    def store(self) -> 'Computer':
        """
        Store the computer in the DB.

        Differently from Nodes, a computer can be re-stored if its properties
        are to be changed (e.g. a new mpirun command, etc.)

        :raises exceptions.ValidationError: if any attribute fails validation
        """
        # always validate before persisting
        self.validate()
        return super().store()
    @property
    def label(self) -> str:
        """Return the computer label.

        :return: the label.
        """
        return self._backend_entity.label
    @label.setter
    def label(self, value: str) -> None:
        """Set the computer label.

        :param value: the label to set.
        """
        self._backend_entity.set_label(value)
    @property
    def description(self) -> str:
        """Return the computer description.

        :return: the description.
        """
        return self._backend_entity.description
    @description.setter
    def description(self, value: str) -> None:
        """Set the computer description.

        :param value: the description to set.
        """
        self._backend_entity.set_description(value)
    @property
    def hostname(self) -> str:
        """Return the computer hostname.

        :return: the hostname.
        """
        return self._backend_entity.hostname
    @hostname.setter
    def hostname(self, value: str) -> None:
        """Set the computer hostname.

        :param value: the hostname to set.
        """
        self._backend_entity.set_hostname(value)
    @property
    def scheduler_type(self) -> str:
        """Return the computer scheduler type.

        :return: the scheduler type.
        """
        return self._backend_entity.get_scheduler_type()
    @scheduler_type.setter
    def scheduler_type(self, value: str) -> None:
        """Set the computer scheduler type.

        :param value: the scheduler type to set.
        """
        self._backend_entity.set_scheduler_type(value)
    @property
    def transport_type(self) -> str:
        """Return the computer transport type.

        :return: the transport_type.
        """
        return self._backend_entity.get_transport_type()
    @transport_type.setter
    def transport_type(self, value: str) -> None:
        """Set the computer transport type.

        :param value: the transport_type to set.
        """
        self._backend_entity.set_transport_type(value)
    @property
    def metadata(self) -> Dict[str, Any]:
        """Return the computer metadata.

        :return: the metadata.
        """
        return self._backend_entity.get_metadata()
    @metadata.setter
    def metadata(self, value: Dict[str, Any]) -> None:
        """Set the computer metadata.

        :param value: the metadata to set.
        """
        self._backend_entity.set_metadata(value)
def delete_property(self, name: str, raise_exception: bool = True) -> None:
"""
Delete a property from this computer
:param name: the name of the property
:param raise_exception: if True raise if the property does not exist, otherwise return None
"""
olddata = self.metadata
try:
del olddata[name]
self.metadata = olddata
except KeyError:
if raise_exception:
raise AttributeError(f"'{name}' property not found")
def set_property(self, name: str, value: Any) -> None:
"""Set a property on this computer
:param name: the property name
:param value: the new value
"""
metadata = self.metadata or {}
metadata[name] = value
self.metadata = metadata
def get_property(self, name: str, *args: Any) -> Any:
"""Get a property of this computer
:param name: the property name
:param args: additional arguments
:return: the property value
"""
if len(args) > 1:
raise TypeError('get_property expected at most 2 arguments')
olddata = self.metadata
try:
| |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
from typing import Dict, List, Optional, Tuple, Union
import torch
import copy
from torch import nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd.function import Function
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.config import configurable
from detectron2.layers import batched_nms, ShapeSpec
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.modeling.roi_heads.keypoint_head import build_keypoint_head
from detectron2.modeling.roi_heads.mask_head import build_mask_head
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals, add_ground_truth_to_proposals_single_image
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals, select_proposals_with_visible_keypoints, ROIHeads
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.utils.env import TORCH_VERSION
from .mypooler import MyROIPooler
from .my_fast_rcnn_output import MyFastRCNNOutputLayers
__all__ = ["TransformerROIHeads", "CascadeTransformerROIHeads"]
def box_cxcywh_to_xyxy(x):
    """Convert boxes from center form (cx, cy, w, h) to corner form
    (x0, y0, x1, y1) along the last dimension."""
    center_x, center_y, width, height = x.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = (center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h)
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from corner form (x0, y0, x1, y1) to center form
    (cx, cy, w, h) along the last dimension."""
    left, top, right, bottom = x.unbind(-1)
    # division by 2 and multiplication by 0.5 are bit-identical for floats
    center_form = ((left + right) * 0.5, (top + bottom) * 0.5,
                   right - left, bottom - top)
    return torch.stack(center_form, dim=-1)
def add_noise_to_boxes(boxes):
    """Randomly jitter xyxy boxes: centers shift by up to +/-10% of the box
    size and sizes scale by a factor in [0.8, 1.25] (= 0.8 ** U[-1, 1]).
    Input boxes must have strictly positive width and height."""
    center_form = box_xyxy_to_cxcywh(boxes)
    jitter = torch.rand(center_form.shape, device=center_form.device)
    centers = center_form[..., :2]
    sizes = center_form[..., 2:]
    # shift each center by up to +/-10% of the corresponding size
    noisy_centers = centers + sizes * (jitter[..., :2] - 0.5) * 0.2
    assert (sizes > 0).all().item()
    # rescale sizes by 0.8 raised to a uniform exponent in [-1, 1]
    noisy_sizes = sizes * (0.8 ** (jitter[..., 2:] * 2 - 1))
    assert (noisy_sizes > 0).all().item()
    noisy_center_form = torch.cat([noisy_centers, noisy_sizes], dim=-1)
    return box_cxcywh_to_xyxy(noisy_center_form)
@ROI_HEADS_REGISTRY.register()
class TransformerROIHeads(ROIHeads):
"""
It's "standard" in a sense that there is no ROI transform sharing
or feature sharing between tasks.
Each head independently processes the input features by each head's
own pooler and head.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
    @configurable
    def __init__(
        self,
        *,
        box_in_features: List[str],
        box_pooler: MyROIPooler,
        box_head: nn.Module,
        box_predictor: nn.Module,
        mask_in_features: Optional[List[str]] = None,
        mask_pooler: Optional[MyROIPooler] = None,
        mask_head: Optional[nn.Module] = None,
        keypoint_in_features: Optional[List[str]] = None,
        keypoint_pooler: Optional[MyROIPooler] = None,
        keypoint_head: Optional[nn.Module] = None,
        train_on_pred_boxes: bool = False,
        add_noise_to_proposals: bool = False,
        encoder_feature: Optional[str] = None,
        random_sample_size: bool = False,
        random_sample_size_upper_bound: float = 1.0,
        random_sample_size_lower_bound: float = 0.8,
        random_proposal_drop: bool = False,
        random_proposal_drop_upper_bound: float = 1.0,
        random_proposal_drop_lower_bound: float = 0.8,
        max_proposal_per_batch: int = 0,
        visualize: bool = False,
        **kwargs
    ):
        """
        NOTE: this interface is experimental.
        Args:
            box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extra region features for box head
            box_head (nn.Module): transform features to make box predictions
            box_predictor (nn.Module): make box predictions from the feature.
                Should have the same interface as :class:`FastRCNNOutputLayers`.
            mask_in_features (list[str]): list of feature names to use for the mask head.
                None if not using mask head.
            mask_pooler (ROIPooler): pooler to extra region features for mask head
            mask_head (nn.Module): transform features to make mask predictions
            keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``.
            train_on_pred_boxes (bool): whether to use proposal boxes or
                predicted boxes from the box head to train other heads.
            add_noise_to_proposals (bool): jitter proposal boxes during training
            random_sample_size / random_proposal_drop / max_proposal_per_batch:
                proposal-subsampling knobs with their respective bounds
            visualize (bool): keep extra outputs for visualization
        """
        super().__init__(**kwargs)
        # keep self.in_features for backward compatibility
        self.in_features = self.box_in_features = box_in_features
        self.box_pooler = box_pooler
        self.box_head = box_head
        self.box_predictor = box_predictor

        # mask/keypoint branches are enabled by the presence of their features
        self.mask_on = mask_in_features is not None
        if self.mask_on:
            self.mask_in_features = mask_in_features
            self.mask_pooler = mask_pooler
            self.mask_head = mask_head

        self.keypoint_on = keypoint_in_features is not None
        if self.keypoint_on:
            self.keypoint_in_features = keypoint_in_features
            self.keypoint_pooler = keypoint_pooler
            self.keypoint_head = keypoint_head

        self.train_on_pred_boxes = train_on_pred_boxes
        self.add_noise_to_proposals = add_noise_to_proposals
        self.encoder_feature = encoder_feature
        self.random_sample_size = random_sample_size
        self.random_proposal_drop = random_proposal_drop
        self.max_proposal_per_batch = max_proposal_per_batch
        self.random_proposal_drop_upper_bound = random_proposal_drop_upper_bound
        self.random_proposal_drop_lower_bound = random_proposal_drop_lower_bound
        self.random_sample_size_upper_bound = random_sample_size_upper_bound
        self.random_sample_size_lower_bound = random_sample_size_lower_bound
        self.visualize = visualize
    @classmethod
    def from_config(cls, cfg, input_shape):
        """Build the constructor kwargs for this head from a detectron2 config.

        :param cfg: detectron2 CfgNode
        :param input_shape: dict of feature name -> ShapeSpec
        :return: dict of keyword arguments for ``__init__``
        """
        ret = super().from_config(cfg)
        ret["visualize"] = cfg.MODEL.VISUALIZE
        ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        ret["add_noise_to_proposals"] = cfg.MODEL.ROI_BOX_HEAD.ADD_NOISE_TO_PROPOSALS
        ret["encoder_feature"] = cfg.MODEL.ROI_BOX_HEAD.ENCODER_FEATURE
        ret["random_sample_size"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE
        ret["random_sample_size_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_UPPER_BOUND
        ret["random_sample_size_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_LOWER_BOUND
        ret["random_proposal_drop"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP
        ret["random_proposal_drop_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_UPPER_BOUND
        ret["random_proposal_drop_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_LOWER_BOUND
        ret["max_proposal_per_batch"] = cfg.MODEL.ROI_BOX_HEAD.MAX_PROPOSAL_PER_BATCH
        # Subclasses that have not been updated to use from_config style construction
        # may have overridden _init_*_head methods. In this case, those overridden methods
        # will not be classmethods and we need to avoid trying to call them here.
        # We test for this with ismethod which only returns True for bound methods of cls.
        # Such subclasses will need to handle calling their overridden _init_*_head methods.
        if inspect.ismethod(cls._init_box_head):
            ret.update(cls._init_box_head(cfg, input_shape))
        if inspect.ismethod(cls._init_mask_head):
            ret.update(cls._init_mask_head(cfg, input_shape))
        if inspect.ismethod(cls._init_keypoint_head):
            ret.update(cls._init_keypoint_head(cfg, input_shape))
        ret["proposal_matcher"] = Matcher(
            cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
            cfg.MODEL.ROI_HEADS.IOU_LABELS,
            allow_low_quality_matches=False,
        )
        return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
    """Build the box pooler, box head and box predictor from the config.

    Returns a dict of constructor kwargs consumed by ``from_config``.
    """
    in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
    pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
    sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
    pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
    pooler_scales = tuple(1.0 / input_shape[f].stride for f in in_features)
    # If this module is applied on multiple feature maps (as in FPN), the same
    # predictor is shared across them, so every channel count must agree.
    in_channels = [input_shape[f].channels for f in in_features]
    assert len(set(in_channels)) == 1, in_channels
    in_channels = in_channels[0]
    box_pooler = MyROIPooler(
        output_size=pooler_resolution,
        scales=pooler_scales,
        sampling_ratio=sampling_ratio,
        pooler_type=pooler_type,
    )
    # "box head" and "box predictor" are split for historical reasons; they are
    # always used together, and new subclasses don't need a separate predictor.
    box_head = build_box_head(
        cfg,
        ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution),
    )
    return {
        "box_in_features": in_features,
        "box_pooler": box_pooler,
        "box_head": box_head,
        "box_predictor": MyFastRCNNOutputLayers(cfg, box_head.output_shape),
    }
@classmethod
def _init_mask_head(cls, cfg, input_shape):
    """Mask head is not supported by this model; only MASK_ON=False is accepted."""
    if cfg.MODEL.MASK_ON:
        raise NotImplementedError
    return {}
@classmethod
def _init_keypoint_head(cls, cfg, input_shape):
    """Keypoint head is not supported by this model; only KEYPOINT_ON=False is accepted."""
    if cfg.MODEL.KEYPOINT_ON:
        raise NotImplementedError
    return {}
def forward(
    self,
    images: ImageList,
    features: Dict[str, torch.Tensor],
    proposals: List[Instances],
    targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
    """
    See :class:`ROIHeads.forward`.
    """
    del images  # unused; kept for interface compatibility
    if self.training:
        assert targets
        proposals = self.label_and_sample_proposals(proposals, targets)
        losses = self._forward_box(features, proposals, targets)
        # The mask/keypoint heads usually consume the original proposals; when
        # `self.train_on_pred_boxes` is True the proposals instead carry the
        # boxes predicted by the box head.
        losses.update(self._forward_mask(features, proposals))
        losses.update(self._forward_keypoint(features, proposals))
        return proposals, losses
    # Inference path.
    attention_maps = None
    if self.visualize:
        pred_instances, attention_maps = self._forward_box(features, proposals)
    else:
        pred_instances = self._forward_box(features, proposals)
    # Cascaded prediction: mask/keypoint heads only run on the top-scoring boxes.
    pred_instances = self.forward_with_given_boxes(features, pred_instances)
    if self.visualize:
        # Attach the raw proposals and attention maps for visualization.
        for instance, proposal in zip(pred_instances, proposals):
            instance._fields["proposal"] = proposal.proposal_boxes.tensor
        for instance, attention in zip(pred_instances, attention_maps):
            instance._fields["attention"] = attention
    return pred_instances, {}
def forward_with_given_boxes(
    self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> List[Instances]:
    """
    Compute the remaining (non-box) per-ROI outputs for boxes already stored in
    `instances`.

    Useful for downstream tasks (and test-time augmentation) where the boxes are
    known and only the other heads' outputs are needed.

    Args:
        features: same as in `forward()`
        instances (list[Instances]): instances to predict other outputs for;
            must already carry "pred_boxes" and "pred_classes".

    Returns:
        list[Instances]: the same objects, extended with fields such as
        `pred_masks` or `pred_keypoints`.
    """
    assert not self.training
    assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
    for head in (self._forward_mask, self._forward_keypoint):
        instances = head(features, instances)
    return instances
def _forward_box(
self, features: Dict[str, torch.Tensor], proposals: List[Instances], targets=None
):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
box_features = [features[f] for f in self.box_in_features]
padded_box_features, dec_mask, inds_to_padded_inds = (
self.box_pooler(box_features, [x.proposal_boxes for x in proposals]))
enc_feature = None
enc_mask = None
if self.box_head.use_encoder_decoder:
enc_feature = features[self.encoder_feature]
b = len(proposals)
h = max([x.image_size[0] for x in proposals])
w = max([x.image_size[1] for x in proposals])
enc_mask = torch.ones((b, h, w), dtype=torch.bool, device=padded_box_features.device)
for c, image_size in enumerate([x.image_size for x in proposals]):
enc_mask[c, :image_size[0], :image_size[1]] = False
names = ["res1", "res2", "res3", "res4", "res5"]
if self.encoder_feature == "p6":
names.append("p6")
for | |
RQ(dim, par)
class GaussianProcessModel(Model):
    """
    Gaussian process regression model of the integrand in the Bayesian quadrature.
    Parameters
    ----------
    dim : int
        Number of input dimensions.
    kern_par : ndarray
        Kernel parameters in matrix.
    kern_str : str
        Acronym of the covariance function of the Gaussian process model.
    point_str : str
        Acronym for the sigma-point set to use in BQ.
    point_par : dict
        Parameters of the sigma-point set.
    estimate_par : bool, optional
        If `True`, kernel parameters will be estimated. Applies only under `MarginalInference`.
    """
    def __init__(self, dim, kern_par, kern_str, point_str, point_par=None, estimate_par=False):
        super(GaussianProcessModel, self).__init__(dim, kern_par, kern_str, point_str, point_par, estimate_par)
    def predict(self, test_data, fcn_obs, x_obs=None, par=None):
        """
        Gaussian process predictions.
        Parameters
        ----------
        test_data : ndarray
            Test data, shape (D, M).
        fcn_obs : ndarray
            Observations of the integrand at sigma-points.
        x_obs : ndarray
            Training inputs. Defaults to the model's sigma-points when None.
        par : ndarray
            Kernel parameters. When None, the kernel's stored defaults are used.
        Returns
        -------
        mean : ndarray
            Predictive mean.
        var : ndarray
            Predictive variance.
        """
        if x_obs is None:
            x_obs = self.points
        par = self.kernel.get_parameters(par)
        iK = self.kernel.eval_inv_dot(par, x_obs)
        kx = self.kernel.eval(par, test_data, x_obs)
        kxx = self.kernel.eval(par, test_data, test_data, diag=True)
        # GP mean and predictive variance
        mean = np.squeeze(kx.dot(iK).dot(fcn_obs.T))
        # the einsum evaluates only the diagonal of kx.dot(iK).dot(kx.T)
        var = np.squeeze(kxx - np.einsum('im,mn,ni->i', kx, iK, kx.T))
        return mean, var
    def bq_weights(self, par, *args):
        """
        Bayesian quadrature weights derived from the kernel expectations.
        Also caches the kernel expectations (`self.q`, `self.Q`, `self.iK`) and
        stores the expected model variance and integral variance on the instance.
        Parameters
        ----------
        par : ndarray
            Kernel parameters. When None, the kernel's stored defaults are used.
        Returns
        -------
        : tuple
            Mean weights `w_m`, covariance weights `w_c`, cross-covariance
            weights `w_cc`, expected model variance and integral variance.
        """
        par = self.kernel.get_parameters(par)
        x = self.points
        # inverse kernel matrix
        iK = self.kernel.eval_inv_dot(par, x, scaling=False)
        # Kernel expectations
        q = self.kernel.exp_x_kx(par, x)
        Q = self.kernel.exp_x_kxkx(par, par, x)
        R = self.kernel.exp_x_xkx(par, x)
        # BQ weights in terms of kernel expectations
        w_m = q.dot(iK)
        w_c = iK.dot(Q).dot(iK)
        w_cc = R.dot(iK)
        # save the kernel expectations for later
        self.q, self.Q, self.iK = q, Q, iK
        # expected model variance
        self.model_var = self.kernel.exp_x_kxx(par) * (1 - np.trace(Q.dot(iK)))
        # integral variance
        self.integral_var = self.kernel.exp_xy_kxy(par) - q.T.dot(iK).dot(q)
        # covariance weights should be symmetric
        if not np.array_equal(w_c, w_c.T):
            # enforce symmetry lost to floating-point round-off
            w_c = 0.5 * (w_c + w_c.T)
        return w_m, w_c, w_cc, self.model_var, self.integral_var
    def exp_model_variance(self, par, *args):
        # Expected model variance: E[k(x, x)] * (1 - tr(Q iK)).
        # NOTE(review): unlike bq_weights(), `par` is used as-is and
        # eval_inv_dot() is called without scaling=False — confirm intended.
        iK = self.kernel.eval_inv_dot(par, self.points)
        Q = self.kernel.exp_x_kxkx(par, par, self.points)
        return self.kernel.exp_x_kxx(par) * (1 - np.trace(Q.dot(iK)))
    def integral_variance(self, par, *args):
        # Variance of the integral estimate: E[k(x, x')] - q^T iK q.
        par = self.kernel.get_parameters(par)  # if par None returns default kernel parameters
        q = self.kernel.exp_x_kx(par, self.points)
        iK = self.kernel.eval_inv_dot(par, self.points, scaling=False)
        kbar = self.kernel.exp_xy_kxy(par)
        return kbar - q.T.dot(iK).dot(q)
    def neg_log_marginal_likelihood(self, log_par, fcn_obs, x_obs, jitter):
        """
        Negative marginal log-likelihood of single-output Gaussian process regression model.
        The likelihood is given by
        .. math::
            -\\log p(Y \\mid X, \\theta) = -\\sum_{e=1}^{\\mathrm{E}} \\log p(y_e \\mid X, \\theta)
        where :math:`y_e` is e-th column of :math:`Y`. We have the same parameters :math:`\\theta` for all outputs,
        which is more limiting than the multi-output case. For single-output dimension the expression is equivalent to
        negative marginal log-likelihood.
        Parameters
        ----------
        log_par : (num_par, ) ndarray
            Kernel log-parameters.
        fcn_obs : (num_pts, dim_out) ndarray
            Function values.
        x_obs : ndarray
            Function inputs.
        jitter : ndarray
            Regularization term for kernel matrix inversion.
        Returns
        -------
        : float
            Negative log-likelihood and gradient for given parameter.
        Notes
        -----
        Used as an objective function by the `Model.optimize()` to find an estimate of the kernel parameters.
        """
        # convert from log-par to par
        par = np.exp(log_par)
        num_data, num_out = fcn_obs.shape
        K = self.kernel.eval(par, x_obs) + jitter  # (N, N)
        L = la.cho_factor(K)  # jitter included from eval
        a = la.cho_solve(L, fcn_obs)  # (N, E)
        y_dot_a = np.einsum('ij, ji', fcn_obs.T, a)  # sum of diagonal of A.T.dot(A)
        a_out_a = np.einsum('i...j, ...jn', a, a.T)  # (N, N) sum over of outer products of columns of A
        # negative total NLML
        nlml = num_out * np.sum(np.log(np.diag(L[0]))) + 0.5 * (y_dot_a + num_out * num_data * np.log(2 * np.pi))
        # negative marginal log-likelihood derivatives w.r.t. hyper-parameters
        dK_dTheta = self.kernel.der_par(par, x_obs)  # (N, N, num_hyp)
        iKdK = la.cho_solve(L, dK_dTheta)
        # gradient of total NLML
        # np.trace over the first two axes of the (N, N, num_hyp) stack yields one
        # derivative per hyper-parameter.
        dnlml_dtheta = 0.5 * np.trace((num_out * iKdK - a_out_a.dot(dK_dTheta)))  # (num_par, )
        return nlml, dnlml_dtheta
class BayesSardModel(Model):
"""
Gaussian process model for Bayes-Sard quadrature. The model has multivariate polynomial prior mean.
Parameters
----------
dim : int
Dimension of the points (integration domain).
kern_par : ndarray
Kernel parameters in a vector.
multi_ind : int or ndarray, optional
    Either the total degree of the multivariate polynomial GP prior mean, or a matrix of
    multi-indices defining the polynomial basis directly.
point_str : str, optional
    String abbreviation for the point-set.
point_par : dict, optional
Any parameters for constructing desired point-set.
"""
def __init__(self, dim, kern_par, multi_ind=2, point_str='ut', point_par=None, estimate_par=False):
    """Initialize a Bayes-Sard GP model with a multivariate polynomial prior mean.

    Parameters
    ----------
    dim : int
        Dimension of the integration domain.
    kern_par : ndarray
        Kernel parameters in a vector.
    multi_ind : int or ndarray, optional
        If an int, monomials of total degree <= `multi_ind` form the basis;
        if an ndarray, it is used directly as the matrix of multi-indices.
    point_str : str, optional
        String abbreviation for the point-set.
    point_par : dict, optional
        Any parameters for constructing the desired point-set.
    estimate_par : bool, optional
        If True, kernel parameters will be estimated.

    Raises
    ------
    ValueError
        If `multi_ind` is neither an int nor an ndarray.
    """
    super(BayesSardModel, self).__init__(dim, kern_par, 'rbf', point_str, point_par, estimate_par)
    # isinstance (rather than `type(...) is`) also admits NumPy integer
    # scalars, which behave identically here.
    if isinstance(multi_ind, (int, np.integer)):
        # multi-index: monomials of total degree <= multi_ind
        self.mulind = np.hstack([n_sum_k(dim, td) for td in range(multi_ind + 1)])
    elif isinstance(multi_ind, np.ndarray):
        self.mulind = multi_ind
    else:
        raise ValueError('Multi-index error: multi-index has to be either int or ndarray')
def _exp_x_px(self, multi_ind):
"""
Compute expectation \\mathbb{E}[p(x)^T]_{q} for all :math:`q`. The expectation is equal to
.. math::
\\prod_{d=1}^D (\\alpha_d^q - 1)!!
when :math:`\\alpha^q_d` is even :math:`\\forall q`. Otherwise the expectation is zero.
Parameters
----------
multi_ind : (D, Q) ndarray
Matrix of multi-indices. Each column is a multi-index :math:`\\alpha^q \\in \\mathbb{N}_0^D` defining one
of the Q multivariate polynomial basis functions.
Returns
-------
: (Q, ) ndarray
Vector of expectations.
"""
dim, num_basis = multi_ind.shape
alpha = multi_ind - 1
result = np.zeros((num_basis, ))
for q in range(num_basis):
all_even = np.all(multi_ind[:, q] % 2 == 0)
if all_even:
result[q] = np.prod([factorial2(alpha[d, q], exact=True) for d in range(dim)])
return result
def _exp_x_xpx(self, multi_ind):
"""
Compute expectation \\mathbb{E}[xp(x)^T]_{eq} for all :math:`e` and :math:`q`. The expectation is equal to
.. math::
\\alpha^q_e\\prod_{d \neq e} (\\alpha^q_d - 1)!!
when :math:`\\alpha^q_e + 1` is even and :math:`\\alpha^q_d, \\forall d \neq e` are even.
Otherwise the expectation is zero.
Parameters
----------
multi_ind : (D, Q) ndarray
Matrix of multi-indices. Each column is a multi-index :math:`\\alpha^q \\in \\mathbb{N}_0^D` defining one
of the Q multivariate polynomial basis functions.
Returns
-------
: (D, Q) ndarray
Matrix of expectations.
"""
dim, num_bases = multi_ind.shape
d_ind = np.arange(dim)
result = np.zeros(multi_ind.shape)
for d in range(dim):
for q in range(num_bases):
# all remaining multi-indices even? # i.e. none are odd?
alpha_min_d = multi_ind[d_ind != d, q]
all_even = np.all(alpha_min_d % 2 == 0)
if (multi_ind[d, q] + 1) % 2 == 0 and all_even:
amd_fact2 = [factorial2(amd - 1, exact=True) for amd in alpha_min_d]
result[d, q] = multi_ind[d, q]*np.prod(amd_fact2)
else:
result[d, q] = 0
return result
def _exp_x_pxpx(self, multi_ind):
"""
Compute expectation \\mathbb{E}[p(x)p(x)^T]_{rq} for all :math:`r` and :math:`q`. The expectation is equal to
.. math::
\\prod_{d = 1}^D (\\alpha^q_d + \\alpha^r_d - 1)!!
when :math:`\\forall d,\\quad \\alpha^q_d + \\alpha^r_d` are even (where :math:`r` and :math:`q` are fixed).
Otherwise the expectation is zero.
Parameters
----------
multi_ind : (D, Q) ndarray
Matrix of multi-indices. Each column is a multi-index :math:`\\alpha^q \\in \\mathbb{N}_0^D` defining one
of the Q multivariate polynomial basis functions.
Returns
-------
: (Q, Q) ndarray
Matrix of expectations.
"""
dim, num_bases = multi_ind.shape
result = np.zeros((num_bases, num_bases))
for r in range(num_bases):
for q in range(num_bases):
all_even = np.all((multi_ind[:, r] + multi_ind[:, q]) % 2 == 0)
if all_even:
apa_fact2 = [factorial2(multi_ind[d, r] + multi_ind[d, q] - 1, exact=True) for d in range(dim)]
result[r, q] = np.prod(apa_fact2)
else:
result[r, q] = 0
return result
def _exp_x_kxpx(self, par, multi_ind, x):
"""
Compute expectation :math:`\\mathbb{E}[k(x)p(x)^T]_{nq}`. For given :math:`n` and :math:`q`, the expectation is
given by
.. math::
\\prod_{d=1}^D\left[ a_{ijd} b_{ijd} \right]
where
.. math::
a_{ijd} = \\ell_d(1+\ell^2_d)^{-(1+\\alpha_{dj})/2} \\exp\left(-\\frac{x_{dj}^2}{2(1+\ell_d^2)}\right)
.. math::
b_{ijd} = \\sum_{m=0}^{\left\lfloor \\alpha_{dj}/2 \right\rfloor}
\\frac{\\alpha_{dj}!}{2^m m! (\\alpha_{dj} - 2m)!}
\\ell_d^{2m}\left(\\frac{x_{di}}{\\sqrt{1+\\ell^2_d}}\right)^{\\alpha_{dj}-2m}
Parameters
----------
par : (dim, ) ndarray
Kernel parameters.
multi_ind : (D, Q) ndarray
Matrix of multi-indices. Each column is a multi-index :math:`\\alpha^q \\in \\mathbb{N}_0^D` defining one
of the Q multivariate polynomial basis functions.
x : (dim, N) ndarray
Data points.
Returns
-------
: (N, Q) ndarray
Matrix of expectations.
"""
dim, num_bases = multi_ind.shape
num_pts = x.shape[1]
scale, sqrt_inv_lam = self.kernel._unpack_parameters(par)
ell = np.diag(sqrt_inv_lam) ** -2
result = np.zeros((num_pts, num_bases))
dim_zeros = np.zeros((dim, ))
fact = lambda num: factorial(num, exact=True)
for n in range(num_pts):
for q in range(num_bases):
# compute factors in the product
temp = dim_zeros.copy()
for d in range(dim):
| |
<reponame>CrafterSvK/liberapay.com<gh_stars>0
from base64 import b64decode, b64encode
from binascii import hexlify, unhexlify
from datetime import date, datetime, timedelta
import errno
import fnmatch
from hashlib import sha256
import hmac
from operator import getitem
import os
import re
import socket
from urllib.parse import quote as urlquote
from pando import Response, json
from pando.utils import to_rfc822, utcnow
from markupsafe import Markup
from liberapay.constants import SAFE_METHODS
from liberapay.elsewhere._paginators import _modify_query
from liberapay.exceptions import (
AccountSuspended, AuthRequired, ClosedAccount, LoginRequired, TooManyAdminActions
)
from liberapay.models.community import Community
from liberapay.i18n.base import LOCALE_EN, add_helpers_to_context
from liberapay.website import website
from liberapay.utils import cbor
BEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')
def get_participant(state, restrict=True, redirect_stub=True, allow_member=False,
                    block_suspended_user=False, redirect_canon=True):
    """Given a Request, raise Response or return Participant.
    If restrict is True then we'll restrict access to owners and admins.

    :param state: the request state mapping
    :param restrict: when True, only the participant themself, a team member
        (with allow_member), or an admin may access the page
    :param redirect_stub: redirect stub accounts to their resolved page
    :param allow_member: accept members of a team account as owners
    :param block_suspended_user: raise AccountSuspended when the owner is suspended
    :param redirect_canon: redirect safe requests to the canonical username URL
    """
    request = state['request']
    response = state['response']
    user = state['user']
    slug = request.path['username']
    _ = state['_']
    if restrict and user.ANON:
        raise LoginRequired
    if slug.startswith('~'):
        # "~123" addresses a participant by numeric id
        try:
            value = int(slug[1:])
        except ValueError:
            raise response.error(404)
        participant = user if user and user.id == value else None
    elif slug:
        # otherwise the slug is a (case-insensitive) username
        value = slug.lower()
        participant = user if user and user.username.lower() == value else None
    else:
        raise response.error(404)
    if participant is None:
        # not the logged-in user: look the account up in the database
        if type(value) is int:
            participant = website.db.Participant.from_id(value, _raise=False)
        else:
            participant = website.db.Participant.from_username(value)
        if participant is None:
            if type(value) is str:
                # the username may have been renamed; follow stored redirections
                look_up_redirections(request, response)
            raise response.error(404)
        elif participant.kind == 'community':
            # community accounts live under /for/<name>
            c_name = website.db.one("""
                SELECT name
                  FROM communities
                 WHERE participant = %s
            """, (participant.id,))
            raise response.redirect('/for/%s' % c_name)
    if redirect_canon and request.method in SAFE_METHODS:
        if slug != participant.username:
            # normalize the URL to the canonical capitalization of the username
            canon = '/' + participant.username + request.line.uri.decoded[len(slug)+1:]
            raise response.redirect(canon)
    status = participant.status
    if status == 'closed':
        # closed accounts are only visible to admins
        if not user.is_admin:
            raise ClosedAccount(participant)
    elif status == 'stub':
        if redirect_stub:
            to = participant.resolve_stub()
            if not to:
                # Account has been taken over
                raise response.error(404)
            raise response.redirect(to)
    if restrict:
        if participant != user:
            if allow_member and participant.kind == 'group' and user.member_of(participant):
                pass
            elif user.is_admin:
                # admins may view restricted pages, but their requests are logged
                log_admin_request(user, participant, request)
            else:
                raise response.error(403, _("You are not authorized to access this page."))
    if block_suspended_user and participant.is_suspended and participant == user:
        raise AccountSuspended()
    if allow_member and (user == participant or participant.kind == 'group' and user.member_of(participant)):
        state['can_switch_account'] = True
    return participant
def get_community(state, restrict=False):
    """Given a Request, raise Response or return Community.

    :param restrict: when True, only the community creator (or an admin, whose
        request is then logged) is allowed
    """
    request, response = state['request'], state['response']
    user = state['user']
    name = request.path['name']
    c = Community.from_name(name)
    if request.method in SAFE_METHODS:
        if not c:
            # unknown community: offer to create it
            response.redirect('/for/new?name=' + urlquote(name))
        if c.name != name:
            # normalize the URL to the canonical capitalization of the name
            response.redirect('/for/' + c.name + request.line.uri.decoded[5+len(name):])
    elif not c:
        raise response.error(404)
    elif user.ANON:
        raise AuthRequired
    if restrict:
        if user.ANON:
            raise LoginRequired
        if user.id != c.creator:
            if user.is_admin:
                # admins may proceed, but their requests are logged
                log_admin_request(user, c.participant, request)
            else:
                _ = state['_']
                raise response.error(403, _("You are not authorized to access this page."))
    return c
def log_admin_request(admin, participant, request):
    """Record a non-read-only admin HTTP request in the participant's event log."""
    if request.method in SAFE_METHODS:
        return
    # Rate-limit unsafe admin actions; raises TooManyAdminActions when exceeded.
    website.db.hit_rate_limit('admin.http-unsafe', admin.id, TooManyAdminActions)
    body = {
        k: (v[0] if len(v) == 1 else v)
        for k, v in request.body.items()
        if k != 'csrf_token'  # never persist the CSRF token
    }
    action_data = {
        'method': request.method,
        'path': request.path.raw,
        'qs': dict(request.qs),
        'body': body,
    }
    participant.add_event(website.db, 'admin_request', action_data, admin.id)
def look_up_redirections(request, response):
    """Redirect the request if its path matches a stored redirection prefix."""
    path = request.path.raw
    if not path.endswith('/'):
        path += '/'
    redirection = website.db.one("""
        SELECT *
          FROM redirections
         WHERE %s LIKE from_prefix
    """, (path.lower(),))
    if not redirection:
        return
    # Swap the matched prefix for the target one, keeping the rest of the path.
    tail = path[len(redirection.from_prefix.rstrip('%')):]
    response.redirect((redirection.to_prefix + tail).rstrip('/'))
def form_post_success(state, msg='', redirect_url=None):
    """Conclude a successful form POST with a JSON reply (AJAX) or a redirect."""
    request, response = state['request'], state['response']
    if request.headers.get(b'X-Requested-With') == b'XMLHttpRequest':
        # AJAX caller: reply with the message as JSON instead of redirecting.
        raise response.json({"msg": msg} if msg else {})
    if not redirect_url:
        redirect_url = request.body.get('back_to') or request.line.uri.decoded
    redirect_url = response.sanitize_untrusted_url(redirect_url)
    # Carry the success message over to the next page via the query string.
    redirect_url = _modify_query(redirect_url, 'success', b64encode_s(msg))
    response.redirect(redirect_url)
def b64decode_s(s, **kw):
    """Decode a URL-safe base64 string produced by `b64encode_s`.

    A leading '.' marks a binary payload (returned as bytes); '~' stands in for
    the '=' padding character. On invalid input, returns `kw['default']` when
    given, otherwise raises a 400 Response.
    """
    def fail():
        if 'default' in kw:
            return kw['default']
        raise Response(400, "invalid base64 input")
    try:
        raw = s.encode('ascii') if hasattr(s, 'encode') else s
    except UnicodeError:
        return fail()
    postprocess = lambda b: b.decode('utf8')
    if raw[:1] == b'.':
        # leading dot: the payload is binary, keep it as bytes
        postprocess = lambda b: b
        raw = raw[1:]
    raw = raw.replace(b'~', b'=')
    try:
        return postprocess(b64decode(raw, '-_'))
    except Exception:
        try:
            # For retrocompatibility with the standard base64 alphabet
            return postprocess(b64decode(raw))
        except Exception:
            return fail()
def b64encode_s(s):
    """URL-safe base64 encoding with '~' in place of the '=' padding.

    Text input is encoded as UTF-8; bytes input that is not valid UTF-8 gets a
    leading '.' so that `b64decode_s` knows to return bytes instead of str.
    """
    marker = b''
    if isinstance(s, bytes):
        try:
            s.decode('utf8')  # already valid utf8? then no marker is needed
        except UnicodeError:
            marker = b'.'
    else:
        s = s.encode('utf8')
    encoded = marker + b64encode(s, b'-_').replace(b'=', b'~')
    return encoded.decode('ascii')
def excerpt_intro(text, length=175):
    """Return a short plain-text excerpt of `text`, at most about `length` chars.

    Markup input is reduced to its first paragraph and stripped of tags; plain
    text is cut at its first line. An ellipsis is appended when truncated.
    """
    if not text:
        return ''
    if isinstance(text, Markup):
        # keep only the first paragraph, then drop the HTML tags
        end = text.find('</p>')
        if end != -1:
            text = text[:end]
        text = text.striptags().strip()
    else:
        text = text.lstrip().split('\n', 1)[0].rstrip()
    if len(text) <= length:
        return text
    text = text[:length]
    if text[-1] == '.':
        # don't add an ellipsis directly after a dot
        return text + ' […]'
    if text[-1] != ' ':
        # try to avoid cutting a word in half
        space = text.rfind(' ')
        if space > 0.9 * length:
            text = text[:space+1]
    return text + '…'
def is_card_expired(exp_year, exp_month):
    """Return True when the (year, month) expiry date is strictly in the past."""
    today = date.today()
    if exp_year != today.year:
        return exp_year < today.year
    # same year: expired only when the month has already passed
    return exp_month < today.month
def get_owner_name(account):
    """Return the display name of an account owner, or '' without an account."""
    if not account:
        return ''
    if account.PersonType == 'NATURAL':
        # natural person: compose the name from first and last name
        return account.FirstName + ' ' + account.LastName
    # legal entity: use the registered name
    return account.Name
def get_owner_address(bank_account, mp_account):
    """Return the owner's postal address.

    Returns '' when there is no account at all, and None when the selected
    address has no country (i.e. is unusable).
    """
    if not mp_account:
        return ''
    if bank_account:
        # the bank account's owner address takes precedence
        addr = bank_account.OwnerAddress
    elif mp_account.PersonType == 'NATURAL':
        addr = mp_account.Address
    else:
        addr = mp_account.HeadquartersAddress
    return addr if addr.Country else None
def obfuscate(n, x, y):
    """Mask the characters of `n` between positions `x` and `y` with 'x'."""
    masked = 'x' * len(n[x:y])
    return n[:x] + masked + n[y:]
def ensure_str(s):
    """Coerce `s` to `str`: bytes are decoded as ASCII, str passes through."""
    if isinstance(s, str):
        return s
    if isinstance(s, bytes):
        return s.decode('ascii')
    # neither str nor bytes: mirror the historical behavior (ASCII-encode it)
    return s.encode('ascii')
def set_cookie(cookies, key, value, expires=None, httponly=True, path='/', samesite='lax'):
    """Add a cookie to the `cookies` jar with the site's standard attributes.

    :param expires: a `timedelta` (relative to now), a `datetime`, a
        preformatted string, or None for a session cookie
    """
    key = ensure_str(key)
    cookies[key] = ensure_str(value)
    cookie = cookies[key]
    if expires:
        if isinstance(expires, timedelta):
            # relative expiry: convert to an absolute datetime first
            expires += utcnow()
        if isinstance(expires, datetime):
            expires = to_rfc822(expires)
        cookie['expires'] = ensure_str(expires)
    if httponly:
        cookie['httponly'] = True
    if path:
        cookie['path'] = ensure_str(path)
    if samesite:
        cookie['samesite'] = ensure_str(samesite)
    if website.cookie_domain:
        cookie['domain'] = ensure_str(website.cookie_domain)
    if website.canonical_scheme == 'https':
        # only send the cookie over TLS in production
        cookie['secure'] = True
def erase_cookie(cookies, key, **kw):
    """Expire the cookie named `key` by re-setting it to an empty value in the past."""
    set_cookie(cookies, key, '', expires=BEGINNING_OF_EPOCH, **kw)
def to_javascript(obj):
    """For when you want to inject an object into a <script> tag.

    '</' is escaped so the serialized payload cannot close the surrounding
    script element.
    """
    return json.dumps(obj).replace('</', '<\\/')
# Strips height/width/x/y/xmlns attributes from an <svg> opening tag so the
# caller-supplied values can replace them.
svg_attrs_re = re.compile(r'\s+(?:height|width|x|y|xmlns)=(["\']).*?\1')
def include_svg(svg, height, width, x=None, y=None):
    """For when you want to include an SVG in an HTML page or in another SVG.

    Returns the SVG as Markup with the root tag's size/position attributes
    replaced by the given `height`, `width` and optional `x`/`y`.
    """
    assert svg.startswith('<svg')
    i = svg.find('>')
    assert i != -1
    d = locals()  # lets the loop below look up the parameters by name
    attrs = svg_attrs_re.sub('', svg[4:i])
    for a in ('height', 'width', 'x', 'y'):
        v = d[a]
        if v is None:
            continue
        attrs += ' %s="%s"' % (a, v)
    return Markup(svg[:4] + attrs + svg[i:])
def group_by(iterable, key, attr=False, ignored_exceptions=KeyError):
    """Group the elements of `iterable` into a dict of lists.

    `key` may be a callable (applied to each element), an item key, or — when
    `attr` is True — an attribute name. Elements whose key extraction raises
    `ignored_exceptions` are silently skipped (non-callable keys only).
    """
    groups = {}
    if callable(key):
        for element in iterable:
            groups.setdefault(key(element), []).append(element)
        return groups
    extract = getattr if attr else getitem
    for element in iterable:
        try:
            k = extract(element, key)
        except ignored_exceptions:
            continue
        groups.setdefault(k, []).append(element)
    return groups
def find_files(directory, pattern):
    """Yield the paths of files under `directory` whose names match the glob `pattern`."""
    for root, _dirs, files in os.walk(directory):
        for name in fnmatch.filter(files, pattern):
            yield os.path.join(root, name)
def serialize(context):
    """Serialize a state dict to hex-encoded canonical CBOR.

    Values with an `_asdict` method (e.g. namedtuples, DB records) are first
    converted to plain dicts, in place. The b'\\x' prefix matches the marker
    checked by `deserialize`.
    """
    for k, v in context.items():
        if callable(getattr(v, '_asdict', None)):
            context[k] = v._asdict()
    return b'\\x' + hexlify(cbor.dumps(context, canonical=True))
def deserialize(context):
    """Inverse of `serialize`: decode a (possibly hex-encoded) CBOR blob into a dict."""
    if isinstance(context, memoryview) and context[:2].tobytes() == b'\\x':
        # strip the hex marker added by serialize() and undo the hex encoding
        context = unhexlify(context[2:])
    return cbor.loads(context)
def pid_exists(pid):
    """Check whether pid exists in the current process table. UNIX only.

    Source: http://stackoverflow.com/a/6940314/2729778
    """
    if not pid > 0:
        raise ValueError("bad PID %s" % pid)
    try:
        os.kill(pid, 0)  # signal 0 performs no action, only an existence check
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        if err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        # According to "man 2 kill" possible error values are (EINVAL, EPERM, ESRCH)
        raise
    return True
def build_s3_object_url(key):
    """Build a presigned (AWS Signature Version 4) GET URL for an S3 object.

    The URL is valid for 24 hours (X-Amz-Expires=86400).
    NOTE: the .format(**locals()) calls below depend on the local variable
    names — rename with care.
    """
    now = utcnow()
    timestamp = now.strftime('%Y%m%dT%H%M%SZ')
    today = timestamp.split('T', 1)[0]
    region = website.app_conf.s3_region
    access_key = website.app_conf.s3_public_access_key
    endpoint = website.app_conf.s3_endpoint
    assert endpoint.startswith('https://')
    host = endpoint[8:]
    querystring = (
        "X-Amz-Algorithm=AWS4-HMAC-SHA256&"
        "X-Amz-Credential={access_key}%2F{today}%2F{region}%2Fs3%2Faws4_request&"
        "X-Amz-Date={timestamp}&"
        "X-Amz-Expires=86400&"
        "X-Amz-SignedHeaders=host"
    ).format(**locals())
    canonical_request = (
        "GET\n"
        "/{key}\n"
        "{querystring}\n"
        "host:{host}\n"
        "\n"
        "host\n"
        "UNSIGNED-PAYLOAD"
    ).format(**locals()).encode()
    canonical_request_hash = sha256(canonical_request).hexdigest()
    string_to_sign = (
        "AWS4-HMAC-SHA256\n"
        "{timestamp}\n"
        "{today}/{region}/s3/aws4_request\n"
        "{canonical_request_hash}"
    ).format(**locals()).encode()
    # Derive the SigV4 signing key: HMAC chain over date, region, service,
    # and the literal "aws4_request".
    aws4_secret_key = b"AWS4" + website.app_conf.s3_secret_key.encode()
    sig_key = hmac.new(aws4_secret_key, today.encode(), sha256).digest()
    sig_key = hmac.new(sig_key, region.encode(), sha256).digest()
    sig_key = hmac.new(sig_key, b"s3", sha256).digest()
    sig_key = hmac.new(sig_key, b"aws4_request", sha256).digest()
    signature = hmac.new(sig_key, string_to_sign, sha256).hexdigest()
    return endpoint + "/" + key + "?" + querystring + "&X-Amz-Signature=" + signature
NO_DEFAULT = object()
def get_int(d, k, default=NO_DEFAULT, minimum=None, maximum=None):
try:
r = d[k]
except (KeyError, Response):
if default is NO_DEFAULT:
raise
return default
try:
r = int(r)
except (ValueError, TypeError):
raise Response().error(400, "`%s` value %r is not a valid integer" % (k, r))
if minimum is not None and r < minimum:
raise | |
"""
Helper Methods
Author: Austin Transportation Department, Data and Technology Services
Description: The purpose of this script is to provide any methods that
assist any script associated to this application.
"""
import sys
import glob
import csv
import io
import json
import re
import datetime
from dateutil import parser
# Dependencies
from .queries import search_crash_query, search_crash_query_full
from .request import run_query
from .helpers_import_fields import (
CRIS_TXDOT_FIELDS,
CRIS_TXDOT_COMPARE_FIELDS_LIST,
CRIS_TXDOT_COMPARE_FIELD_TYPE,
)
from .config import ATD_ETL_CONFIG
def generate_template(
    name, function, fields, fieldnames=None, upsert=False, constraint="", crash=False
):
    """
    Returns a string with a graphql template
    :param str name: The name of the graphql mutation
    :param str function: The name of the graphql function
    :param str fields: The value of the fields in graphql expression
    :param str[] fieldnames: An array of strings containing the names of the columns
    :param bool upsert: If true, adds upsert logic; false otherwise.
    :param str constraint: The name of the constraint on_conflict
    :param bool crash: If true, also reset cr3_stored_flag for crash records.
    :return str:
    """
    # Work on a copy: the previous `fieldnames=[]` default combined with
    # `fieldnames += [...]` mutated both the shared default list and the
    # caller's list, leaking "cr3_stored_flag" into every later call.
    fieldnames = list(fieldnames) if fieldnames else []
    if crash:
        update_cr3 = 'cr3_stored_flag: "N"'
        fieldnames = fieldnames + ["cr3_stored_flag"]
    else:
        update_cr3 = ""
    if upsert:
        on_conflict = """
        , on_conflict: {
            constraint: %CONFLICT_CONSTRAINT%,
            update_columns: [
                %CONFLICT_FIELDS%
            ]
        }
        """.replace(
            "%CONFLICT_CONSTRAINT%", constraint
        ).replace(
            "%CONFLICT_FIELDS%", ",\n".join([f.lower() for f in fieldnames])
        )
    else:
        on_conflict = ""
    return (
        """
        mutation %NAME% {
            %FUNCTION%(
                objects: {
                    %FIELDS%
                    %UPDATE_CR3%
                }
                %ON_CONFLICT%
            ){
                affected_rows
            }
        }
        """.replace(
            "%NAME%", name
        )
        .replace("%FUNCTION%", function)
        .replace("%FIELDS%", fields)
        .replace("%UPDATE_CR3%", update_cr3)
        .replace("%ON_CONFLICT%", on_conflict)
    )
def lowercase_group_match(match):
    """Rewrite a quoted-key regex match (`"Key":`) as an unquoted lower-case `key:`."""
    return match.group(1).lower() + ":"
def generate_fields_with_filters(line, fieldnames, filters=None):
    """
    Generates a list of fields for graphql query
    :param line: string - The raw csv line
    :param fieldnames: array of strings - The fields to be used as headers
    :param filters: list - Filter groups (function, arguments) applied in order
    :return: string - graphql-formatted field assignments
    """
    reader = csv.DictReader(
        f=io.StringIO(line), fieldnames=fieldnames, delimiter=","
    )  # parse line
    fields = json.dumps([row for row in reader])  # Generate json
    # Remove object characters
    fields = fields.replace("[{", "").replace("}]", "")
    # Lowercase the keys and drop their quotes: `"Key":` -> `key:`
    fields = re.sub(r'"([a-zA-Z0-9_]+)":', lambda m: "%s:" % m.group(1).lower(), fields)
    # Make empty strings null
    fields = re.sub(r'([a-zA-Z0-9_]+): "",', r"\1: null,", fields)
    # Break lines & remove ending commas
    fields = re.sub(r'(\, )(([^"]+)(: ?)(\")([^"]+)(\"))', r"\n\2", fields)
    fields = re.sub(r"(null, )([a-zA-Z0-9\_]+)", r"null\n\2", fields)
    fields = re.sub(r"(, )([a-zA-Z0-9\_]+)(: null)", r"\n\2: null", fields)
    # Apply filters best-effort: a failing filter is reported and skipped,
    # leaving `fields` as produced by the previous step. (The old dead
    # `fields_copy` backup variable has been removed; the mutable `filters=[]`
    # default is replaced with a None sentinel.)
    for filter_group in (filters or []):
        filter_function = filter_group[0]
        filter_function_arguments = filter_group[1]
        try:
            fields = filter_function(input=fields, fields=filter_function_arguments)
        except Exception as e:
            print("Error when applying filter: %s" % str(e))
    return fields
def get_crash_id(line):
    """
    Takes a raw CSV line and returns a crash_id
    :param str line: The raw CSV line
    :return str: The Crash ID, or "" when the line cannot be parsed
    """
    try:
        first_column = line.strip().split(",")[0]
    except Exception as e:
        print("Error: " + str(e))
        return ""
    return first_column
def generate_gql(line, fieldnames, file_type, upsert=True):
    """
    Returns a string with the final graphql query
    :param string line: The raw csv line
    :param string[] fieldnames: An array containing the names of fields
    :param string file_type: the type of insertion (crash, units, etc...)
    :param bool upsert: When True, it generates an on_conflict statement for upsertion.
    :return: string - the rendered graphql mutation, or "" on error
    """
    type_config = CRIS_TXDOT_FIELDS[file_type]
    filters = type_config["filters"]
    query_name = type_config["query_name"]
    function_name = type_config["function_name"]
    constraints = {
        "crash": "atd_txdot_crashes_pkey",
        "charges": "uniq_atd_txdot_charges",
        "unit": "atd_txdot_units_unique",
        "person": "atd_txdot_person_unique",
        "primaryperson": "atd_txdot_primaryperson_unique",
    }
    try:
        fields = generate_fields_with_filters(
            line=line, fieldnames=fieldnames, filters=filters
        )
        # `filters[0][1]` holds the column names removed by the first filter;
        # keep only the (lower-cased) fieldnames that survive the removal.
        removed_columns = filters[0][1]
        template_fields = [
            field.lower() for field in fieldnames if field.lower() not in removed_columns
        ]
        template = generate_template(
            name=query_name,
            function=function_name,
            fields=fields,
            fieldnames=template_fields,
            upsert=upsert,
            constraint=constraints.get(file_type, None),
            crash=(file_type == "crash"),
        )
    except Exception as e:
        print("generate_gql() Error: " + str(e))
        template = ""
    return template
def record_exists_hook(line, file_type):
    """
    Checks whether the record already exists in the database.

    Only crashes are actually looked up; any other record type is assumed
    absent and left to fail (harmlessly) at insertion time.
    :param line: string - The raw record in CSV format
    :param file_type: string - The parameter as passed to the terminal
    :return: boolean - True if the record exists, False otherwise.
    """
    if file_type != "crash":
        # Non-crash types: assume the record is not there. In the future we
        # may want to write a function that performs searches.
        return False
    crash_id = get_crash_id(line)
    query = search_crash_query(crash_id)
    try:
        result = run_query(query)
        return len(result["data"]["atd_txdot_crashes"]) > 0
    except Exception as e:
        print("record_exists_hook() Error: " + str(e))
        # On lookup failure, report "exists" so the script does not insert.
        return True
def handle_record_error_hook(line, gql, file_type, response=None, line_number="n/a"):
    """
    Decides whether a failed insertion should stop the run.
    Returns true to stop the execution of this script, false to mark as a non-error and move on.
    :param line: string - the csv line being processed
    :param gql: string - the graphql query that was at fault
    :param file_type: string - the type of record being processed
    :param response: dict - The json response from the request output
    :param line_number: string - The line number where the error occurs
    :return: bool - True to signal error and stop execution, False otherwise.
    """
    # Fix: the default used to be the mutable dict {} (shared across calls)
    # and line_number's default was "n\a", which embeds a BEL control
    # character ("\a") rather than the intended "n/a".
    if response is None:
        response = {}
    # We must ignore constraint-violation errors,
    # it means we are trying to overwrite a record.
    if "constraint-violation" in str(response):
        return False
    # Otherwise, this could be a legitimate problem,
    # for which we must stop the execution
    else:
        print(
            """\n\n------------------------------------------
            Fatal Error
            -----------------------------------------
            Line:   \t%s
            CrashID:\t%s
            Line:   \t%s
            Type:   \t%s \n
            Query:  \t%s \n
            Response: %s \n
            ------------------------------------------\n\n
            """
            % (
                line_number,
                get_crash_id(line),
                str(line).strip(),
                file_type,
                gql,
                str(response),
            )
        )
        return True
def get_file_list(file_type):
    """
    Returns the list of extract CSV files to be processed for a record type.
    :param file_type: string - The type to be used: crash, charges, person, primaryperson, unit
    :return: array of matching file paths
    """
    pattern = "/data/extract_*_%s_*.csv" % file_type
    return glob.glob(pattern)
def generate_run_config():
    """
    Builds the run configuration from the command-line arguments: the record
    type (argv[1], required), a comma-separated skip-rows expression
    (argv[2], optional), the --dryrun flag, and the list of CSV files to
    process with a per-file number of lines to skip.
    :return: dict
    """
    # Our dictionary template
    config = {
        "file_dryrun": False,
        "file_type": "",
        "file_list_raw": [],
        "skip_rows_raw": [],
    }
    # First positional argument: the file type. Required — exit without it.
    # (Previously a bare `except:`, which also swallowed SystemExit.)
    try:
        config["file_type"] = str(sys.argv[1]).lower()
    except IndexError:
        print("No file type provided")
        exit(1)
    # Second positional argument: skip-rows expression (optional).
    try:
        sr_expression = str(sys.argv[2]).lower()
        config["skip_rows_raw"] = sr_expression.split(",")
    except IndexError:
        config["skip_rows_raw"] = []
    # Dry-run flag (a membership test cannot raise; no try needed).
    config["file_dryrun"] = "--dryrun" in sys.argv
    # Gather the list of files
    config["file_list_raw"] = get_file_list(file_type=config["file_type"])
    # Pair every file with its number of lines to skip.
    final_file_list = []
    for i, file in enumerate(config["file_list_raw"]):
        try:
            skip_lines_string = config["skip_rows_raw"][i]
            # A star signals we are going to skip all lines (sentinel -1).
            if skip_lines_string == "*":
                skip_lines_string = "-1"
            skip_lines_value = int(skip_lines_string)
        except (IndexError, ValueError):
            # Missing or unparsable entry: skip nothing.
            skip_lines_value = 0
        final_file_list.append({"file": file, "skip": skip_lines_value})
    config["file_list"] = final_file_list
    return config
def get_crash_record(crash_id):
    """
    Obtains a single crash record based on the crash_id.
    :param crash_id: string - The crash id to obtain from the database
    :return: the record dict, or None if the lookup fails
    """
    # Columns to compare plus a few bookkeeping fields.
    extra_columns = [
        "apd_human_update",
        "geocode_provider",
        "qa_status",
        "updated_by",
        "changes_approved_date",
    ]
    query = search_crash_query_full(
        crash_id=crash_id,
        field_list=CRIS_TXDOT_COMPARE_FIELDS_LIST + extra_columns,
    )
    try:
        result = run_query(query)
        return result["data"]["atd_txdot_crashes"][0]
    except Exception:
        return None
def is_cris_date(string):
"""
Returns True if | |
from sympy import symbols, lambdify, diff, sqrt, I
from sympy import besselj, hankel1, atan2, exp, pi, tanh
import scipy.special as scp
import numpy as np
from scipy.sparse.linalg import gmres
# gmres iteration counter
# https://stackoverflow.com/questions/33512081/getting-the-number-of-iterations-of-scipys-gmres-iterative-method
class gmres_counter(object):
    """Callback object for scipy's gmres that counts iterations.

    https://stackoverflow.com/questions/33512081/getting-the-number-of-iterations-of-scipys-gmres-iterative-method
    """

    def __init__(self, disp=True):
        self._disp = disp
        self.niter = 0

    def __call__(self, rk=None):
        # gmres calls this once per iteration with the current residual.
        self.niter += 1


# module-level counter shared by the solver below
counter = gmres_counter()
def sommerfeld(k, beta, interval, exp_type):
    """
    Evaluates the Sommerfeld term, either full or partial based on the
    `exp_type` argument, returning it as lambdified kernels.

    The integral is approximated with the trapezoid rule along the deformed
    contour lam(t) = t - i*tanh(t), truncated at t_max = int(k) + interval.

    :param k: wavenumber
    :param beta: impedance parameter entering (f + i*beta)/(f - i*beta)
    :param interval: integer padding added to int(k) to set the truncation range
    :param exp_type: "full" selects the full Sommerfeld term; any other value
        selects the partial/correction form
    :return: (som_sp, som_dp) — lambdified single-layer kernel of
        (x_1, x_2, y_1, y_2) and double-layer kernel of
        (x_1, x_2, y_1, y_2, ny_1, ny_2)
    """
    x_1, y_1, x_2, y_2, ny_1, ny_2 = symbols("x_1, y_1, x_2, y_2, ny_1, ny_2")
    som_int_sp = 0
    t_max = int(k) + interval
    # NOTE(review): C is a fixed shift used only in the partial form
    # exp((-f + I*beta) * C) — confirm its intended value.
    C = 1
    for t in range(-t_max, t_max + 1):
        lam = t - I * tanh(t)
        # contour Jacobian d(lam)/dt
        d_lam = 1 - I * (1 - tanh(t) ** 2)
        f = (lam ** 2 - k ** 2) ** 0.5
        if exp_type == "full":
            term = (
                exp(-f * (x_2 + y_2))
                * exp(I * lam * (x_1 - y_1))
                * (f + I * beta)
                / (f - I * beta)
                / f
                * d_lam
                / 4
                / pi
            )
        else:
            term = (
                exp(-f * (x_2 + y_2))
                * exp(I * lam * (x_1 - y_1))
                * exp((-f + I * beta) * C)
                / (f - I * beta)
                / f
                * d_lam
                * I * beta
                / 2
                / pi
            )
        # trapezoid rule: the two endpoints carry half weight
        if t == -t_max or t == t_max:
            som_int_sp += 0.5 * term
        else:
            som_int_sp += term
    som_sp = lambdify([x_1, x_2, y_1, y_2], som_int_sp)
    # double layer = normal derivative with respect to the source point y
    som_int_dp = ny_1 * diff(som_int_sp, y_1) + ny_2 * diff(som_int_sp, y_2)
    som_dp = lambdify([x_1, x_2, y_1, y_2, ny_1, ny_2], som_int_dp)
    return som_sp, som_dp
def eval_sp_dp_QBX(order, k):
    """
    Evaluates single layer, double layer and corresponding QBX expansion
    kernels for Helmholtz; also builds the image potentials for the source
    reflected to -(y + eta).

    The QBX expansions are formed with Graf's addition theorem about the
    center c = x + n*r, summing terms of index -order..order.

    :param order: QBX expansion order
    :param k: wavenumber
    :return: 8-tuple of lambdified callables:
        (single_layer, double_layer, exp_term_slp, exp_term_dlp,
         image_single_layer, image_double_layer,
         img_exp_term_slp, img_exp_term_dlp)
    """
    x_1, y_1, x_2, y_2, eta = symbols("x_1, y_1, x_2, y_2, eta")
    nx_1, nx_2, ny_1, ny_2, r = symbols("nx_1, nx_2, ny_1, ny_2, r")
    # free-space Helmholtz Green's function G = (i/4) H0^(1)(k |x - y|)
    dist = sqrt((x_1 - y_1) ** 2 + (x_2 - y_2) ** 2)
    kernel = I / 4 * hankel1(0, k * dist)
    single_layer = lambdify([x_1, x_2, y_1, y_2], kernel)
    # double layer: normal derivative with respect to the source point y
    green_normal_der = ny_1 * diff(kernel, y_1) + ny_2 * diff(kernel, y_2)
    double_layer = lambdify([x_1, x_2, y_1, y_2, ny_1, ny_2], green_normal_der)
    # image in y=0 calculations (source mirrored and shifted by eta)
    image_dist = sqrt((x_1 - y_1) ** 2 + (x_2 + y_2 + eta) ** 2)
    image_kernel = I / 4 * hankel1(0, k * image_dist)
    image_single_layer = lambdify([x_1, x_2, y_1, y_2, eta], image_kernel)
    image_green_normal_der = ny_1 * diff(image_kernel, y_1) + ny_2 * diff(
        image_kernel, y_2
    )
    image_double_layer = lambdify(
        [x_1, x_2, y_1, y_2, eta, ny_1, ny_2], image_green_normal_der
    )
    # Grafs theorem term evaluations: expansion center c = x + n*r,
    # with polar coordinates of x and y about c.
    c_1 = x_1 + nx_1 * r
    c_2 = x_2 + nx_2 * r
    xc = sqrt((x_1 - c_1) ** 2 + (x_2 - c_2) ** 2)
    yc = sqrt((y_1 - c_1) ** 2 + (y_2 - c_2) ** 2)
    x_theta = atan2((x_2 - c_2), (x_1 - c_1))
    y_theta = atan2((y_2 - c_2), (y_1 - c_1))
    img_yc = sqrt((y_1 - c_1) ** 2 + (-(y_2 + eta) - c_2) ** 2)
    img_y_theta = atan2((-(y_2 + eta) - c_2), (y_1 - c_1))
    # single layer expansion zeroth order term
    qbx_exp_slp = I / 4 * hankel1(0, k * yc) * besselj(0, k * xc)
    img_qbx_exp_slp = I / 4 * hankel1(0, k * img_yc) * besselj(0, k * xc)
    # add the +i and -i order terms for both the direct and image expansions
    for i in range(1, order + 1):
        qbx_exp_slp += (
            I
            / 4
            * (
                hankel1(i, k * yc)
                * exp(I * i * y_theta)
                * besselj(i, k * xc)
                * exp(-I * i * x_theta)
            )
        )
        qbx_exp_slp += (
            I
            / 4
            * (
                hankel1(-i, k * yc)
                * exp(-I * i * y_theta)
                * besselj(-i, k * xc)
                * exp(I * i * x_theta)
            )
        )
        img_qbx_exp_slp += (
            I
            / 4
            * (
                hankel1(i, k * img_yc)
                * exp(I * i * img_y_theta)
                * besselj(i, k * xc)
                * exp(-I * i * x_theta)
            )
        )
        img_qbx_exp_slp += (
            I
            / 4
            * (
                hankel1(-i, k * img_yc)
                * exp(-I * i * img_y_theta)
                * besselj(-i, k * xc)
                * exp(I * i * x_theta)
            )
        )
    # QBX double layer: source-normal derivative of the expanded single layer
    qbx_exp_dlp = ny_1 * diff(qbx_exp_slp, y_1) + ny_2 * diff(qbx_exp_slp, y_2)
    exp_term_slp = lambdify(
        [x_1, x_2, y_1, y_2, nx_1, nx_2, ny_1, ny_2, r], qbx_exp_slp
    )
    exp_term_dlp = lambdify(
        [x_1, x_2, y_1, y_2, nx_1, nx_2, ny_1, ny_2, r], qbx_exp_dlp
    )
    img_qbx_exp_dlp = ny_1 * diff(img_qbx_exp_slp, y_1) + ny_2 * diff(
        img_qbx_exp_slp, y_2
    )
    img_exp_term_slp = lambdify(
        [x_1, x_2, y_1, y_2, eta, nx_1, nx_2, ny_1, ny_2, r], img_qbx_exp_slp
    )
    img_exp_term_dlp = lambdify(
        [x_1, x_2, y_1, y_2, eta, nx_1, nx_2, ny_1, ny_2, r], img_qbx_exp_dlp
    )
    return (
        single_layer,
        double_layer,
        exp_term_slp,
        exp_term_dlp,
        image_single_layer,
        image_double_layer,
        img_exp_term_slp,
        img_exp_term_dlp,
    )
class Images_Integral:
    """Sums the image-potential contributions over dyadic subintervals of [0, 1]."""

    def __init__(self, m, beta, img_sp, img_dp):
        # number of dyadic refinement levels
        self.m = m
        self.beta = beta
        # callables for the image single/double layer kernels
        self.img_sp = img_sp
        self.img_dp = img_dp

    def eval_integral(self, targets, sources, source_normal_x, source_normal_y):
        """
        Evaluates the sum of the image integrals on (m+1) dyadic intervals
        with an 8-point Gauss-Legendre rule per interval; also adds the
        eta = 0 image term.
        """
        C = 1
        npoints = 8
        n_intervals = self.m + 1
        # dyadic breakpoints [0, 2^-m, 2^-(m-1), ..., 1]
        dyad = 2 ** np.arange(-self.m, C, dtype=float)
        breakpoints = np.insert(dyad, 0, 0.0)
        gl_table = scp.legendre(npoints).weights
        ref_nodes = gl_table[:, 0]
        ref_weights = gl_table[:, 2]
        all_nodes = np.zeros((n_intervals, npoints))
        all_weights = np.zeros((n_intervals, npoints))
        # map the reference rule onto each dyadic interval [a, b]
        for level, (a, b) in enumerate(zip(breakpoints[:-1], breakpoints[1:])):
            all_nodes[level] = ref_nodes * (b - a) * 0.5 + (b + a) * 0.5
            all_weights[level] = 0.5 * (b - a) * ref_weights
        all_nodes = all_nodes.reshape(-1)
        all_weights = all_weights.reshape(-1)
        # Neumann condition image (eta = 0)
        sp_total = self.img_sp(targets[0], targets[1], sources[0], sources[1], 0)
        dp_total = self.img_dp(
            targets[0],
            targets[1],
            sources[0],
            sources[1],
            0,
            source_normal_x,
            source_normal_y,
        )
        for node, weight in zip(all_nodes, all_weights):
            sp_total += (
                2
                * self.beta
                * 1j
                * self.img_sp(
                    targets[0], targets[1], sources[0], sources[1], node
                )
                * np.exp(1j * self.beta * node)
            ) * weight
            dp_total += (
                2
                * self.beta
                * 1j
                * self.img_dp(
                    targets[0],
                    targets[1],
                    sources[0],
                    sources[1],
                    node,
                    source_normal_x,
                    source_normal_y,
                )
                * np.exp(1j * self.beta * node)
                * weight
            )
        return sp_total, dp_total
def bvp(n, k, domain, alpha, qbx_exp_slp, qbx_exp_dlp, rhs, **kwargs):
    """
    Solves the BVP for the layer density.

    Assembles the QBX-discretized combined-field operator
    A = (D_int + D_ext)/2 + I/2 - i*alpha*S, optionally augmented with
    Sommerfeld ('som_sp'/'som_dp') and image ('imgs') contributions passed
    through kwargs, and solves A x = rhs with GMRES.

    :param n: number of panels; the returned density is reshaped to (n, -1)
    :param k: wavenumber (not used directly here; already baked into kernels)
    :param domain: discretized curve exposing .normals, .curve_nodes,
        .panel_lengths, .npoints and .curve_weights
    :param alpha: combined-field coupling constant
    :param qbx_exp_slp: QBX-expanded single-layer kernel (from eval_sp_dp_QBX)
    :param qbx_exp_dlp: QBX-expanded double-layer kernel (from eval_sp_dp_QBX)
    :param rhs: right-hand side sampled at the curve nodes
    :return: solution density reshaped to (n, -1)
    """
    normals_x, normals_y = domain.normals.reshape(2, -1)
    nodes_x, nodes_y = domain.curve_nodes.reshape(2, -1)
    # taking exp_radius as panel_length * 5 from QBX paper for controlled precision convg
    # qbx_radius = np.repeat(domain.panel_lengths, domain.npoints) ** 0.5
    qbx_radius = np.repeat(domain.panel_lengths, domain.npoints) * 0.5
    total_points = nodes_x.shape[0]
    # (target, source) matrices; the .T copies below select the target side
    normal_mat_x = np.broadcast_to(normals_x, (total_points, total_points))
    normal_mat_y = np.broadcast_to(normals_y, (total_points, total_points))
    node_mat_x = np.broadcast_to(nodes_x, (total_points, total_points))
    node_mat_y = np.broadcast_to(nodes_y, (total_points, total_points))
    radius_mat = np.broadcast_to(qbx_radius, (total_points, total_points)).T
    # take care of normal signs here
    # interior limit: expansion centers placed on the -normal side
    D_qbx_int = qbx_exp_dlp(
        node_mat_x.T,
        node_mat_y.T,
        node_mat_x,
        node_mat_y,
        -normal_mat_x.T,
        -normal_mat_y.T,
        normal_mat_x,
        normal_mat_y,
        radius_mat,
    ) * domain.curve_weights.reshape(-1)
    # exterior limit: expansion centers on the +normal side
    D_qbx_ext = qbx_exp_dlp(
        node_mat_x.T,
        node_mat_y.T,
        node_mat_x,
        node_mat_y,
        normal_mat_x.T,
        normal_mat_y.T,
        normal_mat_x,
        normal_mat_y,
        radius_mat,
    ) * domain.curve_weights.reshape(-1)
    S_qbx = qbx_exp_slp(
        node_mat_x.T,
        node_mat_y.T,
        node_mat_x,
        node_mat_y,
        normal_mat_x.T,
        normal_mat_y.T,
        normal_mat_x,
        normal_mat_y,
        radius_mat,
    ) * domain.curve_weights.reshape(-1)
    rhs = rhs.reshape(-1)
    # averaging interior exterior limits
    A = (D_qbx_int + D_qbx_ext) * 0.5 + 0.5 * np.identity(total_points)
    # or compute one side use jump relations: less work and faster
    # has spectral convergence problems though
    # A = D_qbx_int + np.identity(total_points)
    A -= alpha * S_qbx * 1j
    # adding images and sommerfeld contribution
    if ("som_sp" in kwargs.keys()) and ("som_dp" in kwargs.keys()):
        som_sp = kwargs["som_sp"]
        som_dp = kwargs["som_dp"]
        S_som = som_sp(
            node_mat_x.T, node_mat_y.T, node_mat_x, node_mat_y
        ) * domain.curve_weights.reshape(-1)
        D_som = som_dp(
            node_mat_x.T,
            node_mat_y.T,
            node_mat_x,
            node_mat_y,
            normal_mat_x,
            normal_mat_y,
        ) * domain.curve_weights.reshape(-1)
        # A += S_som
        A += D_som - alpha * 1j * S_som
    if "imgs" in kwargs.keys():
        imgs = kwargs["imgs"]
        # NOTE(review): eval_integral returns an (S, D) tuple; multiplying the
        # tuple by the weights array relies on numpy stacking the pair into a
        # (2, N, N) array before broadcasting — confirm this is intended.
        S_img, D_img = imgs.eval_integral(
            (node_mat_x.T, node_mat_y.T),
            (node_mat_x, node_mat_y),
            normal_mat_x,
            normal_mat_y,
        ) * domain.curve_weights.reshape(-1)
        # A += S_img
        A += D_img - alpha * 1j * S_img
    # gmres returns (solution, info); `msg` (the info flag) is not checked.
    soln_density, msg = gmres(A, rhs, tol=1e-11, callback=counter)
    # NOTE(review): `counter` is module-level and never reset, so the printed
    # iteration count accumulates across successive bvp() calls — confirm.
    print("GMRES iter:", counter.niter)
    return soln_density.reshape(n, -1)
def eval_target(
targets, sources, weights, source_normals, density, sp, dp, alpha, **kwargs
):
"""
evaluates the potential at target locations
"""
normals_x, normals_y = source_normals.reshape(2, -1)
nodes_x, nodes_y | |
#
# Copyright 2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import math
import subprocess
from optparse import OptionParser
import time
import os
import array
# Output mode for logPrint below:
#   0 == do nothing, 1 == regular print, 2 == logfile, 3 == regular print and logfile
statusPrint = 1
def logPrint(*args):
    """Print the arguments and/or append them to smoketest.log, depending on
    the global statusPrint mode (see its comment above its definition)."""
    message = "".join(str(arg) + " " for arg in args)
    if (statusPrint == 1) or (statusPrint == 3):
        print(message)
    if (statusPrint == 2) or (statusPrint == 3):
        with open("smoketest.log", "a") as log_handle:
            log_handle.write(message + "\n")
def createClip(width, height, frames, filename):
    """
    Writes a synthetic raw 4:2:0 clip with an interleaved UV plane (NV12
    layout: arrU holds U at even, V at odd byte offsets) to `filename`.

    Content per frame: a moving zoneplate in luma, flat chroma ramping with
    the frame number, a moving marker line (Y=76, U=84, V=255), and a centre
    block whose Y/U/V samples all equal (frame & 0xFF) — later tests read
    that block back to verify frame order/count.

    :param width: luma width in pixels (expected even)
    :param height: luma height in pixels (expected even)
    :param frames: number of frames to generate
    :param filename: output file path
    """
    f = open(filename, "wb")
    nrframes = frames
    # per-frame horizontal/vertical step of the moving marker line
    speedLineH = 0.5 * width / frames
    speedLineV = 0.5 * height / frames
    widthY = width
    heightY = height
    widthUV = int(widthY/2)
    heightUV = int(heightY/2)
    widthUV2 = int(widthUV/2)
    arrY = bytearray(widthY * heightY)
    # interleaved UV plane: 2 bytes per chroma sample pair
    arrU = bytearray(2 * widthUV * heightUV)
    #arrV = bytearray(widthUV * heightUV)
    startSec = time.time()
    # create a zoneplate in a resolution 2 x h and 2 x v of the clip size
    # this way we can easily make it a moving zoneplate
    arrZP = bytearray(4 * widthY * heightY)
    for y in range(0, 2 * heightY):
        # NOTE(review): the first tmp1 assignment is dead code — it is
        # overwritten immediately by the second one.
        tmp1 = y * 0.0000003
        tmp1 = y * 0.00000278
        ytmp2 = y * 2 * widthY
        for x in range(0, 2 * widthY):
            tmp = math.cos(tmp1 * x * x)
            Y = int(127 * (1.0 + tmp))
            arrZP[x + ytmp2] = Y
    for fr in range(0, nrframes):
        for z in range(0, heightY):
            # make the zonpelate look like it is moving in h and v direction
            htmp = int((fr * widthY) / frames)
            vtmp = int((fr * heightY) / frames)
            arrY[z*widthY:z*widthY+widthY] = arrZP[htmp+vtmp*2*widthY+z*2*widthY:htmp+vtmp*2*widthY+z*2*widthY+widthY]
        # flat chroma: U ramps up and V ramps down with the frame number
        ufrtmp = (128 + int((255 / frames) * fr)) % 256
        vfrtmp = (128 - int((255 / frames) * fr)) % 256
        for y in range(0,heightUV):
            # uvtmp1: inside the vertical band of the centre counter block
            if y < heightUV/2 + 60 and y > heightUV/2 - 60:
                uvtmp1 = True
            else:
                uvtmp1 = False
            uvtmp2 = 2 * y * widthUV
            # uvtmp3: this chroma row lies on the moving marker line
            if y == (int(speedLineV*fr)):
                uvtmp3 = True
            else:
                uvtmp3 = False
            # luma row offsets for the 2x2 block covered by one chroma sample
            uvtmp4 = 2 * y * widthY
            uvtmp5 = (2 * y + 1) * widthY
            uvtmp8 = int(speedLineH*fr)
            for x in range(0,widthUV):
                U = ufrtmp
                V = vfrtmp
                uvtmp6 = x + x
                uvtmp7 = x + x + 1
                if uvtmp3 or x == uvtmp8:
                    # marker line: fixed colour, 2x2 luma block per sample
                    U = 84
                    V = 255
                    arrY[uvtmp6 + uvtmp4] = 76
                    arrY[uvtmp7 + uvtmp4] = 76
                    arrY[uvtmp6 + uvtmp5] = 76
                    arrY[uvtmp7 + uvtmp5] = 76
                if uvtmp1 and x < widthUV2 + 60 and x > widthUV2 - 60:
                    # centre block: embed the frame number in Y, U and V
                    fr255 = fr & 0xFF
                    U = fr255
                    V = fr255
                    arrY[uvtmp6 + uvtmp4] = fr255
                    arrY[uvtmp7 + uvtmp4] = fr255
                    arrY[uvtmp6 + uvtmp5] = fr255
                    arrY[uvtmp7 + uvtmp5] = fr255
                arrU[2*x + uvtmp2] = U
                arrU[2*x + uvtmp2 + 1] = V
                #arrV[x + uvtmp2] = V
        f.write(arrY)
        f.write(arrU)
        #f.write(arrV)
    f.close()
    endSec = time.time()
    totSec = int(endSec-startSec)
    print("Time to create clip : " + str(totSec) + " seconds")
def testTranscode(frames, nrfiles, dir, logdir):
    """
    Transcodes previously-encoded HEVC clips (widths stepping down from 1920
    to 320 over `nrfiles` steps) to 960x540 h.264 with u30_xma_transcode,
    then decodes each result with ffmpeg and verifies the frame count plus
    the per-frame counter embedded by createClip in luma and chroma.

    :param frames: expected number of frames per clip
    :param nrfiles: number of resolution steps (must be >= 1)
    :param dir: directory holding encodehevc*.265 inputs and work files
    :param logdir: directory for transcoder stdout/stderr logs
    :return: number of failed checks (0 == all tests passed)
    """
    xstart = 1920
    fail = 0
    if (nrfiles < 1):
        print("aborting; nr files needs to be at least 1")
        raise SystemExit(1)
    if (nrfiles == 1):
        xstep = 0
    else:
        xstep = int((1920 - 320) / (nrfiles-1))
    fps = [i for i in range(nrfiles)]
    #decode with U30
    for step in range(0, nrfiles):
        # width/height of this step, rounded down to multiples of 4 (16:9)
        x = 4 * int((xstart - (step*xstep)) / 4)
        y = 4 * int(((x * 1080) / 1920) / 4)
        startSec = time.time()
        # check if file exists already
        inputfile = dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".265"
        fe = os.path.exists(inputfile)
        if (fe == False):
            logPrint("File " + inputfile + " doesn't exist")
            logPrint("Exiting ...")
            raise SystemExit(1)
        fps[step] = 0
        logPrint("Transcoding HEVC "+str(x).zfill(4)+"x"+str(y).zfill(4)+" to h.264 960x540")
        transcode_cmd = "u30_xma_transcode -c:v mpsoc_vcu_hevc -i " + inputfile + \
            " -multiscale_xma -num-output 1 -out_1_width 960 -out_1_height 540 -c:v mpsoc_vcu_h264 -control-rate 0 -qp-mode 0 -slice-qp 20 -o " \
            +dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+".264" \
            " > "+logdir+"/transcodestdout" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log 2>> "+logdir+"/transcodestderr"+str(x).zfill(4)+"x"+str(y).zfill(4)+".log"
        subprocess.Popen(transcode_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
        endSec = time.time()
        # the last number found in the stderr log is taken as the fps figure
        logfile = open(logdir+"/transcodestderr" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log")
        allNumbers = re.findall(r"[-+]?\d*\.\d+|\d+", logfile.read())
        if len(allNumbers) == 0:
            logPrint("Transcoder Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: No fps stats found!")
            fps[step] = -1
            fail = 1
        else:
            fps[step] = allNumbers[-1]
    output = subprocess.Popen("rm "+dir+"/transcode*.yuv", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
    for step in range(0, nrfiles):
        x = 4 * int((xstart - (step*xstep)) / 4)
        y = 4 * int(((x * 1080) / 1920) / 4)
        #decode the transcoded file and check for correctness
        file_name = dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv"
        decode_cmd = "ffmpeg -nostdin -i " + dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+ \
            ".264 -c:v rawvideo -pix_fmt nv12 "+file_name
        output = subprocess.Popen(decode_cmd, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
        # all transcodes land at the same fixed output resolution
        xo = 960
        yo = 540
        fe = os.path.exists(file_name)
        if fe:
            file_stats = os.stat(file_name)
            # frame count implied by the file size (1.5 bytes/pixel for 4:2:0)
            frames_mod = int (file_stats.st_size / (xo * yo * 1.5))
            if file_stats.st_size != int(xo * yo * frames * 1.5):
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: Number of frames is " + str(frames_mod) + " instead of "+ str(frames))
                fail = fail + 1
                #logPrint("Exiting ...")
                #raise SystemExit(1)
            f = open(file_name, "rb")
        else:
            logPrint("File " + file_name + " doesn't exist")
            logPrint("Exiting ...")
            raise SystemExit(1)
        if fps[step] != 0:
            testPassY = True
            testPassUV = True
            firstframe = 0
            for i in range(0, frames_mod):
                arrY = array.array('B')
                arrU = array.array('B')
                #arrV = array.array('B')
                arrY.fromfile(f, xo*yo)
                arrU.fromfile(f, int(xo*yo/2))
                #arrV.fromfile(f, int(xo*yo/4))
                # sample offsets of the centre frame-counter block
                xval = int((xo/2)+ (xo) * (yo/2))
                uval = int((xo/2)+(xo/2) * (yo/2))
                #vval = int((xo/4)+(xo/2) * (yo/4))
                #if (i != arrY[xval]) or (i != arrU[uval]) or (i != arrV[vval]):
                # ignoring UV for now as we know it fails
                if (i != arrY[xval]) and testPassY == True:
                    #if testPassY == True:
                    #    logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
                    #    logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
                    testPassY = False
                    firstframe = i
                if ((i != arrU[uval]) or (i != arrU[uval + 1])) and testPassUV == True:
                    #if testPassUV == True:
                    #    logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
                    #    logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
                    testPassUV = False
                    firstframe = i
            if testPassY == True and testPassUV == True:
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" passed. Processed @ "+str(fps[step])+" fps" )
            elif testPassY == True:
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Luma passed. Processed @ "+str(fps[step])+" fps" )
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Chroma FAILED. Processed @ "+str(fps[step])+" fps" )
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" First Mismatch detected in frame " + str(firstframe))
                fail = fail + 1
            else:
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED. Processed @ "+str(fps[step])+" fps" )
                logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED. First Mismatch detected in frame " + str(firstframe))
                fail = fail + 1
        f.close()
    return fail
def testDecodeHEVC(frames, nrfiles, dir, logdir):
xstart = 1920
fail = 0
if (nrfiles < 1):
print("aborting; nr files needs to be at least 1")
raise SystemExit(1)
if (nrfiles == 1):
xstep = 0
else:
xstep = int((1920 - 320) / (nrfiles-1))
fps = [i for i in range(nrfiles)]
#decode with U30
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
startSec = time.time()
# check if file exists already
inputfile = dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".265"
fe = os.path.exists(inputfile)
if (fe == False):
logPrint("File " + inputfile + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
fps[step] = 0
logPrint("HEVC decoding "+str(x).zfill(4)+"x"+str(y).zfill(4))
decode_cmd = "u30_xma_decode -i " + inputfile + " -c:v mpsoc_vcu_h265 -o " + \
dir+"/decodehevc" + str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv" \
| |
import numpy as np
import gzip
from Bio import SeqIO
from pathlib import Path
import os
import subprocess
import tarfile
from io import BytesIO
#for parallel computing
from joblib import Parallel, delayed
import multiprocessing
num_cores_energy = multiprocessing.cpu_count()
from tqdm import tqdm
import pandas as pd
import sys
# The 20 standard amino acids (one-letter codes) plus '-' for an alignment gap.
valid_aa = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y','-']
# The same residues in three-letter codes, in the same order as valid_aa.
aa_3= ['ALA','CYS','ASP','GLU','PHE','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL','TRP','TYR','-']
# one-letter code -> integer index (used to index the h and J arrays)
d_aa_num= {a:i for i,a in enumerate(valid_aa)}
# three-letter code -> one-letter code
d_3to1 = {a3:a1 for a3,a1 in zip(aa_3,valid_aa)}
def _load_npy_from_targz(path):
    '''Load the last extractable .npy member from a gzip-compressed tar file.'''
    arr = None
    # Fix: the original never closed the tar files; the context manager
    # guarantees release even if np.load raises.
    with tarfile.open(path, "r:gz") as tar:
        for member in tar.getmembers():
            f = tar.extractfile(member)
            if f is not None:
                arr = np.load(BytesIO(f.read()))
    return arr


def read_dca_par(path_h_DCA, path_J_DCA):
    '''Read compressed DCA parameter files.

    :param path_h_DCA: .tar.gz archive containing the fields array h (.npy)
    :param path_J_DCA: .tar.gz archive containing the couplings array J (.npy)
    :return: (h, J) numpy arrays (None for an archive with no readable member)
    '''
    h = _load_npy_from_targz(path_h_DCA)
    J = _load_npy_from_targz(path_J_DCA)
    return h, J
def compute_sm_energy_dict(seq, h, J):
    '''
    For SINGLE MUTANTS, return a dictionary d[idx, mutated_aa] =
    E(mutant) - E(wild type).

    Scans every position and every symbol in valid_aa (gap included), so the
    result has ~21*L entries and the call can be VERY SLOW for long
    sequences; see compute_sm_energy to evaluate a chosen subset instead.

    Fix: removed the leftover debug print() calls, which wrote to stdout for
    every one of the ~21*L mutants.
    '''
    E0 = compute_energy(seq, h, J)
    d_sm = {}
    for i in range(0, len(seq)):
        # try every amino acid (and the gap) at position i
        for aa in valid_aa:
            new_seq = seq[:i] + aa + seq[(i+1):]
            E = compute_energy(new_seq, h, J)
            d_sm[i, aa] = np.round(E - E0, 4)
    return d_sm
def compute_sm_energy(seq, h ,J, idx, aa ):
    ''' for SINGLE MUTANTS, given the ref_seq,h,J and idx(pos_mutations) aa(mutated_aa)
    return energy_sum_single_mutants - energy_wild_type '''
    # NOTE(review): E0 (the wild-type energy) is computed but never used; the
    # function returns the plain sum of single-mutant energies, NOT the
    # difference promised by the docstring above — confirm intended behavior.
    E0 = compute_energy(seq,h,J)
    E_sum_sm = 0
    # apply each requested single mutation independently to the reference
    for i,a_i in zip(idx, aa):
        new_seq = seq[:i] + a_i + seq[(i+1):]
        E = compute_energy(new_seq,h,J)
        E_sum_sm += E
    return np.round(E_sum_sm,4)
def compute_energy(seq, h, J, parallel = False):
    '''
    DCA energy of `seq`: E = - sum_i h_i(a_i) - sum_{i<j} J_ij(a_i, a_j).

    :param seq: sequence over the alphabet in valid_aa
    :param h: fields array indexed as h[aa, position]
    :param J: couplings array indexed as J[aa1, aa2, pos1, pos2]
    :param parallel: broken, leave False (see note below)
    :return: the energy; returns None (implicitly) when `seq` contains a
        non-standard amino acid
    '''
    if all_standard_aa(seq):
        if(parallel == True):
            #DO NOT USE FOR NOW!!!
            #something weird... E_parallel != E_non_parallel
            # parallel actually slower than non parallel (execution time limited by memory access and not processor time??)
            # NOTE(review): compute_energy_given_ai ADDS the field term
            # (ei = h[...]) while the serial branch below SUBTRACTS it
            # (E -= h[...]); that sign inconsistency explains the
            # E_parallel != E_non_parallel discrepancy noted above.
            E = 0
            all_ei = Parallel(n_jobs=num_cores_energy)(delayed(compute_energy_given_ai)(seq, h, J, idx_ai) for idx_ai in range(0,len(seq)))
            E = np.sum(all_ei)
            return E
        if(parallel == False):
            E = 0
            for idx_aa1 in range(0, len(seq)):
                aa1 = seq[idx_aa1]
                E -= h[d_aa_num[aa1], idx_aa1]
                # pairwise couplings, each unordered pair counted once
                for idx_aa2 in range(idx_aa1+1, len(seq)):
                    aa2 = seq[idx_aa2]
                    E -= J[d_aa_num[aa1], d_aa_num[aa2], idx_aa1, idx_aa2]
            return E
def compute_energy_given_ai(seq, h, J, idx_ai):
    '''
    Per-site energy contribution of position idx_ai:
    e_i = -h_i(a_i) - sum_{j>i} J_ij(a_i, a_j)
    (good for parallelization: summing e_i over all i reproduces the serial
    compute_energy result).

    Bug fix: the field term used to be ADDED (ei = h[...]) while the serial
    compute_energy SUBTRACTS it (E -= h[...]); that sign mismatch is what
    made E_parallel != E_non_parallel.
    '''
    ai = seq[idx_ai]
    # field contribution, same sign convention as the serial compute_energy
    ei = -h[d_aa_num[ai], idx_ai]
    # couplings with all later positions (each unordered pair counted once)
    for idx_aj in range(idx_ai+1, len(seq)):
        aj = seq[idx_aj]
        ei -= J[d_aa_num[ai], d_aa_num[aj], idx_ai, idx_aj]
    return ei
def compute_entropy_context_ind(path_msa):
    '''Context-independent (profile) entropy computed from an MSA file.'''
    frequencies = compute_freq(path_msa)
    return compute_entropy_from_freq(frequencies)
def compute_entropy_from_freq(fi, remove_gaps = True, base2 = True):
    '''
    Per-column Shannon entropy of a frequency matrix.

    :param fi: (q, N) array of per-column symbol frequencies; all entries
        must be > 0 (callers add a pseudocount, see compute_freq)
    :param remove_gaps: drop the gap row (rows 20+) and renormalize the
        remaining 20 amino-acid rows before computing the entropy
    :param base2: return the entropy in bits instead of nats
    :return: list of N per-column entropies

    Performance: the original O(q*N) Python double loop is replaced by a
    single vectorized numpy expression (same math).
    '''
    fi = np.asarray(fi)
    if remove_gaps:
        fi = fi[:20, :] / np.sum(fi[:20, :], axis=0)
    # S_i = -sum_q f_qi * log(f_qi), computed column-wise in one pass
    S = -np.sum(fi * np.log(fi), axis=0)
    if base2:
        S = S / np.log(2)
    return list(S)
def compute_entropy_context_dep(ref_seq, h,J ):
    ''' compute context-DEPENDENT entropy (from hhblits ref_seq, h, J)

    Builds, for every position i, the conditional single-site distribution
    f_i(a) proportional to exp( h_i(a) + sum_{j != i} J_ij(a, a_j) )
    with all other positions fixed to the reference sequence, then returns
    the per-column entropy of that distribution.
    NOTE: O(N^2 * q) — slow for long sequences.

    :param ref_seq: reference sequence over valid_aa
    :param h: fields array of shape (q, N)
    :param J: couplings array indexed J[aa1, aa2, pos1, pos2]
    :return: list of N per-column entropies
    '''
    q, N = h.shape
    fi_plm = np.zeros(h.shape)
    #same conventions than in Eq.5.8 (PhD thesis)
    for i in range(0,N):
        #compute denominator (the partition function at position i)
        denom = 0
        for b in range(0,q):
            arg_denom = h[b,i]
            for j in range(0,N):
                if(j!=i):
                    aj = d_aa_num[ref_seq[j]]
                    arg_denom += J[b, aj ,i, j]
            denom += np.exp(arg_denom)
        # compute numerator for each candidate amino acid ai
        for ai in range(0,q):
            arg_num = h[ai,i]
            for j in range(0,N):
                if(j!=i):
                    aj = d_aa_num[ref_seq[j]]
                    arg_num += J[ai, aj ,i, j]
            num = np.exp(arg_num)
            fi_plm[ai,i] = num/denom
    #return the entropy of the conditional distributions
    S = compute_entropy_from_freq(fi_plm)
    return S
def compute_num_gap(seq):
    '''Return the number of gap ('-') characters in a sequence.

    Idiom: str.count performs the scan in C; the previous manual
    character-by-character loop was the slow equivalent.
    '''
    return seq.count('-')
def compute_gap_fraction(seq):
    '''Return the fraction of gap ('-') characters in seq.

    Robustness fix: an empty sequence used to raise ZeroDivisionError;
    it now returns 0.0.
    '''
    if not seq:
        return 0.0
    return seq.count('-') / len(seq)
def compute_diff(ref_seq, seq):
    '''Compute the mutations between two aligned strings (case-insensitive).

    :return: (positions, chars_in_ref_seq, chars_in_seq) for every position
        where the two sequences differ
    '''
    vec_idx = []
    vec_aa1 = []
    vec_aa2 = []
    for idx, (aa1, aa2) in enumerate(zip(ref_seq, seq)):
        if aa1.lower() != aa2.lower():
            vec_idx.append(idx)
            vec_aa1.append(aa1)
            vec_aa2.append(aa2)
    return vec_idx, vec_aa1, vec_aa2
def compute_dist(ref_seq, seq):
    '''Case-insensitive Hamming distance over the aligned overlap of the two sequences.'''
    mismatches = 0
    for a, b in zip(ref_seq, seq):
        if a.lower() != b.lower():
            mismatches += 1
    return mismatches
def compute_dist_excluding_gaps(ref_seq, seq):
    '''Case-insensitive mismatch count, skipping every position where either
    sequence carries a gap ('-').'''
    mismatches = 0
    for a, b in zip(ref_seq, seq):
        # gapped positions do not contribute to the distance
        if a == '-' or b == '-':
            continue
        if a.lower() != b.lower():
            mismatches += 1
    return mismatches
def compute_seqid(ref_seq, seq):
    '''Return the sequence identity (1 - gap-excluded mismatch fraction),
    normalized by len(seq).'''
    mismatch_fraction = compute_dist_excluding_gaps(ref_seq, seq) / len(seq)
    return 1 - mismatch_fraction
def compute_freq(path_msa):
    '''Compute the per-column single-site frequencies of an MSA fasta file,
    with a small pseudocount so that no frequency is exactly zero.'''
    msa_records = list(SeqIO.parse(open(path_msa, 'r'), "fasta"))
    n_columns = len(msa_records[0].seq)
    fi = np.zeros((len(d_aa_num), n_columns))
    # raw counts per (symbol, column)
    for record in msa_records:
        for column, residue in enumerate(record.seq):
            fi[d_aa_num[residue], column] += 1
    # small pseudocount to take into account zero frequencies (0*log(0))
    alpha = 0.0001
    fi = (1 - alpha) * fi + alpha / 2
    # column-wise normalization
    fi /= fi.sum(axis=0)
    return fi
def all_standard_aa(seq):
    '''Return True if the sequence contains only standard amino acids or gaps
    (i.e. only symbols from valid_aa).

    Fixes: removed the unreachable `break` that followed a `return`, and the
    redundant `char != '-'` test ('-' is already a member of valid_aa).
    '''
    return all(char in valid_aa for char in seq)
def split_proteome(path_ref_proteome, name_ref_proteome, tmp_path):
    '''Split a reference proteome fasta into one "ref_<id>" file per protein
    under tmp_path.'''
    with open(os.path.join(path_ref_proteome, name_ref_proteome), "r") as input_handle:
        for record_ref in SeqIO.parse(input_handle, "fasta"):
            protein_name = record_ref.id
            # one tmp file per reference protein, holding its sequence
            out_path = os.path.join(tmp_path, "ref_" + protein_name)
            with open(out_path, "w") as f_tmp:
                f_tmp.write(">" + protein_name + "\n")
                f_tmp.write(str(record_ref.seq))
    return 0
def run_hhblits(path_hhblits, path_ref_prot, path_db, path_msa_out, num_cores):
    '''Run hhblits on a reference protein, then convert and filter the
    resulting a3m MSA into fasta at path_msa_out.

    Filtering keeps sequences that have < 10% gaps, are < 90% identical to
    the reference, and contain only standard amino acids.

    Fixes: the reference fasta used to be re-parsed for EVERY record of the
    MSA (loop-invariant file I/O, now hoisted out of the loop), and the
    devnull/output file handles were never closed.
    :return: 0
    '''
    # 1) run hhblits (stdout/stderr silenced)
    with open(os.devnull, 'w') as FNULL:
        subprocess.run([path_hhblits, '-i', path_ref_prot, '-d', path_db,
                        '-oa3m', path_ref_prot + ".a3m", '-cpu', str(num_cores)],
                       stdout=FNULL, stderr=subprocess.STDOUT)
    # parse the reference sequence once
    records_ref = list(SeqIO.parse(open(path_ref_prot, 'r'), "fasta"))
    ref_seq = str(records_ref[0].seq)
    # 2) parse and filter the hhblits msa
    with open(path_msa_out, 'w') as file_out, \
            open(path_ref_prot + ".a3m", "r") as input_handle:
        for record in SeqIO.parse(input_handle, "fasta"):
            seq = str(record.seq)
            # hhblits output is a3m format; to make it fasta, drop the
            # lower-case/dot insert states
            seq = ''.join(char for char in seq if (char.isupper() or char == '-'))
            # 2.1) do the filtering
            if ((compute_gap_fraction(seq) < 0.1)
                    and (compute_seqid(ref_seq, seq) < 0.9)
                    and all_standard_aa(seq)):
                file_out.write(str(">" + record.id) + '\n')
                file_out.write(str(seq) + '\n')
    return 0
def filterMSA(path_ref_prot, path_msa_in, path_msa_out, include_refseq=True, max_grap_fraction = 0.2, max_seq_id = 0.9):
    '''Filter an MSA against a reference protein, writing the result to
    path_msa_out.

    Keeps the first sequence (the reference) when include_refseq is True;
    every other sequence is kept only if it is not too gapped, not too
    similar to the reference, and contains only standard amino acids.
    '''
    reference_records = list(SeqIO.parse(open(path_ref_prot, 'r'), "fasta"))
    reference_seq = str(reference_records[0].seq)
    out_handle = open(path_msa_out, 'w')
    with open(path_msa_in, "r") as msa_handle:
        first_record_pending = True
        for record in SeqIO.parse(msa_handle, "fasta"):
            # a3m -> fasta: keep only upper-case residues and gaps
            cleaned = ''.join(c for c in str(record.seq) if (c.isupper() or c == '-'))
            if include_refseq and first_record_pending:
                # keep the first sequence, i.e. the reference
                out_handle.write(str(">" + record.id) + '\n')
                out_handle.write(str(cleaned) + '\n')
                first_record_pending = False
            elif (compute_gap_fraction(cleaned) < max_grap_fraction
                    and compute_seqid(reference_seq, cleaned) < max_seq_id
                    and all_standard_aa(cleaned)):
                out_handle.write(str(">" + record.id) + '\n')
                out_handle.write(str(cleaned) + '\n')
    out_handle.close()
    return 0
def filterMSA_gisaid(path_ref_prot, path_msa_in, path_msa_out, max_grap_fraction = 0.2, min_seq_id = 0.9):
    '''Filter a GISAID-style MSA, writing the result to path_msa_out.

    Always keeps the first sequence (the reference); every other sequence is
    kept only if it is not too gapped, is CLOSE to the reference (identity
    greater than min_seq_id), and contains only standard amino acids.
    '''
    reference_records = list(SeqIO.parse(open(path_ref_prot, 'r'), "fasta"))
    reference_seq = str(reference_records[0].seq)
    out_handle = open(path_msa_out, 'w')
    with open(path_msa_in, "r") as msa_handle:
        first_record_pending = True
        for record in SeqIO.parse(msa_handle, "fasta"):
            # a3m -> fasta: keep only upper-case residues and gaps
            cleaned = ''.join(c for c in str(record.seq) if (c.isupper() or c == '-'))
            if first_record_pending:
                # keep the first sequence, i.e. the reference
                out_handle.write(str(">" + record.id) + '\n')
                out_handle.write(str(cleaned) + '\n')
                first_record_pending = False
            elif (compute_gap_fraction(cleaned) < max_grap_fraction
                    and compute_seqid(reference_seq, cleaned) > min_seq_id
                    and all_standard_aa(cleaned)):
                out_handle.write(str(">" + record.id) + '\n')
                out_handle.write(str(cleaned) + '\n')
    out_handle.close()
    return 0
def | |
F = GF(5); MS = MatrixSpace(F,2,2)
sage: G = MatrixGroup([MS(1), MS([1,2,3,4])])
sage: G
Matrix group over Finite Field of size 5 with 2 generators (
[1 0] [1 2]
[0 1], [3 4]
)
sage: G.gens()
(
[1 0] [1 2]
[0 1], [3 4]
)
"""
return tuple(self.element_class(self, x, check=False, convert=False)
for x in self._gens_matrix)
def gen(self, i):
    """
    Return the ``i``-th generator of the group.

    OUTPUT:

    The `i`-th generator of the group.

    EXAMPLES::

        sage: H = GL(2, GF(3))
        sage: h1, h2 = H([[1,0],[2,1]]), H([[1,1],[0,1]])
        sage: G = H.subgroup([h1, h2])
        sage: G.gen(0)
        [1 0]
        [2 1]
        sage: G.gen(0).matrix() == h1.matrix()
        True
    """
    # Delegate to gens() and index into the resulting tuple.
    generators = self.gens()
    return generators[i]
def ngens(self):
    """
    Return the number of generators.

    OUTPUT:

    An integer. The number of generators.

    EXAMPLES::

        sage: H = GL(2, GF(3))
        sage: h1, h2 = H([[1,0],[2,1]]), H([[1,1],[0,1]])
        sage: G = H.subgroup([h1, h2])
        sage: G.ngens()
        2
    """
    # The stored generator matrices determine the generator count.
    gens_matrix = self._gens_matrix
    return len(gens_matrix)
def __reduce__(self):
    """
    Used for pickling.

    TESTS::

        sage: G = MatrixGroup([matrix(CC, [[1,2],[3,4]]),
        ....:                  matrix(CC, [[1,3],[-1,0]])])
        sage: loads(dumps(G)) == G
        True

    Check that :trac:`22128` is fixed::

        sage: R = MatrixSpace(SR, 2)
        sage: G = MatrixGroup([R([[1, 1], [0, 1]])])
        sage: G.register_embedding(R)
        sage: loads(dumps(G))
        Matrix group over Symbolic Ring with 1 generators (
        [1 1]
        [0 1]
        )
    """
    # Rebuild the group from its generator matrices; skip re-validation.
    constructor_args = (self._gens_matrix, {'check': False})
    return (MatrixGroup, constructor_args)
def _test_matrix_generators(self, **options):
    """
    EXAMPLES::

        sage: m1 = matrix(SR, [[1,2],[3,4]])
        sage: m2 = matrix(SR, [[1,3],[-1,0]])
        sage: G = MatrixGroup(m1, m2)
        sage: G._test_matrix_generators()
    """
    tester = self._tester(**options)
    # Rebuilding the group from its own generators must reproduce the
    # same generator matrices, pairwise.
    rebuilt_gens = MatrixGroup(self.gens()).gens()
    for original, rebuilt in zip(self.gens(), rebuilt_gens):
        tester.assertEqual(original.matrix(), rebuilt.matrix())
###################################################################
#
# Matrix group over a ring that GAP understands
#
###################################################################
class FinitelyGeneratedMatrixGroup_gap(MatrixGroup_gap):
"""
Matrix group generated by a finite number of matrices.
EXAMPLES::
sage: m1 = matrix(GF(11), [[1,2],[3,4]])
sage: m2 = matrix(GF(11), [[1,3],[10,0]])
sage: G = MatrixGroup(m1, m2); G
Matrix group over Finite Field of size 11 with 2 generators (
[1 2] [ 1 3]
[3 4], [10 0]
)
sage: type(G)
<class 'sage.groups.matrix_gps.finitely_generated.FinitelyGeneratedMatrixGroup_gap_with_category'>
sage: TestSuite(G).run()
"""
def __reduce__(self):
    """
    Implement pickling.

    EXAMPLES::

        sage: m1 = matrix(QQ, [[1,2],[3,4]])
        sage: m2 = matrix(QQ, [[1,3],[-1,0]])
        sage: loads(MatrixGroup(m1, m2).dumps())
        Matrix group over Rational Field with 2 generators (
        [1 2] [ 1 3]
        [3 4], [-1 0]
        )
    """
    # Serialize as the generator matrices followed by an options dict
    # telling MatrixGroup to skip re-validation on reconstruction.
    matrices = [g.matrix() for g in self.gens()]
    args = tuple(matrices) + ({'check': False},)
    return (MatrixGroup, args)
def as_permutation_group(self, algorithm=None):
    r"""
    Return a permutation group representation for the group.

    In most cases occurring in practice, this is a permutation
    group of minimal degree (the degree begin determined from
    orbits under the group action). When these orbits are hard to
    compute, the procedure can be time-consuming and the degree
    may not be minimal.

    INPUT:

    - ``algorithm`` -- ``None`` or ``'smaller'``. In the latter
      case, try harder to find a permutation representation of
      small degree.

    OUTPUT:

    A permutation group isomorphic to ``self``. The
    ``algorithm='smaller'`` option tries to return an isomorphic
    group of low degree, but is not guaranteed to find the
    smallest one.

    EXAMPLES::

        sage: MS = MatrixSpace(GF(2), 5, 5)
        sage: A = MS([[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,0],[0,1,0,0,0],[1,0,0,0,0]])
        sage: G = MatrixGroup([A])
        sage: G.as_permutation_group()
        Permutation Group with generators [(1,2)]
        sage: MS = MatrixSpace( GF(7), 12, 12)
        sage: GG = gap("ImfMatrixGroup( 12, 3 )")
        sage: GG.GeneratorsOfGroup().Length()
        3
        sage: g1 = MS(eval(str(GG.GeneratorsOfGroup()[1]).replace("\n","")))
        sage: g2 = MS(eval(str(GG.GeneratorsOfGroup()[2]).replace("\n","")))
        sage: g3 = MS(eval(str(GG.GeneratorsOfGroup()[3]).replace("\n","")))
        sage: G = MatrixGroup([g1, g2, g3])
        sage: G.cardinality()
        21499084800
        sage: set_random_seed(0); current_randstate().set_seed_gap()
        sage: P = G.as_permutation_group()
        sage: P.cardinality()
        21499084800
        sage: P.degree() # random output
        144
        sage: set_random_seed(3); current_randstate().set_seed_gap()
        sage: Psmaller = G.as_permutation_group(algorithm="smaller")
        sage: Psmaller.cardinality()
        21499084800
        sage: Psmaller.degree() # random output
        108

    In this case, the "smaller" option returned an isomorphic group of
    lower degree. The above example used GAP's library of irreducible
    maximal finite ("imf") integer matrix groups to construct the
    MatrixGroup G over GF(7). The section "Irreducible Maximal Finite
    Integral Matrix Groups" in the GAP reference manual has more
    details.

    TESTS::

        sage: A= matrix(QQ, 2, [0, 1, 1, 0])
        sage: B= matrix(QQ, 2, [1, 0, 0, 1])
        sage: a, b= MatrixGroup([A, B]).as_permutation_group().gens()
        sage: a.order(), b.order()
        (2, 1)
    """
    # Note that the output of IsomorphismPermGroup() depends on
    # memory locations and will change if you change the order of
    # doctests and/or architecture
    from sage.groups.perm_gps.permgroup import PermutationGroup
    if not self.is_finite():
        raise NotImplementedError("Group must be finite.")
    n = self.degree()
    MS = MatrixSpace(self.base_ring(), n, n)
    mats = [] # initializing list of mats by which the gens act on self
    # Collect each generator as a list of matrix rows, coerced into MS.
    for g in self.gens():
        p = MS(g.matrix())
        m = p.rows()
        mats.append(m)
    # Convert the generators to a GAP-readable nested-list string and
    # ask GAP for an isomorphism onto a permutation group.  These
    # gap.eval calls mutate the global GAP session state ("iso",
    # "small") and must run in this exact order.
    mats_str = str(gap([[list(r) for r in m] for m in mats]))
    gap.eval("iso:=IsomorphismPermGroup(Group("+mats_str+"))")
    if algorithm == "smaller":
        # Optionally ask GAP to search for a lower-degree representation.
        gap.eval("small:= SmallerDegreePermutationRepresentation( Image( iso ) );")
        C = gap("Image( small )")
    else:
        C = gap("Image( iso )")
    # canonicalize=False keeps GAP's generator order in the Sage wrapper.
    return PermutationGroup(gap_group=C, canonicalize=False)
def module_composition_factors(self, algorithm=None):
    r"""
    Return a list of triples consisting of [base field, dimension,
    irreducibility], for each of the Meataxe composition factors
    modules. The ``algorithm="verbose"`` option returns more information,
    but in Meataxe notation.

    EXAMPLES::

        sage: F=GF(3);MS=MatrixSpace(F,4,4)
        sage: M=MS(0)
        sage: M[0,1]=1;M[1,2]=1;M[2,3]=1;M[3,0]=1
        sage: G = MatrixGroup([M])
        sage: G.module_composition_factors()
        [(Finite Field of size 3, 1, True),
         (Finite Field of size 3, 1, True),
         (Finite Field of size 3, 2, True)]
        sage: F = GF(7); MS = MatrixSpace(F,2,2)
        sage: gens = [MS([[0,1],[-1,0]]),MS([[1,1],[2,3]])]
        sage: G = MatrixGroup(gens)
        sage: G.module_composition_factors()
        [(Finite Field of size 7, 2, True)]

    Type ``G.module_composition_factors(algorithm='verbose')`` to get a
    more verbose version.

    For more on MeatAxe notation, see
    http://www.gap-system.org/Manuals/doc/ref/chap69.html
    """
    from sage.misc.sage_eval import sage_eval
    F = self.base_ring()
    # The MeatAxe machinery only works over finite fields.
    if not(F.is_finite()):
        raise NotImplementedError("Base ring must be finite.")
    q = F.cardinality()
    gens = self.gens()
    n = self.degree()
    MS = MatrixSpace(F,n,n)
    mats = [] # initializing list of mats by which the gens act on self
    # NOTE(review): W appears unused below — confirm it can be dropped.
    W = self.matrix_space().row_space()
    for g in gens:
        p = MS(g.matrix())
        m = p.rows()
        mats.append(m)
    # Build the natural G-module in GAP and compute its MeatAxe
    # composition factors.  These gap.eval calls mutate global GAP
    # session variables ("M", "MCFs", "MCF") and are order-dependent.
    mats_str = str(gap([[list(r) for r in m] for m in mats]))
    gap.eval("M:=GModuleByMats("+mats_str+", GF("+str(q)+"))")
    gap.eval("MCFs := MTX.CompositionFactors( M )")
    # eval() parses GAP's textual integer output; the input here comes
    # from the local GAP session, not from untrusted users.
    N = eval(gap.eval("Length(MCFs)"))
    if algorithm == "verbose":
        print(gap.eval('MCFs') + "\n")
    L = []
    # GAP lists are 1-indexed, hence range(1, N+1).
    for i in range(1,N+1):
        gap.eval("MCF := MCFs[%s]"%i)
        L.append(tuple([sage_eval(gap.eval("MCF.field")),
                        eval(gap.eval("MCF.dimension")),
                        sage_eval(gap.eval("MCF.IsIrreducible")) ]))
    return sorted(L)
def invariant_generators(self):
r"""
Return invariant ring generators.
Computes generators for the polynomial ring
`F[x_1,\ldots,x_n]^G`, where `G` in `GL(n,F)` is a finite matrix
group.
In the "good characteristic" case the polynomials returned
form a minimal generating set for the algebra of `G`-invariant
polynomials. In the "bad" case, the polynomials returned
are primary and secondary invariants, forming a not
necessarily minimal generating set for the algebra of
`G`-invariant polynomials.
ALGORITHM:
Wraps Singular's ``invariant_algebra_reynolds`` and ``invariant_ring``
in ``finvar.lib``.
EXAMPLES::
sage: F = GF(7); MS = MatrixSpace(F,2,2)
sage: gens = [MS([[0,1],[-1,0]]),MS([[1,1],[2,3]])]
sage: G = MatrixGroup(gens)
sage: G.invariant_generators()
[x1^7*x2 - x1*x2^7,
x1^12 - 2*x1^9*x2^3 - x1^6*x2^6 + 2*x1^3*x2^9 + x2^12,
x1^18 + 2*x1^15*x2^3 + 3*x1^12*x2^6 + 3*x1^6*x2^12 - 2*x1^3*x2^15 + x2^18]
sage: q = 4; a = 2
sage: MS = MatrixSpace(QQ, 2, 2)
sage: gen1 = [[1/a,(q-1)/a],[1/a, -1/a]]; gen2 = [[1,0],[0,-1]]; gen3 = [[-1,0],[0,1]]
sage: G = MatrixGroup([MS(gen1),MS(gen2),MS(gen3)])
sage: G.cardinality()
12
sage: G.invariant_generators()
[x1^2 + 3*x2^2, x1^6 + 15*x1^4*x2^2 + 15*x1^2*x2^4 + 33*x2^6]
sage: F = CyclotomicField(8)
sage: z = F.gen()
sage: a = z+1/z
sage: b = z^2
sage: MS = MatrixSpace(F,2,2)
sage: g1 = MS([[1/a, 1/a], [1/a, -1/a]])
sage: g2 = MS([[-b, 0], [0, b]])
sage: G=MatrixGroup([g1,g2])
sage: G.invariant_generators()
[x1^4 + 2*x1^2*x2^2 + x2^4,
x1^5*x2 - x1*x2^5,
x1^8 + 28/9*x1^6*x2^2 + 70/9*x1^4*x2^4 + 28/9*x1^2*x2^6 + x2^8]
AUTHORS:
- <NAME>, <NAME> and <NAME>.
REFERENCES:
- Singular reference manual
- [Stu1993]_
- <NAME>, "Minimal Generating Sets of non-modular invariant
rings of finite groups", :arxiv:`math/0703035`.
"""
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.interfaces.singular import singular
gens = self.gens()
singular.LIB("finvar.lib")
n = self.degree() #len((gens[0].matrix()).rows())
F = self.base_ring()
q = F.characteristic()
## test if the field is admissible
if F.gen()==1: # we got the rationals or GF(prime)
FieldStr = str(F.characteristic())
elif hasattr(F,'polynomial'): # we got an algebraic extension
if len(F.gens())>1:
raise NotImplementedError("can only deal with finite fields and (simple algebraic extensions of) the rationals")
FieldStr = '(%d,%s)'%(F.characteristic(),str(F.gen()))
else: # we have a transcendental extension
FieldStr = '(%d,%s)'%(F.characteristic(),','.join([str(p) for p in F.gens()]))
## Setting Singular's variable names
## We need to make sure that field generator and variables get different names.
| |
= Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
# Machine-generated model section: declare the nonnegative real decision
# variables x597 .. x923 on model m, all initialized at 0.0333333333333333.
# Assigning via setattr() is identical to the expanded "m.xNNN = Var(...)"
# statements: Python defines "m.x597 = v" as setattr(m, 'x597', v).
for _var_index in range(597, 924):
    setattr(m, 'x%d' % _var_index,
            Var(within=Reals, bounds=(0, None), initialize=0.0333333333333333))
m.x924 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x925 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x926 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x927 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x928 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x929 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x930 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x931 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x932 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x933 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x934 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x935 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x936 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x937 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x938 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x939 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x940 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x941 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x942 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x943 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x944 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x945 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x946 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x947 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x948 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x949 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x950 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x951 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x952 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x953 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x954 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x955 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x956 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x957 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x958 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x959 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x960 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x961 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x962 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x963 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x964 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x965 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x966 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x967 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x968 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x969 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x970 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x971 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x972 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x973 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x974 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x975 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x976 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x977 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x978 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x979 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x980 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x981 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x982 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x983 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x984 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x985 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x986 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x987 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x988 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x989 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x990 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x991 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x992 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x993 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x994 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x995 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x996 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x997 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x998 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x999 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1000 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1001 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1002 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1003 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1004 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1005 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1006 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1007 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1008 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1009 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1010 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1011 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1012 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1013 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1014 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1015 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1016 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1017 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1018 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1019 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1020 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1021 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1022 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1023 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1024 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1025 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1026 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1027 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1028 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1029 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1030 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1031 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1032 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1033 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1034 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1035 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1036 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1037 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1038 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1039 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1040 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1041 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1042 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1043 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1044 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1045 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1046 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1047 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1048 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1049 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1050 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1051 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1052 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1053 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1054 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1055 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1056 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1057 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1058 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1059 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1060 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1061 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1062 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1063 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1064 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1065 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1066 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1067 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1068 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1069 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1070 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1071 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1072 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1073 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1074 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1075 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1076 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1077 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1078 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1079 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1080 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1081 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1082 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1083 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1084 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1085 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1086 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1087 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1088 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1089 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1090 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1091 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1092 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1093 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1094 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1095 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1096 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1097 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1098 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1099 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1100 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1101 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1102 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1103 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1104 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1105 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1106 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1107 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1108 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1109 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1110 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1111 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1112 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1113 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1114 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1115 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1116 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1117 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1118 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1119 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1120 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1121 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1122 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1123 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1124 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1125 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1126 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1127 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1128 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1129 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1130 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1131 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1132 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1133 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1134 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1135 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1136 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1137 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1138 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1139 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1140 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1141 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1142 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1143 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1144 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1145 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1146 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1147 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1148 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1149 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1150 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1151 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1152 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1153 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1154 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1155 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1156 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1157 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1158 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1159 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1160 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1161 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1162 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1163 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1164 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1165 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1166 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1167 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1168 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1169 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1170 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1171 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1172 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1173 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1174 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1175 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1176 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1177 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1178 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1179 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1180 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1181 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1182 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1183 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1184 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1185 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1186 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1187 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1188 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1189 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1190 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1191 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1192 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1193 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1194 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1195 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1196 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1197 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1198 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1199 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1200 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1201 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1202 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1203 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1204 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1205 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1206 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1207 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1208 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1209 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1210 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1211 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1212 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1213 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1214 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1215 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1216 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1217 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1218 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1219 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1220 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1221 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1222 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1223 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1224 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1225 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1226 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1227 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1228 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1229 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1230 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1231 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1232 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1233 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1234 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1235 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x1236 | |
<gh_stars>0
#!/usr/bin/env python3
#
# Copyright (c) 2017, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# This script reads an AFU top-level interface specification that describes
# the module name and ports expected by an AFU. It also reads a
# platform database that describes the top-level interface ports
# that the platform offers. The script validates that the platform meets
# the requirements of the AFU and constructs a set of SystemVerilog header
# and interface files that describe the platform. Files containing rules
# for loading the constructed headers and interfaces into either ASE or
# Quartus are also emitted.
#
import os
import sys
import glob
import argparse
import json
import pprint
from platmgr.jsondb import jsondb
from platmgr.emitcfg import emitConfig, emitSimConfig, emitQsfConfig
def errorExit(msg):
    """Print *msg* to stderr (prefixed with "Error:") and exit with status 1."""
    print("\nError: " + msg, file=sys.stderr)
    sys.exit(1)
#
# Figure out the root of the base platform/AFU interface database. The
# database is installed along with OPAE SDK, so either find it in the
# installation tree or in the source tree.
#
def getDBRootPath():
    """Return the root directory of the platform/AFU interface database.

    The database ships with the OPAE SDK, so it is found either in the
    installation tree or in the source tree, depending on where this
    script is running from.
    """
    # CMake substitutes these placeholder values at configure time.
    project_src_dir = '/root/platforms/scripts'
    db_root_dir = 'share/opae/platform'

    # Parent directory of the running script.
    script_real_path = os.path.realpath(sys.argv[0])
    parent_dir = os.path.dirname(os.path.dirname(script_real_path))

    if (db_root_dir[0] == '@'):
        # Placeholders untouched: running straight out of the source tree.
        return parent_dir

    # Variables were substituted, so the script is either in the CMake
    # build directory or installed.
    if (os.path.isfile(os.path.join(parent_dir, 'CMakeCache.txt'))):
        # We're in the CMake build directory. Use the source tree's database.
        return os.path.dirname(project_src_dir)

    # The script is installed.  The installation prefix isn't known (it can
    # be changed with rpm --prefix), but the OPAE bin and share directories
    # are guaranteed to have the same parent.
    return os.path.join(parent_dir, db_root_dir)
#
# Walk the AFU's module-ports requirements and look for corresponding
# ports offered by the platform.
#
def matchAfuPorts(args, afu_ifc_db, platform_db):
    """Match the AFU's requested module ports against the platform's offers.

    Every entry in afu_ifc_db['module-ports'] is looked up in
    platform_db['module-ports-offered'] using the key
    '<class>/<interface>'.  A missing mandatory port is fatal (errorExit);
    a missing optional port is silently skipped.  For vector ports the
    entry count is negotiated from the AFU's and the platform's
    min/max/default-entries bounds.

    :param args: parsed command-line arguments (only args.verbose is read)
    :param afu_ifc_db: AFU top-level interface database (dict)
    :param platform_db: platform database (dict)
    :return: list of dicts, one per matched port:
             {'afu': <AFU port>, 'plat': <platform port>} plus a
             'num-entries' key when both sides are vectors.
    """
    afu_ports = []

    afu_name = afu_ifc_db['file_name']
    plat_name = platform_db['file_name']

    # Both databases must describe their ports as dictionaries.
    if (not isinstance(afu_ifc_db['module-ports'], dict)):
        errorExit("module-ports is not a dictionary " +
                  "in {0}".format(afu_ifc_db['file_path']))
    if (not isinstance(platform_db['module-ports-offered'], dict)):
        errorExit("module-ports-offered is not a dictionary " +
                  "in {0}".format(platform_db['file_path']))

    if (args.verbose):
        print("Starting module ports match...")
        print("  AFU {0} requests:".format(afu_name))
        for k in sorted(afu_ifc_db['module-ports'].keys()):
            r = afu_ifc_db['module-ports'][k]
            print("    {0}:{1}".format(r['class'], r['interface']))
        print("  Platform {0} offers:".format(plat_name))
        for k in sorted(platform_db['module-ports-offered'].keys()):
            r = platform_db['module-ports-offered'][k]
            print("    {0}:{1}".format(r['class'], r['interface']))

    # Ports requested by the AFU
    for port in list(afu_ifc_db['module-ports'].values()):
        plat_match = None

        # Ports offered by the platform are keyed by class/interface.
        plat_key = port['class'] + '/' + port['interface']
        if (plat_key not in platform_db['module-ports-offered']):
            # Failed to find a match -- fatal only for mandatory ports.
            if (not port['optional']):
                errorExit(
                    "{0} needs port {1}:{2} that {3} doesn't offer".format(
                        afu_name, port['class'], port['interface'], plat_name))
        else:
            if (args.verbose):
                print("Found match for port {0}:{1}".format(
                    port['class'], port['interface']))
            plat_match = platform_db['module-ports-offered'][plat_key]

            # Found a potential match.
            match = {'afu': port, 'plat': plat_match}

            # For vector classes, do the offered sizes work?
            if (not port['vector'] and not plat_match['vector']):
                # Not a vector -- nothing more to negotiate.
                pass
            elif (port['vector'] and not plat_match['vector']):
                # AFU wants a vector, but the platform doesn't offer one.
                errorExit(("{0} port {1}:{2} expects a vector but the " +
                           "platform {3} offers only a non-vector!").format(
                               afu_name, port['class'],
                               port['interface'], plat_name))
            elif (not port['vector'] and plat_match['vector']):
                # Platform provides a vector, but the AFU doesn't want one
                errorExit(("{0} port {1}:{2} expects a non-vector but the " +
                           "platform {3} offers only a vector!").format(
                               afu_name, port['class'],
                               port['interface'], plat_name))
            else:
                # Both are vectors.  Pick a size, starting with either the
                # most the platform will offer or the default number,
                # depending on whether the AFU requested a specific number.
                if ((port['max-entries'] == sys.maxsize) and
                        ('default-entries' in port) and
                        (port['default-entries'] >=
                         plat_match['min-entries']) and
                        (port['default-entries'] <=
                         plat_match['max-entries'])):
                    # AFU didn't cap the size and its default fits the
                    # platform's bounds -- use the AFU's default.
                    entries = port['default-entries']
                elif ((port['max-entries'] == sys.maxsize) and
                        ('default-entries' in plat_match) and
                        (plat_match['default-entries'] >=
                         port['min-entries'])):
                    # AFU didn't cap the size -- use the platform's default.
                    entries = plat_match['default-entries']
                else:
                    entries = plat_match['max-entries']

                # Constrain the number to what the AFU can accept
                if (entries > port['max-entries']):
                    entries = port['max-entries']
                if (entries < port['min-entries']):
                    errorExit(("{0} port {1}:{2} requires more vector " +
                               "entries than {3} provides!").format(
                                   afu_name, port['class'], port['interface'],
                                   plat_name))
                if (entries < plat_match['min-entries']):
                    # Fixed error text: previously read the garbled
                    # "requires more fewer entries".
                    errorExit(("{0} port {1}:{2} requires fewer " +
                               "entries than {3} provides!").format(
                                   afu_name, port['class'], port['interface'],
                                   plat_name))

                # Found an acceptable number of entries
                if (args.verbose):
                    print(
                        "    {0} vector length is {1}".format(plat_key,
                                                              entries))
                match['num-entries'] = entries

            # Valid module port
            afu_ports.append(match)

    return afu_ports
#
# Return a dictionary describing the AFU's desired top-level interface.
#
def getAfuIfc(args):
    """Return a dictionary describing the AFU's desired top-level interface.

    The interface class is taken from --ifc on the command line when given;
    otherwise it is located in the AFU's JSON database found via --src (which
    may be a JSON file or a directory containing exactly one).  The returned
    dict always carries 'class', 'file_path' and 'file_name' keys; the latter
    two are None when the class came from the command line.
    """
    afu_ifc = dict()
    if (args.ifc):
        # Interface name specified on the command line
        afu_ifc['class'] = args.ifc
        afu_ifc['file_path'] = None
        afu_ifc['file_name'] = None
    else:
        # The AFU top-level interface was not specified explicitly.
        # Look for it in a JSON file.
        if (not args.src):
            errorExit("Either --ifc or --src must be specified. See --help.")
        # Is the source argument a JSON file?
        if (os.path.isfile(args.src)):
            afu_json = args.src
        # Is the source argument a directory?
        elif (os.path.isdir(args.src)):
            # Find all the JSON files in the directory
            afu_json_list = [
                f for f in os.listdir(args.src) if f.endswith(".json")]
            if (len(afu_json_list) == 0):
                errorExit("AFU source directory " +
                          "({0}) has no JSON file!".format(args.src))
            if (len(afu_json_list) > 1):
                errorExit("AFU source directory ({0}) has ".format(args.src) +
                          "multiple JSON files. The desired JSON file may " +
                          "be specified explicitly with --ifc.")
            # Found a JSON file
            afu_json = os.path.join(args.src, afu_json_list[0])
        else:
            errorExit("AFU source ({0}) not found!".format(args.src))
        # Parse the JSON file
        if (args.verbose):
            print("Loading AFU interface from {0}".format(afu_json))
        with open(afu_json) as f:
            try:
                # The 'with' block owns the file handle; no explicit close
                # is needed (the original redundantly closed it here).
                data = json.load(f)
            except Exception:
                sys.stderr.write("\nError parsing JSON file {0}\n\n".format(
                    afu_json))
                raise
        try:
            afu_ifc = data['afu-image']['afu-top-interface']
            # *** Clean up legacy AFU JSON ***
            # The name 'module-ports' used to be 'module-arguments'.
            # Maintain compatibility with older AFUs.
            if ('module-arguments' in afu_ifc):
                afu_ifc['module-ports'] = afu_ifc.pop('module-arguments')
            # The interface 'class' used to be called 'name'.
            # Maintain compatibility with older AFUs.
            if ('name' in afu_ifc):
                afu_ifc['class'] = afu_ifc.pop('name')
            # Dereference the class purely to force a KeyError into the
            # handler below when it is missing.
            afu_ifc_class = afu_ifc['class']
        except Exception:
            # The JSON file doesn't name the top-level interface.
            # Was a default specified on the command line?
            msg = "No afu-image:afu-top-interface:class found in " + afu_json
            if (args.default_ifc):
                afu_ifc = dict()
                afu_ifc['class'] = args.default_ifc
                print("Warning: " + msg)
                print("         Using default interface: {0}\n".format(
                    args.default_ifc))
            else:
                errorExit(msg)
        afu_ifc['file_path'] = afu_json
        afu_ifc['file_name'] = os.path.splitext(os.path.basename(afu_json))[0]
    if (args.verbose):
        print("AFU interface requested: {0}".format(afu_ifc))
    return afu_ifc
# Fields that in AFU interface that may be updated by a particular AFU's
# JSON file.
# NOTE(review): despite the name, this is a *set* of field names (not
# classes) that an AFU's JSON file may legally override in the generic
# interface description.
legal_afu_ifc_update_classes = {
    'default-entries',
    'max-entries',
    'min-entries',
    'optional',
    'params'
}
#
# An AFU's JSON database may override some parameters in the generic AFU
# interface description by specifying updates in the AFU's
# afu-image:afu-top-interface:module-ports
# field.
#
# In addition to overriding, the AFU JSON may extend the base interface
# | |
or entry[1] > metric_max:
metric_max = entry[1]
if (i+1) % aggrgeate_by ==0:
# Append aggregated data
end_time_dt = entry[2]
avg_time_dt = dt_from_s(s_from_dt(start_time_dt) + ((s_from_dt(end_time_dt) - s_from_dt(start_time_dt))/2)).astimezone(profile_timezone)
timestamp_dygraphs_avg = '{}/{:02d}/{:02d} {:02d}:{:02d}:{:02d}'.format(avg_time_dt.year, avg_time_dt.month, avg_time_dt.day, avg_time_dt.hour, avg_time_dt.minute, avg_time_dt.second)
data['timeseries_aggregated'][key].append((timestamp_dygraphs_avg, metric_avg/aggrgeate_by, metric_min, metric_max))
# Reset counters
metric_avg = 0
metric_min = None
metric_max = None
start_time_dt = None
# Reassign series
del data['timeseries']
data['timeseries'] = data['timeseries_aggregated']
logger.debug('Done aggregating')
# Load last sessions
try:
count =0
sessions = Session.objects.filter(thing=thing).order_by('-last_contact')[0:3]
for session in sessions:
count += 1
session.count = count
session.duration = str(session.last_contact-session.started)
if '.' in session.duration:
session.duration=session.duration.split('.')[0]
session.started = str(session.started.astimezone(profile_timezone)).split('.')[0]
session.last_contact = str(session.last_contact.astimezone(profile_timezone)).split('.')[0]
# Format worker traceback if any
session.last_worker_status_traceback = None
try:
if session.last_worker_status.startswith('KO: '):
pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Format management traceback if any
session.last_management_status_traceback = None
try:
if session.last_management_status.startswith('KO: '):
pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
data['sessions'] = sessions
except:
data['sessions'] = None
# Enumerate the pools for this application
data['pools'] = []
for pool in Pool.objects.filter(app=thing.app):
data['pools'].append(pool)
# Ok, render
return render(request, 'dashboard_thing.html', {'data': data})
#===========================
# Session Dashboard view
#===========================
@private_view
def dashboard_thing_sessions(request):
    """Render the sessions dashboard for a Thing, 10 sessions per page.

    Pagination is driven by either 'start' or 'end' GET parameters; App and
    Thing lookups are scoped to the requesting user to enforce access rights.
    """
    # Init data
    data = {}
    data['user'] = request.user
    data['profile'] = Profile.objects.get(user=request.user)
    tid = request.GET.get('tid', None)
    data['tid'] = tid
    data['orpool'] = request.GET.get('orpool', None)
    intaid = request.GET.get('intaid', None)
    if not intaid:
        intaid = request.POST.get('intaid', None)
    data['intaid'] = intaid
    # Pagination window: fixed page size of 10, anchored on either end.
    start = request.GET.get('start', None)
    end = request.GET.get('end', None)
    if start is not None:
        start = int(start)
        end = start + 10
    elif end is not None:
        end = int(end)
        start = end - 10
        if start < 0:
            start = 0
    else:
        start = 0
        end = 10
    data['start'] = start
    data['end'] = end
    # Get App (scoped to the requesting user)
    try:
        app = App.objects.get(id=intaid, user=request.user)
    except App.DoesNotExist:
        data['error'] = 'The App with Internal ID "{}" does not exist or you do not have access rights'.format(intaid)
        return render(request, 'error.html', {'data': data})
    # Get Thing
    try:
        thing = Thing.objects.get(tid=tid, app=app)
    except Thing.DoesNotExist:
        data['error'] = 'The Thing with ID "{}" does not exist or you do not have access rights'.format(tid)
        return render(request, 'error.html', {'data': data})
    data['thing'] = thing
    # Load sessions (best-effort: any failure renders the page without them)
    try:
        # Hoisted out of the loop: the timezone is fixed for the request.
        user_timezone = timezonize(get_timezone_from_request(request))
        count = 0
        sessions = Session.objects.filter(thing=thing).order_by('-last_contact')[start:end]
        for session in sessions:
            count += 1
            session.count = count
            session.duration = session.last_contact - session.started
            # Localize and drop sub-second precision for display.
            session.started = str(session.started.astimezone(user_timezone)).split('.')[0]
            session.last_contact = str(session.last_contact.astimezone(user_timezone)).split('.')[0]
            # Format worker traceback if any
            session.last_worker_status_traceback = None
            try:
                if session.last_worker_status.startswith('KO: '):
                    pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
                    sub_pieces = pieces[0].split(' ')
                    session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
                    session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
            except Exception:
                pass
            # Format management traceback if any
            session.last_management_status_traceback = None
            try:
                if session.last_management_status.startswith('KO: '):
                    pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
                    sub_pieces = pieces[0].split(' ')
                    session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
                    session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
            except Exception:
                pass
        data['sessions'] = sessions
    except Exception:
        data['sessions'] = None
    # Ok, render
    return render(request, 'dashboard_thing_sessions.html', {'data': data})
#===========================
# Message Dashboard view
#===========================
@private_view
def dashboard_thing_messages(request):
    """Render the worker or management messages dashboard for a Thing.

    The 'type' parameter selects 'worker' or 'management' messages; the
    management view also handles posting a new message (idempotent via a
    client-generated UUID).
    """
    # Init data
    data = {}
    data['user'] = request.user
    data['profile'] = Profile.objects.get(user=request.user)
    tid = request.GET.get('tid', None)
    if not tid:
        tid = request.POST.get('tid', None)
    data['tid'] = tid
    intaid = request.GET.get('intaid', None)
    if not intaid:
        intaid = request.POST.get('intaid', None)
    data['intaid'] = intaid
    data['orpool'] = request.GET.get('orpool', None)
    if not data['orpool']:
        data['orpool'] = request.POST.get('orpool', None)
    data['type'] = request.GET.get('type', None)
    if not data['type']:
        data['type'] = request.POST.get('type', None)
    # Bug fix: GET parameters are strings; cast to int so the arithmetic and
    # comparison below do not raise TypeError when 'pagination' is supplied.
    pagination = int(request.GET.get('pagination', 100))
    # Force a pagination of 10 messages for the management
    if data['type'] == 'management':
        pagination = 10
    start = request.GET.get('start', None)
    end = request.GET.get('end', None)
    if start is not None:
        start = int(start)
        end = start + pagination
    elif end is not None:
        end = int(end)
        start = end - pagination
        if start < 0:
            start = 0
    else:
        start = 0
        end = pagination
    # Get App
    try:
        app = App.objects.get(id=intaid, user=request.user)
    except App.DoesNotExist:
        data['error'] = 'The App with Internal ID "{}" does not exist or you do not have access rights'.format(intaid)
        return render(request, 'error.html', {'data': data})
    # Get Thing
    try:
        thing = Thing.objects.get(tid=tid, app=app)
    except Thing.DoesNotExist:
        data['error'] = 'The Thing with ID "{}" does not exist or you do not have access rights'.format(tid)
        return render(request, 'error.html', {'data': data})
    data['thing'] = thing
    data['messages'] = []
    if data['type'] == 'worker':
        # Load worker messages (best-effort)
        try:
            # Hoisted out of the loop: the timezone is fixed for the request.
            user_timezone = timezonize(get_timezone_from_request(request))
            # NOTE(review): 'last=100' is fixed regardless of pagination — confirm intended.
            for msg in WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=100):
                # Fix time
                msg.ts = str(msg.ts.astimezone(user_timezone)).split('.')[0]
                # Convert from json to string
                msg.data = str(msg.data)
                # Truncate if too long
                if len(msg.data) >= 150:
                    msg.data = str(msg.data[0:150]) + '...'
                data['messages'].append(msg)
        except Exception as e:
            logger.debug('Error: {}'.format(e))
    elif data['type'] == 'management':
        # Create new message if we are requested to do so
        new_msg = request.POST.get('new_msg', None)
        data['generated_uuid'] = str(uuid.uuid4())
        generated_uuid = request.POST.get('generated_uuid', None)
        if new_msg and generated_uuid:
            # The client-generated UUID makes the POST idempotent: re-posting
            # the same form must not create a duplicate message.
            try:
                ManagementMessage.objects.get(tid=thing.tid, uuid=generated_uuid)
            except Exception:
                ManagementMessage.objects.create(aid=thing.app.aid, tid=thing.tid, data=new_msg, uuid=generated_uuid)
        # Load management messages (best-effort)
        try:
            user_timezone = timezonize(get_timezone_from_request(request))
            for msg in ManagementMessage.objects.filter(tid=thing.tid, aid=thing.app.aid, type='APP').order_by('-ts')[start:end]:
                msg.ts = str(msg.ts.astimezone(user_timezone)).split('.')[0]
                data['messages'].append(msg)
        except Exception:
            pass
    else:
        # Bug fix: previously formatted the builtin 'type' instead of the
        # requested message type value.
        data['error'] = 'The value "{}" for message type is not valid.'.format(data['type'])
        return render(request, 'error.html', {'data': data})
    # Set pagination
    data['start'] = start
    data['end'] = end if len(data['messages']) >= pagination else 0
    # Ok, render
    return render(request, 'dashboard_thing_messages.html', {'data': data})
#===========================
# Remote Shell view
#===========================
@private_view
def dashboard_thing_shell(request):
    """Render the remote-shell page for a Thing and post new CMD messages."""
    # Init data
    data = {}
    data['user'] = request.user
    data['profile'] = Profile.objects.get(user=request.user)
    intaid = request.GET.get('intaid', None)
    if not intaid:
        intaid = request.POST.get('intaid', None)
    tid = request.GET.get('tid', None)
    if not tid:
        tid = request.POST.get('tid', None)
    data['tid'] = tid
    data['orpool'] = request.GET.get('orpool', None)
    if not data['orpool']:
        data['orpool'] = request.POST.get('orpool', None)
    # Get App
    # NOTE(review): unlike the other dashboard views, this lookup is not
    # filtered by user=request.user — confirm access control is intended here.
    try:
        app = App.objects.get(id=intaid)
    except App.DoesNotExist:
        data['error'] = 'The app with internal id "{}" does not exist'.format(intaid)
        return render(request, 'error.html', {'data': data})
    # Get Thing
    try:
        thing = Thing.objects.get(tid=tid, app=app)
    except Thing.DoesNotExist:
        data['error'] = 'The thing with tid "{}" does not exist'.format(tid)
        return render(request, 'error.html', {'data': data})
    data['thing'] = thing
    data['messages'] = []
    # Create new message if we are requested to do so
    new_msg = request.POST.get('new_msg', None)
    if not new_msg:
        new_msg = request.GET.get('new_msg', None)
    data['generated_uuid'] = str(uuid.uuid4())
    generated_uuid = request.POST.get('generated_uuid', None)
    if not generated_uuid:
        generated_uuid = request.GET.get('generated_uuid', None)
    if new_msg and generated_uuid:
        # The client-generated UUID makes the request idempotent: re-sending
        # the same command must not create a duplicate CMD message.
        try:
            ManagementMessage.objects.get(tid=thing.tid, uuid=generated_uuid)
        except Exception:
            ManagementMessage.objects.create(aid=thing.app.aid, tid=thing.tid, data=new_msg, uuid=generated_uuid, type='CMD', thing=thing)
    # Load CMD management messages (filter by Thing as they are linked to the thing and not a specific app)
    user_timezone = timezonize(get_timezone_from_request(request))
    for msg in ManagementMessage.objects.filter(thing=thing, type='CMD').order_by('ts'):
        msg.ts = str(msg.ts.astimezone(user_timezone)).split('.')[0]
        if msg.reply:
            # A single rstrip('\n\r') strips any trailing mix of newlines and
            # carriage returns (equivalent to the former two-step strip).
            msg.reply_clean = msg.reply.rstrip('\n\r')
        else:
            # Normalize falsy replies (e.g. '') to None for the template.
            msg.reply = None
        data['messages'].append(msg)
    # Ok, render
    return render(request, 'dashboard_thing_shell.html', {'data': data})
#===========================
# New App view
#===========================
@private_view
def new_app(request):
    """Render the App-creation page, creating the App when a name is POSTed."""
    # Base context for the template.
    data = {
        'user': request.user,
        'profile': Profile.objects.get(user=request.user),
        'app': None,
        'lastsessions': [],
        'pool': None,
        'pythings_versions': settings.OS_VERSIONS,
    }
    # Creation parameters (absent when the form is first rendered).
    app_name = request.POST.get('app_name', None)
    pythings_version = request.POST.get('pythings_version', None)
    management_interval = request.POST.get('management_interval', None)
    worker_interval = request.POST.get('worker_interval', None)
    # Normalize the 'use latest app version' flag: any truthy value becomes True.
    use_latest_app_version = request.POST.get('uselatest', False)
    if use_latest_app_version:
        use_latest_app_version = True
    if app_name:
        create_app_helper(name=app_name,
                          user=request.user,
                          aid=None,
                          management_interval=management_interval,
                          worker_interval=worker_interval,
                          pythings_version=pythings_version,
                          use_latest_app_version=use_latest_app_version)
        data['app_name'] = app_name
    return render(request, 'new_app.html', {'data': data})
#===========================
# App code editor view
#===========================
@private_view
def dashboard_app_code_editor(request, embed=False):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
data['app'] = None
data['embed'] = '_embed' if embed else ''
# Get data
intaid = request.GET.get('intaid', None)
cid = request.GET.get('cid', None)
fileid = request.GET.get('fileid', None)
do_commit = request.GET.get('commit', None)
savednew = request.GET.get('savednew', False)
tagop = request.GET.get('tagop', None)
tagname = request.GET.get('tagname', None)
openworker = booleanize(request.GET.get('openworker', False))
data['savednew'] = savednew
data['tagop'] = tagop
data['tagname'] = tagname
if savednew != False and savednew.upper() != 'FALSE':
savednew=True
# Fix None
if cid is not None and cid.upper() == 'NONE':
cid = None
if savednew:
cid=None
data['cid'] = cid
# Get the application
try:
| |
"""Sub-classes for vtk.vtkRectilinearGrid and vtk.vtkImageData."""
import pathlib
import logging
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import abstract_class
from .dataset import DataSet
from .filters import _get_output, UniformGridFilters
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
@abstract_class
class Grid(DataSet):
    """A class full of common methods for non-pointset grids.

    Abstract base shared by ``RectilinearGrid`` and ``UniformGrid``.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the grid.

        ``args``/``kwargs`` are accepted for subclass-constructor
        compatibility but are ignored here.
        """
        super().__init__()
    @property
    def dimensions(self):
        """Return the grid's dimensions as a list of three ints.

        These are effectively the number of nodes along each of the three
        dataset axes.

        NOTE(review): despite older docs describing a "tuple", this returns
        a ``list`` — callers may rely on that, so it is documented rather
        than changed.
        """
        return list(self.GetDimensions())
    @dimensions.setter
    def dimensions(self, dims):
        """Set the dataset dimensions. Pass a length three tuple of integers."""
        nx, ny, nz = dims[0], dims[1], dims[2]
        self.SetDimensions(nx, ny, nz)
        # Flag the underlying VTK object as modified so pipelines re-execute.
        self.Modified()
    def _get_attrs(self):
        """Return the representation methods (internal helper)."""
        attrs = DataSet._get_attrs(self)
        attrs.append(("Dimensions", self.dimensions, "{:d}, {:d}, {:d}"))
        return attrs
class RectilinearGrid(_vtk.vtkRectilinearGrid, Grid):
    """Extend the functionality of a vtk.vtkRectilinearGrid object.
    Can be initialized in several ways:
    - Create empty grid
    - Initialize from a vtk.vtkRectilinearGrid object
    - Initialize directly from the point arrays
    See _from_arrays in the documentation for more details on initializing
    from point arrays
    Examples
    --------
    >>> import pyvista
    >>> import vtk
    >>> import numpy as np
    >>> # Create empty grid
    >>> grid = pyvista.RectilinearGrid()
    >>> # Initialize from a vtk.vtkRectilinearGrid object
    >>> vtkgrid = vtk.vtkRectilinearGrid()
    >>> grid = pyvista.RectilinearGrid(vtkgrid)
    >>> # Create from NumPy arrays
    >>> xrng = np.arange(-10, 10, 2)
    >>> yrng = np.arange(-10, 10, 5)
    >>> zrng = np.arange(-10, 10, 1)
    >>> grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
    """
    # File extensions mapped to the VTK reader/writer used for load/save.
    _READERS = {'.vtk': _vtk.vtkRectilinearGridReader,
                '.vtr': _vtk.vtkXMLRectilinearGridReader}
    _WRITERS = {'.vtk': _vtk.vtkRectilinearGridWriter,
                '.vtr': _vtk.vtkXMLRectilinearGridWriter}
    def __init__(self, *args, **kwargs):
        """Initialize the rectilinear grid.

        Accepts no arguments (empty grid), a ``vtkRectilinearGrid`` to deep
        copy, a file path, or one to three coordinate arrays (x, y, z).
        """
        super().__init__()
        if len(args) == 1:
            # Single argument: existing VTK grid, a file path, or the x array.
            if isinstance(args[0], _vtk.vtkRectilinearGrid):
                self.deep_copy(args[0])
            elif isinstance(args[0], (str, pathlib.Path)):
                self._from_file(args[0])
            elif isinstance(args[0], np.ndarray):
                self._from_arrays(args[0], None, None)
            else:
                raise TypeError(f'Type ({type(args[0])}) not understood by `RectilinearGrid`')
        elif len(args) == 3 or len(args) == 2:
            # Two or three coordinate arrays: (x, y) or (x, y, z).
            arg0_is_arr = isinstance(args[0], np.ndarray)
            arg1_is_arr = isinstance(args[1], np.ndarray)
            if len(args) == 3:
                arg2_is_arr = isinstance(args[2], np.ndarray)
            else:
                arg2_is_arr = False
            if all([arg0_is_arr, arg1_is_arr, arg2_is_arr]):
                self._from_arrays(args[0], args[1], args[2])
            elif all([arg0_is_arr, arg1_is_arr]):
                self._from_arrays(args[0], args[1], None)
            else:
                raise TypeError("Arguments not understood by `RectilinearGrid`.")
    def __repr__(self):
        """Return the default representation."""
        return DataSet.__repr__(self)
    def __str__(self):
        """Return the str representation."""
        return DataSet.__str__(self)
    def _update_dimensions(self):
        """Update the dimensions if coordinates have changed."""
        # Dimensions are implied by the lengths of the coordinate arrays.
        return self.SetDimensions(len(self.x), len(self.y), len(self.z))
    def _from_arrays(self, x, y, z):
        """Create VTK rectilinear grid directly from numpy arrays.
        Each array gives the uniques coordinates of the mesh along each axial
        direction. To help ensure you are using this correctly, we take the unique
        values of each argument.
        Parameters
        ----------
        x : np.ndarray
            Coordinates of the nodes in x direction.
        y : np.ndarray
            Coordinates of the nodes in y direction.
        z : np.ndarray
            Coordinates of the nodes in z direction.
        """
        # Set the coordinates along each axial direction
        # Must at least be an x array
        # np.unique also sorts and flattens the input.
        x = np.unique(x.ravel())
        self.SetXCoordinates(_vtk.numpy_to_vtk(x))
        if y is not None:
            y = np.unique(y.ravel())
            self.SetYCoordinates(_vtk.numpy_to_vtk(y))
        if z is not None:
            z = np.unique(z.ravel())
            self.SetZCoordinates(_vtk.numpy_to_vtk(z))
        # Ensure dimensions are properly set
        self._update_dimensions()
    @property
    def meshgrid(self):
        """Return a meshgrid of numpy arrays for this mesh.
        This simply returns a ``numpy.meshgrid`` of the coordinates for this
        mesh in ``ij`` indexing. These are a copy of the points of this mesh.
        """
        return np.meshgrid(self.x, self.y, self.z, indexing='ij')
    @property
    def points(self):
        """Return a copy of the points as an n by 3 numpy array."""
        # Fortran ('F') ordering matches VTK's implicit point ordering.
        xx, yy, zz = self.meshgrid
        return np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]
    @points.setter
    def points(self, points):
        """Points must be set along each axial direction.
        Please set the point coordinates with the ``x``, ``y``, and ``z``
        setters.
        This setter overrides the base class's setter to ensure a user does not
        attempt to set them.
        """
        raise AttributeError("The points cannot be set. The points of "
            "`RectilinearGrid` are defined in each axial direction. Please "
            "use the `x`, `y`, and `z` setters individually."
            )
    @property
    def x(self):
        """Get the coordinates along the X-direction."""
        return _vtk.vtk_to_numpy(self.GetXCoordinates())
    @x.setter
    def x(self, coords):
        """Set the coordinates along the X-direction."""
        self.SetXCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()
    @property
    def y(self):
        """Get the coordinates along the Y-direction."""
        return _vtk.vtk_to_numpy(self.GetYCoordinates())
    @y.setter
    def y(self, coords):
        """Set the coordinates along the Y-direction."""
        self.SetYCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()
    @property
    def z(self):
        """Get the coordinates along the Z-direction."""
        return _vtk.vtk_to_numpy(self.GetZCoordinates())
    @z.setter
    def z(self, coords):
        """Set the coordinates along the Z-direction."""
        self.SetZCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()
    @Grid.dimensions.setter  # type: ignore
    def dimensions(self, dims):
        """Do not let the dimensions of the RectilinearGrid be set."""
        raise AttributeError("The dimensions of a `RectilinearGrid` are implicitly defined and thus cannot be set.")
    def cast_to_structured_grid(self):
        """Cast this rectilinear grid to a :class:`pyvista.StructuredGrid`."""
        alg = _vtk.vtkRectilinearGridToPointSet()
        alg.SetInputData(self)
        alg.Update()
        return _get_output(alg)
class UniformGrid(_vtk.vtkImageData, Grid, UniformGridFilters):
"""Extend the functionality of a vtk.vtkImageData object.
Can be initialized in several ways:
- Create empty grid
- Initialize from a vtk.vtkImageData object
- Initialize directly from the point arrays
See ``_from_specs`` in the documentation for more details on initializing
from point arrays
Examples
--------
>>> import pyvista
>>> import vtk
>>> import numpy as np
>>> # Create empty grid
>>> grid = pyvista.UniformGrid()
>>> # Initialize from a vtk.vtkImageData object
>>> vtkgrid = vtk.vtkImageData()
>>> grid = pyvista.UniformGrid(vtkgrid)
>>> # Using just the grid dimensions
>>> dims = (10, 10, 10)
>>> grid = pyvista.UniformGrid(dims)
>>> # Using dimensions and spacing
>>> spacing = (2, 1, 5)
>>> grid = pyvista.UniformGrid(dims, spacing)
>>> # Using dimensions, spacing, and an origin
>>> origin = (10, 35, 50)
>>> grid = pyvista.UniformGrid(dims, spacing, origin)
"""
_READERS = {'.vtk': _vtk.vtkDataSetReader, '.vti': _vtk.vtkXMLImageDataReader}
_WRITERS = {'.vtk': _vtk.vtkDataSetWriter, '.vti': _vtk.vtkXMLImageDataWriter}
def __init__(self, *args, **kwargs):
"""Initialize the uniform grid."""
super().__init__()
if len(args) == 1:
if isinstance(args[0], _vtk.vtkImageData):
self.deep_copy(args[0])
elif isinstance(args[0], (str, pathlib.Path)):
self._from_file(args[0])
else:
arg0_is_valid = len(args[0]) == 3
self._from_specs(args[0])
elif len(args) > 1 and len(args) < 4:
arg0_is_valid = len(args[0]) == 3
arg1_is_valid = False
if len(args) > 1:
arg1_is_valid = len(args[1]) == 3
arg2_is_valid = False
if len(args) > 2:
arg2_is_valid = len(args[2]) == 3
if all([arg0_is_valid, arg1_is_valid, arg2_is_valid]):
self._from_specs(args[0], args[1], args[2])
elif all([arg0_is_valid, arg1_is_valid]):
self._from_specs(args[0], args[1])
def __repr__(self):
"""Return the default representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the default str representation."""
return DataSet.__str__(self)
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)):
"""Create VTK image data directly from numpy arrays.
A uniform grid is defined by the node spacings for each axis
(uniform along each individual axis) and the number of nodes on each axis.
These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
"""
xn, yn, zn = dims[0], dims[1], dims[2]
xs, ys, zs = spacing[0], spacing[1], spacing[2]
xo, yo, zo = origin[0], origin[1], origin[2]
self.SetDimensions(xn, yn, zn)
self.SetOrigin(xo, yo, zo)
self.SetSpacing(xs, ys, zs)
@property
def points(self):
"""Build a copy of the implicitly defined points as a numpy array."""
# Get grid dimensions
nx, ny, nz = self.dimensions
nx -= 1
ny -= 1
nz -= 1
# get the points and convert to spacings
dx, dy, dz = self.spacing
# Now make the cell arrays
ox, oy, oz = np.array(self.origin) + np.array(self.extent[::2])
x = np.insert(np.cumsum(np.full(nx, dx)), 0, 0.0) + ox
y = np.insert(np.cumsum(np.full(ny, dy)), 0, 0.0) + oy
z = np.insert(np.cumsum(np.full(nz, dz)), 0, 0.0) + oz
xx, yy, zz = np.meshgrid(x,y,z, indexing='ij')
return np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]
@points.setter
def points(self, points):
"""Points cannot be set.
This setter overrides the base class's setter to ensure a user does not
attempt to set them. See https://github.com/pyvista/pyvista/issues/713.
"""
raise AttributeError("The points cannot be set. The points of "
"`UniformGrid`/`vtkImageData` are implicitly defined by the "
"`origin`, `spacing`, and `dimensions` of the grid."
)
@property
def x(self):
"""Return all the X points."""
return self.points[:, 0]
@property
def y(self):
"""Return all the Y points."""
return self.points[:, 1]
@property
def z(self):
"""Return all the Z | |
<filename>tapiriik/services/Pulsstory/pulsstory.py
from tapiriik.settings import PULSSTORY_CLIENT_ID, PULSSTORY_CLIENT_SECRET
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.services.stream_sampling import StreamSampler
from tapiriik.services.auto_pause import AutoPauseCalculator
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, WaypointType, Waypoint, Location, Lap
from tapiriik.database import cachedb
from datetime import datetime, timedelta
import requests
import urllib.parse
import json
import logging
import collections
import zipfile
import io
logger = logging.getLogger(__name__)
class PulsstoryService(ServiceBase):
ID = "pulsstory"
DisplayName = "pulsstory"
DisplayAbbreviation = "PLS"
URLBase = 'https://www.pulsstory.com'
AuthenticationType = ServiceAuthenticationType.OAuth
UserProfileURL = URLBase + "/user/{0}/profile"
AuthenticationNoFrame = True # Chrome update broke this
_activityMappings = {"Running": ActivityType.Running,
"Cycling": ActivityType.Cycling,
"Mountain Biking": ActivityType.MountainBiking,
"Walking": ActivityType.Walking,
"Hiking": ActivityType.Hiking,
"Downhill Skiing": ActivityType.DownhillSkiing,
"Cross-Country Skiing": ActivityType.CrossCountrySkiing,
"Snowboarding": ActivityType.Snowboarding,
"Skating": ActivityType.Skating,
"Swimming": ActivityType.Swimming,
"Wheelchair": ActivityType.Wheelchair,
"Rowing": ActivityType.Rowing,
"Elliptical": ActivityType.Elliptical,
"Other": ActivityType.Other}
SupportedActivities = list(_activityMappings.values())
SupportsHR = True
SupportsCalories = True
SupportsCadence = True
SupportsPower = True
_wayptTypeMappings = {"start": WaypointType.Start, "end": WaypointType.End, "pause": WaypointType.Pause, "resume": WaypointType.Resume}
def WebInit(self):
self.UserAuthorizationURL = self.URLBase + "/Account/LogOn?&ReturnUrl=/ExternalSyncAPI/GenerateCode"
def RetrieveAuthorizationToken(self, req, level):
# might consider a real OAuth client
code = req.GET.get("code")
params = {"code": code, "client_id": PULSSTORY_CLIENT_ID, "client_secret": PULSSTORY_CLIENT_SECRET}
response = requests.post(self.URLBase + "/ExternalSyncAPI/GenerateToken", data=urllib.parse.urlencode(params), headers={"Content-Type": "application/x-www-form-urlencoded"})
if response.status_code != 200:
raise APIException("Invalid code")
token = response.json()["access_token"]
# This used to check with GetServiceRecordWithAuthDetails but that's hideously slow on an unindexed field.
uid = self._getUserId(ServiceRecord({"Authorization": {"Token": token}})) # meh
return (uid, {"Token": token})
def RevokeAuthorization(self, serviceRecord):
resp = requests.post(self.URLBase + "/ExternalSyncAPI/Deauthorize", data=self._apiData(serviceRecord))
if resp.status_code != 204 and resp.status_code != 200:
raise APIException("Unable to deauthorize pulsstory auth token, status " + str(resp.status_code) + " resp " + resp.text)
pass
def _apiData(self, serviceRecord):
return {"access_token": serviceRecord.Authorization["Token"]}
def _getAPIUris(self, serviceRecord):
if hasattr(self, "_uris"): # cache these for the life of the batch job at least? hope so
return self._uris
else:
response = requests.post(self.URLBase + "/ExternalSyncAPI/Uris", data=self._apiData(serviceRecord))
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise APIException("No authorization to retrieve user URLs", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to retrieve user URLs" + str(response))
uris = response.json()
for k in uris.keys():
if type(uris[k]) == str:
uris[k] = self.URLBase + uris[k]
self._uris = uris
return uris
def _getUserId(self, serviceRecord):
resp = requests.post(self.URLBase + "/ExternalSyncAPI/GetUserId", data=self._apiData(serviceRecord))
if resp.status_code != 200:
raise APIException("Unable to retrieve user id" + str(resp));
data = resp.json()
return data["userID"]
    def DownloadActivityList(self, serviceRecord, exhaustive=False):
        """Return (activities, exclusions) for this user.

        Follows the API's 'next' pagination links only when ``exhaustive`` is
        True; otherwise only the first page is fetched.  Records missing
        required keys are reported as exclusions rather than raising.
        """
        uris = self._getAPIUris(serviceRecord)
        allItems = []
        pageUri = uris["fitness_activities"]
        while True:
            response = requests.post(pageUri, data=self._apiData(serviceRecord))
            if response.status_code != 200:
                if response.status_code == 401 or response.status_code == 403:
                    raise APIException("No authorization to retrieve activity list", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
                raise APIException("Unable to retrieve activity list " + str(response) + " " + response.text)
            data = response.json()
            allItems += data["Data"]["items"]
            # Stop after one page unless exhaustive; an absent/empty 'next'
            # link marks the last page.
            if not exhaustive or "next" not in data["Data"] or data["Data"]["next"] == "":
                break
            pageUri = self.URLBase + data["Data"]["next"]
        activities = []
        exclusions = []
        for act in allItems:
            try:
                activity = self._populateActivity(act)
            except KeyError as e:
                # Corrupt/incomplete record: exclude it instead of failing the sync.
                exclusions.append(APIExcludeActivity("Missing key in activity data " + str(e), activity_id=act["URI"], user_exception=UserException(UserExceptionType.Corrupt)))
                continue
            logger.debug("\tActivity s/t " + str(activity.StartTime))
            # The activity's URI doubles as its service-side ID.
            activity.ServiceData = {"ActivityID": act["URI"]}
            activities.append(activity)
        return activities, exclusions
    def _populateActivity(self, rawRecord):
        ''' Populate the 1st level of the activity object with all details required for UID from pulsstory API data.

        Raises KeyError when a mandatory field (StartTime, Duration,
        Distance, Type) is absent; the caller turns that into an exclusion.
        '''
        activity = UploadedActivity()
        # can stay local + naive here, recipient services can calculate TZ as required
        activity.Name = rawRecord["Name"] if "Name" in rawRecord else None
        activity.StartTime = datetime.strptime(rawRecord["StartTime"], "%Y-%m-%d %H:%M:%S")
        activity.Stats.MovingTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=float(rawRecord["Duration"]))
        # EndTime is derived, not reported: start + duration.
        activity.EndTime = activity.StartTime + timedelta(seconds=float(rawRecord["Duration"]))
        activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=rawRecord["Distance"])
        # Average speed (km/h) from distance over elapsed time; guarded so a
        # zero-length activity cannot divide by zero.
        if (activity.EndTime - activity.StartTime).total_seconds() > 0:
            activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, avg=activity.Stats.Distance.asUnits(ActivityStatisticUnit.Kilometers).Value / ((activity.EndTime - activity.StartTime).total_seconds() / 60 / 60))
        activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=rawRecord["Energy"] if "Energy" in rawRecord else None)
        # Unknown type strings leave activity.Type at its default.
        if rawRecord["Type"] in self._activityMappings:
            activity.Type = self._activityMappings[rawRecord["Type"]]
        # NOTE(review): GPS mirrors "HasPath" but Stationary is taken straight
        # from "HasPoints" -- Stationary=True when the record *has* points
        # looks inverted (DownloadActivity later recomputes Stationary as
        # CountTotalWaypoints() <= 1).  Confirm against the pulsstory API
        # before changing.
        activity.GPS = rawRecord["HasPath"] if "HasPath" in rawRecord else False
        activity.Stationary = rawRecord["HasPoints"] if "HasPoints" in rawRecord else True
        activity.Notes = rawRecord["Notes"] if "Notes" in rawRecord else None
        activity.Private = rawRecord["Private"] if "Private" in rawRecord else True
        activity.CalculateUID()
        return activity
def DownloadActivity(self, serviceRecord, activity):
activityID = activity.ServiceData["ActivityID"]
response = requests.post(self.URLBase + activityID, data=self._apiData(serviceRecord))
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise APIException("No authorization to download activity" + activityID, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to download activity " + activityID + " response " + str(response) + " " + response.text)
ridedata = response.json()
ridedata["Owner"] = serviceRecord.ExternalID
if "UserID" in ridedata and int(ridedata["UserID"]) != int(serviceRecord.ExternalID):
raise APIExcludeActivity("Not the user's own activity", activity_id=activityID, user_exception=UserException(UserExceptionType.Other))
self._populateActivityWaypoints(ridedata, activity)
if "Climb" in ridedata:
activity.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(ridedata["Climb"]))
if "AvgHr" in ridedata:
activity.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(ridedata["AvgHr"]))
activity.Stationary = activity.CountTotalWaypoints() <= 1
return activity
def _convertList(self, streamData, streamDataKey, rawData, listName):
timeListName = listName + "Time"
valueListName = listName + "Value"
check = timeListName is not None and timeListName in rawData
check = check and valueListName is not None and valueListName in rawData
if check:
timeList = rawData[timeListName]
valueList = rawData[valueListName]
if timeList is not None and valueList is not None:
if len(timeList) > 0:
result = list(zip(timeList, valueList))
streamData[streamDataKey] = result
def _convertPathList(self, streamData, streamDataKey, rawData):
result = []
timeListName = "PathTime"
longitudeListName = "LongitudePathValue"
latitudeListName = "LatitudePathValue"
altitudeListName = "AltitudePathValue"
check = timeListName is not None and timeListName in rawData
check = check and longitudeListName in rawData
check = check and latitudeListName in rawData
if check:
timeList = rawData[timeListName]
longitudeList = rawData[longitudeListName]
latitudeList = rawData[latitudeListName]
if altitudeListName in rawData:
altitudeList = rawData[altitudeListName]
else:
altitudeList = None
if timeList is not None and longitudeList is not None and latitudeList is not None:
Nt = len(timeList)
if Nt > 0:
for n in range(Nt):
point = { "longitude" : longitudeList[n], "latitude": latitudeList[n] }
if altitudeList is not None:
point["altitude"] = altitudeList[n]
result.append((timeList[n], point))
streamData[streamDataKey] = result
    def _populateActivityWaypoints(self, rawData, activity):
        ''' populate the Waypoints collection from pulsstory API data

        Builds a single Lap spanning the whole activity, converts each raw
        stream into (time, value) pairs, and lets StreamSampler merge them
        into Waypoint objects.  Also derives activity.Stationary / .GPS
        from the resulting waypoints.
        '''
        lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime)
        activity.Laps = [lap]
        streamData = {}
        self._convertList(streamData, "heart_rate", rawData, "HeartRate")
        self._convertList(streamData, "distance", rawData, "Distance")
        self._convertList(streamData, "speed", rawData, "Speed")
        self._convertList(streamData, "power", rawData, "Power")
        self._convertList(streamData, "cadence", rawData, "Cadence")
        self._convertPathList(streamData, "path", rawData)
        # The keyword names below mirror the streamData keys above --
        # presumably StreamSampler dispatches stream values to the callback
        # by keyword name, so they must stay in sync (do not rename).
        def _addWaypoint(timestamp, path=None, heart_rate=None, power=None, distance=None, speed=None, cadence=None):
            # timestamp is an offset in seconds from the activity start.
            waypoint = Waypoint(activity.StartTime + timedelta(seconds=timestamp))
            if path:
                # (0, 0) coordinates are treated as "no fix" and dropped;
                # likewise a 0 altitude is treated as absent.
                if path["latitude"] != 0 and path["longitude"] != 0:
                    waypoint.Location = Location(path["latitude"], path["longitude"], path["altitude"] if "altitude" in path and float(path["altitude"]) != 0 else None) # if you're running near sea level, well...
            waypoint.Type = WaypointType.Regular
            waypoint.HR = heart_rate
            waypoint.Distance = distance
            waypoint.Speed = speed
            waypoint.Cadence = cadence
            waypoint.Power = power
            lap.Waypoints.append(waypoint)
        StreamSampler.SampleWithCallback(_addWaypoint, streamData)
        # No waypoints at all => stationary; GPS only if at least one
        # waypoint carries a real location.
        activity.Stationary = len(lap.Waypoints) == 0
        activity.GPS = any(wp.Location and wp.Location.Longitude is not None and wp.Location.Latitude is not None for wp in lap.Waypoints)
        if not activity.Stationary:
            lap.Waypoints[0].Type = WaypointType.Start
            lap.Waypoints[-1].Type = WaypointType.End
def UploadActivity(self, serviceRecord, activity, activitySource):
# assembly dict to post to pulsstory
uploadData = self._createUploadData(activity, False)
uris = self._getAPIUris(serviceRecord)
data = self._apiData(serviceRecord)
headers={}
jsonData = json.dumps(uploadData)
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, 'w') as myzip:
myzip.writestr('activity.txt', jsonData, compress_type=zipfile.ZIP_DEFLATED)
files = {"data": buffer.getvalue()}
response = requests.post(uris["upload_activity_zip"], data=data, files=files, headers=headers)
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise APIException("No authorization to upload activity " + activity.UID, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to upload activity " + activity.UID + " response " + str(response) + " " + response.text)
return response.json()["Id"]
def _getDuration(self, activity):
if activity.Stats.MovingTime.Value is not None:
return activity.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value
elif activity.Stats.TimerTime.Value is not None:
return activity.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value
else:
return (activity.EndTime - activity.StartTime).total_seconds()
def _createUploadData(self, activity, auto_pause=False):
''' create data dict for posting to pulsstory API '''
record = {}
record["Basic"] = {
"Name" : activity.Name,
"Duration" : self._getDuration(activity),
"Distance" : activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value,
"StartTime": activity.StartTime.strftime("%Y-%m-%d %H:%M:%S"),
"Type": activity.Type,
"Energy": activity.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value,
"Notes" : activity.Notes,
"Private" : activity.Private,
}
waypoints = {
"AvgHR" : activity.Stats.HR.Average,
"HeartRateValue" : [],
"HeartRateTime" : [],
"CadenceValue" : [],
"CadenceTime" : [],
"LongitudePathValue" : [],
"LatitudePathValue" : [],
"AltitudePathValue" : [],
"PathTime" : [],
"SpeedValue" : [],
"SpeedTime" : [],
"PowerValue" : [],
"PowerTime" : [],
}
record["Waypoints"] = waypoints;
if activity.CountTotalWaypoints() > 1:
flat_wps = activity.GetFlatWaypoints()
anchor_ts = flat_wps[0].Timestamp
# By default, use the provided waypoint types
wp_type_iter = (wp.Type for wp in flat_wps)
inPause = False
| |
{},
"csbSIPMthdCurrentStatsReqOut": {},
"csbSIPMthdCurrentStatsResp1xxIn": {},
"csbSIPMthdCurrentStatsResp1xxOut": {},
"csbSIPMthdCurrentStatsResp2xxIn": {},
"csbSIPMthdCurrentStatsResp2xxOut": {},
"csbSIPMthdCurrentStatsResp3xxIn": {},
"csbSIPMthdCurrentStatsResp3xxOut": {},
"csbSIPMthdCurrentStatsResp4xxIn": {},
"csbSIPMthdCurrentStatsResp4xxOut": {},
"csbSIPMthdCurrentStatsResp5xxIn": {},
"csbSIPMthdCurrentStatsResp5xxOut": {},
"csbSIPMthdCurrentStatsResp6xxIn": {},
"csbSIPMthdCurrentStatsResp6xxOut": {},
"csbSIPMthdHistoryStatsAdjName": {},
"csbSIPMthdHistoryStatsMethodName": {},
"csbSIPMthdHistoryStatsReqIn": {},
"csbSIPMthdHistoryStatsReqOut": {},
"csbSIPMthdHistoryStatsResp1xxIn": {},
"csbSIPMthdHistoryStatsResp1xxOut": {},
"csbSIPMthdHistoryStatsResp2xxIn": {},
"csbSIPMthdHistoryStatsResp2xxOut": {},
"csbSIPMthdHistoryStatsResp3xxIn": {},
"csbSIPMthdHistoryStatsResp3xxOut": {},
"csbSIPMthdHistoryStatsResp4xxIn": {},
"csbSIPMthdHistoryStatsResp4xxOut": {},
"csbSIPMthdHistoryStatsResp5xxIn": {},
"csbSIPMthdHistoryStatsResp5xxOut": {},
"csbSIPMthdHistoryStatsResp6xxIn": {},
"csbSIPMthdHistoryStatsResp6xxOut": {},
"csbSIPMthdRCCurrentStatsAdjName": {},
"csbSIPMthdRCCurrentStatsMethodName": {},
"csbSIPMthdRCCurrentStatsRespIn": {},
"csbSIPMthdRCCurrentStatsRespOut": {},
"csbSIPMthdRCHistoryStatsAdjName": {},
"csbSIPMthdRCHistoryStatsMethodName": {},
"csbSIPMthdRCHistoryStatsRespIn": {},
"csbSIPMthdRCHistoryStatsRespOut": {},
"csbSLAViolationNotifEnabled": {},
"csbSLAViolationNotifEnabledRev1": {},
"csbServiceStateNotifEnabled": {},
"csbSourceAlertNotifEnabled": {},
"cslFarEndTotalEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cslTotalEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cspFarEndTotalEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cspTotalEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cssTotalEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"csubAggStatsAuthSessions": {},
"csubAggStatsAvgSessionRPH": {},
"csubAggStatsAvgSessionRPM": {},
"csubAggStatsAvgSessionUptime": {},
"csubAggStatsCurrAuthSessions": {},
"csubAggStatsCurrCreatedSessions": {},
"csubAggStatsCurrDiscSessions": {},
"csubAggStatsCurrFailedSessions": {},
"csubAggStatsCurrFlowsUp": {},
"csubAggStatsCurrInvalidIntervals": {},
"csubAggStatsCurrTimeElapsed": {},
"csubAggStatsCurrUpSessions": {},
"csubAggStatsCurrValidIntervals": {},
"csubAggStatsDayAuthSessions": {},
"csubAggStatsDayCreatedSessions": {},
"csubAggStatsDayDiscSessions": {},
"csubAggStatsDayFailedSessions": {},
"csubAggStatsDayUpSessions": {},
"csubAggStatsDiscontinuityTime": {},
"csubAggStatsHighUpSessions": {},
"csubAggStatsIntAuthSessions": {},
"csubAggStatsIntCreatedSessions": {},
"csubAggStatsIntDiscSessions": {},
"csubAggStatsIntFailedSessions": {},
"csubAggStatsIntUpSessions": {},
"csubAggStatsIntValid": {},
"csubAggStatsLightWeightSessions": {},
"csubAggStatsPendingSessions": {},
"csubAggStatsRedSessions": {},
"csubAggStatsThrottleEngagements": {},
"csubAggStatsTotalAuthSessions": {},
"csubAggStatsTotalCreatedSessions": {},
"csubAggStatsTotalDiscSessions": {},
"csubAggStatsTotalFailedSessions": {},
"csubAggStatsTotalFlowsUp": {},
"csubAggStatsTotalLightWeightSessions": {},
"csubAggStatsTotalUpSessions": {},
"csubAggStatsUnAuthSessions": {},
"csubAggStatsUpSessions": {},
"csubJobControl": {},
"csubJobCount": {},
"csubJobFinishedNotifyEnable": {},
"csubJobFinishedReason": {},
"csubJobFinishedTime": {},
"csubJobIdNext": {},
"csubJobIndexedAttributes": {},
"csubJobMatchAcctSessionId": {},
"csubJobMatchAuthenticated": {},
"csubJobMatchCircuitId": {},
"csubJobMatchDanglingDuration": {},
"csubJobMatchDhcpClass": {},
"csubJobMatchDnis": {},
"csubJobMatchDomain": {},
"csubJobMatchDomainIpAddr": {},
"csubJobMatchDomainIpAddrType": {},
"csubJobMatchDomainIpMask": {},
"csubJobMatchDomainVrf": {},
"csubJobMatchIdentities": {},
"csubJobMatchMacAddress": {},
"csubJobMatchMedia": {},
"csubJobMatchMlpNegotiated": {},
"csubJobMatchNasPort": {},
"csubJobMatchNativeIpAddr": {},
"csubJobMatchNativeIpAddrType": {},
"csubJobMatchNativeIpMask": {},
"csubJobMatchNativeVrf": {},
"csubJobMatchOtherParams": {},
"csubJobMatchPbhk": {},
"csubJobMatchProtocol": {},
"csubJobMatchRedundancyMode": {},
"csubJobMatchRemoteId": {},
"csubJobMatchServiceName": {},
"csubJobMatchState": {},
"csubJobMatchSubscriberLabel": {},
"csubJobMatchTunnelName": {},
"csubJobMatchUsername": {},
"csubJobMaxLife": {},
"csubJobMaxNumber": {},
"csubJobQueryResultingReportSize": {},
"csubJobQuerySortKey1": {},
"csubJobQuerySortKey2": {},
"csubJobQuerySortKey3": {},
"csubJobQueueJobId": {},
"csubJobReportSession": {},
"csubJobStartedTime": {},
"csubJobState": {},
"csubJobStatus": {},
"csubJobStorage": {},
"csubJobType": {},
"csubSessionAcctSessionId": {},
"csubSessionAuthenticated": {},
"csubSessionAvailableIdentities": {},
"csubSessionByType": {},
"csubSessionCircuitId": {},
"csubSessionCreationTime": {},
"csubSessionDerivedCfg": {},
"csubSessionDhcpClass": {},
"csubSessionDnis": {},
"csubSessionDomain": {},
"csubSessionDomainIpAddr": {},
"csubSessionDomainIpAddrType": {},
"csubSessionDomainIpMask": {},
"csubSessionDomainVrf": {},
"csubSessionIfIndex": {},
"csubSessionIpAddrAssignment": {},
"csubSessionLastChanged": {},
"csubSessionLocationIdentifier": {},
"csubSessionMacAddress": {},
"csubSessionMedia": {},
"csubSessionMlpNegotiated": {},
"csubSessionNasPort": {},
"csubSessionNativeIpAddr": {},
"csubSessionNativeIpAddr2": {},
"csubSessionNativeIpAddrType": {},
"csubSessionNativeIpAddrType2": {},
"csubSessionNativeIpMask": {},
"csubSessionNativeIpMask2": {},
"csubSessionNativeVrf": {},
"csubSessionPbhk": {},
"csubSessionProtocol": {},
"csubSessionRedundancyMode": {},
"csubSessionRemoteId": {},
"csubSessionServiceIdentifier": {},
"csubSessionState": {},
"csubSessionSubscriberLabel": {},
"csubSessionTunnelName": {},
"csubSessionType": {},
"csubSessionUsername": {},
"cubeEnabled": {},
"cubeTotalSessionAllowed": {},
"cubeVersion": {},
"cufwAIAlertEnabled": {},
"cufwAIAuditTrailEnabled": {},
"cufwAaicGlobalNumBadPDUSize": {},
"cufwAaicGlobalNumBadPortRange": {},
"cufwAaicGlobalNumBadProtocolOps": {},
"cufwAaicHttpNumBadContent": {},
"cufwAaicHttpNumBadPDUSize": {},
"cufwAaicHttpNumBadProtocolOps": {},
"cufwAaicHttpNumDoubleEncodedPkts": {},
"cufwAaicHttpNumLargeURIs": {},
"cufwAaicHttpNumMismatchContent": {},
"cufwAaicHttpNumTunneledConns": {},
"cufwAppConnNumAborted": {},
"cufwAppConnNumActive": {},
"cufwAppConnNumAttempted": {},
"cufwAppConnNumHalfOpen": {},
"cufwAppConnNumPolicyDeclined": {},
"cufwAppConnNumResDeclined": {},
"cufwAppConnNumSetupsAborted": {},
"cufwAppConnSetupRate1": {},
"cufwAppConnSetupRate5": {},
"cufwCntlL2StaticMacAddressMoved": {},
"cufwCntlUrlfServerStatusChange": {},
"cufwConnGlobalConnSetupRate1": {},
"cufwConnGlobalConnSetupRate5": {},
"cufwConnGlobalNumAborted": {},
"cufwConnGlobalNumActive": {},
"cufwConnGlobalNumAttempted": {},
"cufwConnGlobalNumEmbryonic": {},
"cufwConnGlobalNumExpired": {},
"cufwConnGlobalNumHalfOpen": {},
"cufwConnGlobalNumPolicyDeclined": {},
"cufwConnGlobalNumRemoteAccess": {},
"cufwConnGlobalNumResDeclined": {},
"cufwConnGlobalNumSetupsAborted": {},
"cufwConnNumAborted": {},
"cufwConnNumActive": {},
"cufwConnNumAttempted": {},
"cufwConnNumHalfOpen": {},
"cufwConnNumPolicyDeclined": {},
"cufwConnNumResDeclined": {},
"cufwConnNumSetupsAborted": {},
"cufwConnReptAppStats": {},
"cufwConnReptAppStatsLastChanged": {},
"cufwConnResActiveConnMemoryUsage": {},
"cufwConnResEmbrConnMemoryUsage": {},
"cufwConnResHOConnMemoryUsage": {},
"cufwConnResMemoryUsage": {},
"cufwConnSetupRate1": {},
"cufwConnSetupRate5": {},
"cufwInspectionStatus": {},
"cufwL2GlobalArpCacheSize": {},
"cufwL2GlobalArpOverflowRate5": {},
"cufwL2GlobalEnableArpInspection": {},
"cufwL2GlobalEnableStealthMode": {},
"cufwL2GlobalNumArpRequests": {},
"cufwL2GlobalNumBadArpResponses": {},
"cufwL2GlobalNumDrops": {},
"cufwL2GlobalNumFloods": {},
"cufwL2GlobalNumIcmpRequests": {},
"cufwL2GlobalNumSpoofedArpResps": {},
"cufwPolAppConnNumAborted": {},
"cufwPolAppConnNumActive": {},
"cufwPolAppConnNumAttempted": {},
"cufwPolAppConnNumHalfOpen": {},
"cufwPolAppConnNumPolicyDeclined": {},
"cufwPolAppConnNumResDeclined": {},
"cufwPolAppConnNumSetupsAborted": {},
"cufwPolConnNumAborted": {},
"cufwPolConnNumActive": {},
"cufwPolConnNumAttempted": {},
"cufwPolConnNumHalfOpen": {},
"cufwPolConnNumPolicyDeclined": {},
"cufwPolConnNumResDeclined": {},
"cufwPolConnNumSetupsAborted": {},
"cufwUrlfAllowModeReqNumAllowed": {},
"cufwUrlfAllowModeReqNumDenied": {},
"cufwUrlfFunctionEnabled": {},
"cufwUrlfNumServerRetries": {},
"cufwUrlfNumServerTimeouts": {},
"cufwUrlfRequestsDeniedRate1": {},
"cufwUrlfRequestsDeniedRate5": {},
"cufwUrlfRequestsNumAllowed": {},
"cufwUrlfRequestsNumCacheAllowed": {},
"cufwUrlfRequestsNumCacheDenied": {},
"cufwUrlfRequestsNumDenied": {},
"cufwUrlfRequestsNumProcessed": {},
"cufwUrlfRequestsNumResDropped": {},
"cufwUrlfRequestsProcRate1": {},
"cufwUrlfRequestsProcRate5": {},
"cufwUrlfRequestsResDropRate1": {},
"cufwUrlfRequestsResDropRate5": {},
"cufwUrlfResTotalRequestCacheSize": {},
"cufwUrlfResTotalRespCacheSize": {},
"cufwUrlfResponsesNumLate": {},
"cufwUrlfServerAvgRespTime1": {},
"cufwUrlfServerAvgRespTime5": {},
"cufwUrlfServerNumRetries": {},
"cufwUrlfServerNumTimeouts": {},
"cufwUrlfServerReqsNumAllowed": {},
"cufwUrlfServerReqsNumDenied": {},
"cufwUrlfServerReqsNumProcessed": {},
"cufwUrlfServerRespsNumLate": {},
"cufwUrlfServerRespsNumReceived": {},
"cufwUrlfServerStatus": {},
"cufwUrlfServerVendor": {},
"cufwUrlfUrlAccRespsNumResDropped": {},
"cvActiveCallStatsAvgVal": {},
"cvActiveCallStatsMaxVal": {},
"cvActiveCallWMValue": {},
"cvActiveCallWMts": {},
"cvBasic": {"1": {}, "2": {}, "3": {}},
"cvCallActiveACOMLevel": {},
"cvCallActiveAccountCode": {},
"cvCallActiveCallId": {},
"cvCallActiveCallerIDBlock": {},
"cvCallActiveCallingName": {},
"cvCallActiveCoderTypeRate": {},
"cvCallActiveConnectionId": {},
"cvCallActiveDS0s": {},
"cvCallActiveDS0sHighNotifyEnable": {},
"cvCallActiveDS0sHighThreshold": {},
"cvCallActiveDS0sLowNotifyEnable": {},
"cvCallActiveDS0sLowThreshold": {},
"cvCallActiveERLLevel": {},
"cvCallActiveERLLevelRev1": {},
"cvCallActiveEcanReflectorLocation": {},
"cvCallActiveFaxTxDuration": {},
"cvCallActiveImgPageCount": {},
"cvCallActiveInSignalLevel": {},
"cvCallActiveNoiseLevel": {},
"cvCallActiveOutSignalLevel": {},
"cvCallActiveSessionTarget": {},
"cvCallActiveTxDuration": {},
"cvCallActiveVoiceTxDuration": {},
"cvCallDurationStatsAvgVal": {},
"cvCallDurationStatsMaxVal": {},
"cvCallDurationStatsThreshold": {},
"cvCallHistoryACOMLevel": {},
"cvCallHistoryAccountCode": {},
"cvCallHistoryCallId": {},
"cvCallHistoryCallerIDBlock": {},
"cvCallHistoryCallingName": {},
"cvCallHistoryCoderTypeRate": {},
"cvCallHistoryConnectionId": {},
"cvCallHistoryFaxTxDuration": {},
"cvCallHistoryImgPageCount": {},
"cvCallHistoryNoiseLevel": {},
"cvCallHistorySessionTarget": {},
"cvCallHistoryTxDuration": {},
"cvCallHistoryVoiceTxDuration": {},
"cvCallLegRateStatsAvgVal": {},
"cvCallLegRateStatsMaxVal": {},
"cvCallLegRateWMValue": {},
"cvCallLegRateWMts": {},
"cvCallRate": {},
"cvCallRateHiWaterMark": {},
"cvCallRateMonitorEnable": {},
"cvCallRateMonitorTime": {},
"cvCallRateStatsAvgVal": {},
"cvCallRateStatsMaxVal": {},
"cvCallRateWMValue": {},
"cvCallRateWMts": {},
"cvCallVolConnActiveConnection": {},
"cvCallVolConnMaxCallConnectionLicenese": {},
"cvCallVolConnTotalActiveConnections": {},
"cvCallVolMediaIncomingCalls": {},
"cvCallVolMediaOutgoingCalls": {},
"cvCallVolPeerIncomingCalls": {},
"cvCallVolPeerOutgoingCalls": {},
"cvCallVolumeWMTableSize": {},
"cvCommonDcCallActiveCallerIDBlock": {},
"cvCommonDcCallActiveCallingName": {},
"cvCommonDcCallActiveCodecBytes": {},
"cvCommonDcCallActiveCoderTypeRate": {},
"cvCommonDcCallActiveConnectionId": {},
"cvCommonDcCallActiveInBandSignaling": {},
"cvCommonDcCallActiveVADEnable": {},
"cvCommonDcCallHistoryCallerIDBlock": {},
"cvCommonDcCallHistoryCallingName": {},
"cvCommonDcCallHistoryCodecBytes": {},
"cvCommonDcCallHistoryCoderTypeRate": {},
"cvCommonDcCallHistoryConnectionId": {},
"cvCommonDcCallHistoryInBandSignaling": {},
"cvCommonDcCallHistoryVADEnable": {},
"cvForwNeighborEntry": {"4": {}, "5": {}, "6": {}, "7": {}, "8": {}, "9": {}},
"cvForwRouteEntry": {
"10": {},
"11": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvForwarding": {"1": {}, "2": {}, "3": {}, "5": {}, "6": {}, "7": {}, "8": {}},
"cvGeneralDSCPPolicyNotificationEnable": {},
"cvGeneralFallbackNotificationEnable": {},
"cvGeneralMediaPolicyNotificationEnable": {},
"cvGeneralPoorQoVNotificationEnable": {},
"cvIfCfgEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvIfConfigEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvIfCountInEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"31": {},
"32": {},
"33": {},
"34": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvIfCountOutEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvInterfaceVnetTrunkEnabled": {},
"cvInterfaceVnetVrfList": {},
"cvPeerCfgIfIndex": {},
"cvPeerCfgPeerType": {},
"cvPeerCfgRowStatus": {},
"cvPeerCfgType": {},
"cvPeerCommonCfgApplicationName": {},
"cvPeerCommonCfgDnisMappingName": {},
"cvPeerCommonCfgHuntStop": {},
"cvPeerCommonCfgIncomingDnisDigits": {},
"cvPeerCommonCfgMaxConnections": {},
"cvPeerCommonCfgPreference": {},
"cvPeerCommonCfgSourceCarrierId": {},
"cvPeerCommonCfgSourceTrunkGrpLabel": {},
"cvPeerCommonCfgTargetCarrierId": {},
"cvPeerCommonCfgTargetTrunkGrpLabel": {},
"cvSipMsgRateStatsAvgVal": {},
"cvSipMsgRateStatsMaxVal": {},
"cvSipMsgRateWMValue": {},
"cvSipMsgRateWMts": {},
"cvTotal": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cvVnetTrunkNotifEnable": {},
"cvVoIPCallActiveBitRates": {},
"cvVoIPCallActiveCRC": {},
"cvVoIPCallActiveCallId": {},
"cvVoIPCallActiveCallReferenceId": {},
"cvVoIPCallActiveChannels": {},
"cvVoIPCallActiveCoderMode": {},
"cvVoIPCallActiveCoderTypeRate": {},
"cvVoIPCallActiveConnectionId": {},
"cvVoIPCallActiveEarlyPackets": {},
"cvVoIPCallActiveEncap": {},
"cvVoIPCallActiveEntry": {"46": {}},
"cvVoIPCallActiveGapFillWithInterpolation": {},
"cvVoIPCallActiveGapFillWithPrediction": {},
"cvVoIPCallActiveGapFillWithRedundancy": {},
"cvVoIPCallActiveGapFillWithSilence": {},
"cvVoIPCallActiveHiWaterPlayoutDelay": {},
"cvVoIPCallActiveInterleaving": {},
"cvVoIPCallActiveJBufferNominalDelay": {},
"cvVoIPCallActiveLatePackets": {},
"cvVoIPCallActiveLoWaterPlayoutDelay": {},
"cvVoIPCallActiveLostPackets": {},
"cvVoIPCallActiveMaxPtime": {},
"cvVoIPCallActiveModeChgNeighbor": {},
"cvVoIPCallActiveModeChgPeriod": {},
"cvVoIPCallActiveMosQe": {},
"cvVoIPCallActiveOctetAligned": {},
"cvVoIPCallActiveOnTimeRvPlayout": {},
"cvVoIPCallActiveOutOfOrder": {},
"cvVoIPCallActiveProtocolCallId": {},
"cvVoIPCallActivePtime": {},
"cvVoIPCallActiveReceiveDelay": {},
"cvVoIPCallActiveRemMediaIPAddr": {},
"cvVoIPCallActiveRemMediaIPAddrT": {},
"cvVoIPCallActiveRemMediaPort": {},
"cvVoIPCallActiveRemSigIPAddr": {},
"cvVoIPCallActiveRemSigIPAddrT": {},
"cvVoIPCallActiveRemSigPort": {},
"cvVoIPCallActiveRemoteIPAddress": {},
"cvVoIPCallActiveRemoteUDPPort": {},
"cvVoIPCallActiveReversedDirectionPeerAddress": {},
"cvVoIPCallActiveRobustSorting": {},
"cvVoIPCallActiveRoundTripDelay": {},
"cvVoIPCallActiveSRTPEnable": {},
"cvVoIPCallActiveSelectedQoS": {},
"cvVoIPCallActiveSessionProtocol": {},
"cvVoIPCallActiveSessionTarget": {},
"cvVoIPCallActiveTotalPacketLoss": {},
"cvVoIPCallActiveUsername": {},
"cvVoIPCallActiveVADEnable": {},
"cvVoIPCallHistoryBitRates": {},
"cvVoIPCallHistoryCRC": {},
"cvVoIPCallHistoryCallId": {},
"cvVoIPCallHistoryCallReferenceId": {},
"cvVoIPCallHistoryChannels": {},
"cvVoIPCallHistoryCoderMode": {},
"cvVoIPCallHistoryCoderTypeRate": {},
"cvVoIPCallHistoryConnectionId": {},
"cvVoIPCallHistoryEarlyPackets": {},
"cvVoIPCallHistoryEncap": {},
"cvVoIPCallHistoryEntry": {"48": {}},
"cvVoIPCallHistoryFallbackDelay": {},
"cvVoIPCallHistoryFallbackIcpif": {},
"cvVoIPCallHistoryFallbackLoss": {},
"cvVoIPCallHistoryGapFillWithInterpolation": {},
"cvVoIPCallHistoryGapFillWithPrediction": {},
"cvVoIPCallHistoryGapFillWithRedundancy": {},
"cvVoIPCallHistoryGapFillWithSilence": {},
"cvVoIPCallHistoryHiWaterPlayoutDelay": {},
"cvVoIPCallHistoryIcpif": {},
"cvVoIPCallHistoryInterleaving": {},
"cvVoIPCallHistoryJBufferNominalDelay": {},
"cvVoIPCallHistoryLatePackets": {},
"cvVoIPCallHistoryLoWaterPlayoutDelay": {},
"cvVoIPCallHistoryLostPackets": {},
"cvVoIPCallHistoryMaxPtime": {},
"cvVoIPCallHistoryModeChgNeighbor": {},
"cvVoIPCallHistoryModeChgPeriod": {},
"cvVoIPCallHistoryMosQe": {},
"cvVoIPCallHistoryOctetAligned": {},
"cvVoIPCallHistoryOnTimeRvPlayout": {},
"cvVoIPCallHistoryOutOfOrder": {},
"cvVoIPCallHistoryProtocolCallId": | |
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestSupport(BaseNumpyAppTest):
spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii", "mmap"])
    def setup_class(cls):
        # Build app-level helper classes once per test class; they are
        # exposed to the app-level tests as self.NoNew / self.SubType.
        # NoNew forbids __new__ (view/finalize-only paths), SubType records
        # whether __new__ / __array_finalize__ were invoked.
        # im_func: Python 2 unbound-method unwrapping.
        BaseNumpyAppTest.setup_class.im_func(cls)
        cls.w_NoNew = cls.space.appexec([], '''():
            from numpy import ndarray
            class NoNew(ndarray):
                def __new__(cls, subtype):
                    raise ValueError('should not call __new__')
                def __array_finalize__(self, obj):
                    self.called_finalize = True
            return NoNew ''')
        cls.w_SubType = cls.space.appexec([], '''():
            from numpy import ndarray, array
            class SubType(ndarray):
                def __new__(obj, input_array):
                    obj = array(input_array, copy=False).view(obj)
                    obj.called_new = True
                    return obj
                def __array_finalize__(self, obj):
                    self.called_finalize = True
            return SubType ''')
    def test_subtype_ndarray(self):
        """array(..., subok=True) on a plain ndarray preserves the values."""
        from numpy import arange, array
        a = arange(24, dtype='int32').reshape((6,4))
        b = array(a, dtype='float64', subok=True)
        assert (a == b).all()
    def test_subtype_base(self):
        """Subclass instances keep their type through reshape/indexing, and
        views share data with (and report) their base array."""
        from numpy import ndarray, dtype
        class C(ndarray):
            def __new__(subtype, shape, dtype):
                self = ndarray.__new__(subtype, shape, dtype)
                self.id = 'subtype'
                return self
        a = C((), int)
        assert type(a) is C
        assert a.shape == ()
        assert a.dtype is dtype(int)
        assert a.id == 'subtype'
        a = C([2, 2], int)
        assert isinstance(a, C)
        assert isinstance(a, ndarray)
        assert a.shape == (2, 2)
        assert a.dtype is dtype(int)
        assert a.id == 'subtype'
        a = a.reshape(1, 4)
        b = a.reshape(4, 1)
        assert isinstance(b, C)
        #make sure __new__ was not called
        assert not getattr(b, 'id', None)
        a.fill(3)
        b = a[0]
        assert isinstance(b, C)
        assert (b == 3).all()
        # b is a view of a: writing through it must be visible in a.
        b[0]=100
        assert a[0,0] == 100
        assert type(a) is not ndarray
        assert a[0,0] == 100
        assert a.base is not None
        # __array__ returns a plain ndarray view whose base is a.
        b = a.__array__()
        assert type(b) is ndarray
        assert b[0,0] == 100
        assert b.base is a
    def test_subtype_view(self):
        """view() honors the requested subclass, including on scalars and
        via the unbound ndarray.view call."""
        from numpy import ndarray, array
        class matrix(ndarray):
            def __new__(subtype, data, dtype=None, copy=True):
                if isinstance(data, matrix):
                    return data
                return data.view(subtype)
        a = array(range(5))
        b = matrix(a)
        assert isinstance(b, matrix)
        assert b.__array_priority__ == 0.0
        assert (b == a).all()
        assert isinstance(b.view(), matrix)
        # zero-dim scalar extracted with [()]: view keeps the scalar's type
        a = array(5)[()]
        for s in [matrix, ndarray]:
            b = a.view(s)
            assert b == a
            assert type(b) is type(a)
        a = matrix(array(range(5)))
        for s in [matrix, ndarray]:
            b = ndarray.view(a, s)
            assert (b == a).all()
            assert type(b) is s
    def test_subtype_like_matrix(self):
        """ndarray.__new__ with a buffer= argument wraps existing data."""
        import numpy as np
        arr = np.array([1,2,3])
        ret = np.ndarray.__new__(np.ndarray, arr.shape, arr.dtype, buffer=arr)
        assert ret.__array_priority__ == 0.0
        assert (arr == ret).all()
    def test_priority(self):
        """A higher __array_priority__ operand gets its reflected op called
        when ndarray.__add__ returns NotImplemented."""
        from numpy import ndarray, arange, add
        class DoReflected(object):
            __array_priority__ = 10
            def __radd__(self, other):
                return 42
        class A(object):
            def __add__(self, other):
                return NotImplemented
        a = arange(10)
        b = DoReflected()
        c = A()
        assert c + b == 42
        assert a.__add__(b) is NotImplemented # not an exception
        assert b.__radd__(a) == 42
        # full protocol: a + b falls through to b.__radd__
        assert a + b == 42
    def test_finalize(self):
        """__array_finalize__ fires on construction, slicing, view-casting
        and in-place shape assignment, receiving the source array as obj."""
        #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray
        import numpy as np
        class InfoArray(np.ndarray):
            def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
                        strides=None, order='C', info=1):
                obj = np.ndarray.__new__(subtype, shape, dtype, buffer,
                                         offset, strides, order)
                obj.info = info
                return obj
            def __array_finalize__(self, obj):
                if obj is None:
                    return
                # printing the object itself will crash the test
                self.info = 1 + getattr(obj, 'info', 0)
                if hasattr(obj, 'info'):
                    obj.info += 100
        obj = InfoArray(shape=(3,))
        assert isinstance(obj, InfoArray)
        assert obj.info == 1
        obj = InfoArray(shape=(3,), info=10)
        assert obj.info == 10
        # slicing: finalize sees obj (info=10) => view gets 11
        v = obj[1:]
        assert isinstance(v, InfoArray)
        assert v.base is obj
        assert v.info == 11
        arr = np.arange(10)
        cast_arr = arr.view(InfoArray)
        assert isinstance(cast_arr, InfoArray)
        assert cast_arr.base is arr
        assert cast_arr.info == 1
        # Test that setshape calls __array_finalize__
        cast_arr.shape = (5,2)
        z = cast_arr.info
        assert z == 101
    def test_sub_where(self):
        """where() always returns a plain ndarray, not the input subclass."""
        from numpy import where, ones, zeros, array
        a = array([1, 2, 3, 0, -3])
        v = a.view(self.NoNew)
        b = where(array(v) > 0, ones(5), zeros(5))
        assert (b == [1, 1, 1, 0, 0]).all()
        # where returns an ndarray regardless of the subtype of v
        assert not isinstance(b, self.NoNew)
    def test_sub_repeat(self):
        """repeat() preserves the array subclass."""
        from numpy import array
        a = self.SubType(array([[1, 2], [3, 4]]))
        b = a.repeat(3)
        assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all()
        assert isinstance(b, self.SubType)
    def test_sub_flatiter(self):
        """Arithmetic on flat-iterator slices yields the subclass whenever
        either operand is one."""
        from numpy import array
        a = array(range(9)).reshape(3, 3).view(self.NoNew)
        c = array(range(9)).reshape(3, 3)
        assert isinstance(a.flat[:] + a.flat[:], self.NoNew)
        assert isinstance(a.flat[:] + c.flat[:], self.NoNew)
        assert isinstance(c.flat[:] + a.flat[:], self.NoNew)
        assert not isinstance(c.flat[:] + c.flat[:], self.NoNew)
    def test_sub_getitem_filter(self):
        """Boolean-mask indexing keeps the subclass via __array_finalize__,
        without calling __new__ on the result."""
        from numpy import array
        a = array(range(5))
        b = self.SubType(a)
        c = b[array([False, True, False, True, False])]
        assert c.shape == (2,)
        assert (c == [1, 3]).all()
        assert isinstance(c, self.SubType)
        assert b.called_new
        assert not getattr(c, 'called_new', False)
        assert c.called_finalize
    def test_sub_getitem_array_int(self):
        """Integer-array (fancy) indexing keeps the subclass via
        __array_finalize__, without calling __new__ on the result."""
        from numpy import array
        a = array(range(5))
        b = self.SubType(a)
        assert b.called_new
        c = b[array([3, 2, 1, 4])]
        assert (c == [3, 2, 1, 4]).all()
        assert isinstance(c, self.SubType)
        assert not getattr(c, 'called_new', False)
        assert c.called_finalize
    def test_sub_round(self):
        """round() preserves the subclass only for decimals=0, matching
        numpy's behavior."""
        from numpy import array
        a = array(range(10), dtype=float).view(self.NoNew)
        # numpy compatibility
        b = a.round(decimals=0)
        assert isinstance(b, self.NoNew)
        b = a.round(decimals=1)
        assert not isinstance(b, self.NoNew)
        b = a.round(decimals=-1)
        assert not isinstance(b, self.NoNew)
    def test_sub_dot(self):
        """dot() takes its result type from the first (self) argument."""
        # the returned type is that of the first argument
        from numpy import array
        a = array(range(12)).reshape(3,4)
        b = self.SubType(a)
        c = array(range(12)).reshape(4,3).view(self.SubType)
        d = c.dot(a)
        assert isinstance(d, self.SubType)
        assert not getattr(d, 'called_new', False)
        assert d.called_finalize
        d = a.dot(c)
        assert not isinstance(d, self.SubType)
        assert not getattr(d, 'called_new', False)
        assert not getattr(d, 'called_finalize', False)
    def test_sub_reduce(self):
        """Reductions (sum) keep the subclass via __array_finalize__; with an
        explicit out= array the result *is* that array, whatever its type."""
        # i.e. sum, max
        # test for out as well
        from numpy import array
        a = array(range(12)).reshape(3,4)
        b = self.SubType(a)
        c = b.sum(axis=0)
        assert (c == [12, 15, 18, 21]).all()
        assert isinstance(c, self.SubType)
        assert not getattr(c, 'called_new', False)
        assert c.called_finalize
        d = array(range(4))
        c = b.sum(axis=0, out=d)
        # out= wins: the returned object is the supplied plain array
        assert c is d
        assert not isinstance(c, self.SubType)
        d = array(range(4)).view(self.NoNew)
        c = b.sum(axis=0, out=d)
        # ... and keeps the out array's own subclass when it has one
        assert c is d
        assert isinstance(c, self.NoNew)
    def test_sub_call2(self):
        """Binary ufunc result type follows the left-hand operand's subclass
        (here both subclasses have default priority); subclass op plain
        sequence also keeps the subclass."""
        # c + a vs. a + c, what about array priority?
        from numpy import array
        a = array(range(12)).view(self.NoNew)
        b = self.SubType(range(12))
        c = b + a
        assert isinstance(c, self.SubType)
        c = a + b
        assert isinstance(c, self.NoNew)
        d = range(12)
        e = a - d
        assert isinstance(e, self.NoNew)
    def test_sub_call1(self):
        """Unary ufuncs (sqrt) must run __array_finalize__ on the result."""
        from numpy import array, sqrt
        a = array(range(12)).view(self.NoNew)
        b = sqrt(a)
        assert b.called_finalize == True
    def test_sub_astype(self):
        """astype() must run __array_finalize__ on the converted array."""
        from numpy import array
        a = array(range(12)).view(self.NoNew)
        b = a.astype(float)
        assert b.called_finalize == True
    def test_sub_reshape(self):
        """reshape() must run __array_finalize__ on the reshaped view."""
        from numpy import array
        a = array(range(12)).view(self.NoNew)
        b = a.reshape(3, 4)
        assert b.called_finalize == True
    def test___array__(self):
        """array() on an object exposing __array__ must honour subok=True
        (keep the ndarray subclass returned by __array__) and honour an
        explicit dtype request."""
        # NOTE(review): 'sys' appears unused in this test
        import sys
        from numpy import ndarray, array, dtype
        class D(ndarray):
            # ndarray subclass tagging its instances
            def __new__(subtype, shape, dtype):
                self = ndarray.__new__(subtype, shape, dtype)
                self.id = 'subtype'
                return self
        class C(object):
            # plain object whose __array__ returns a D instance
            def __init__(self, val, dtype):
                self.val = val
                self.dtype = dtype
            def __array__(self, dtype=None):
                retVal = D(self.val, dtype)
                return retVal
        a = C([2, 2], int)
        b = array(a, subok=True)
        assert b.shape == (2, 2)
        # subok=True must preserve the D subclass
        assert isinstance(b, D)
        c = array(a, float)
        assert c.dtype is dtype(float)
def test_array_of_subtype(self):
import numpy as N
# this part of numpy's matrix class causes an infinite loop
# on cpython
import sys
if '__pypy__' not in sys.builtin_module_names:
skip('does not pass on cpython')
class matrix(N.ndarray):
def __new__(subtype, data, dtype=None, copy=True):
print('matrix __new__')
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy: return new.copy()
else: return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = False
if (ndim == 2) and arr.flags.fortran:
order = True
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
print('matrix __array_finalize__',obj)
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
| |
#
#############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME>, TSRI 2011
#
#############################################################################
"""
Collection of helper functions used in different code for Virtual Screening and
other applications
v.0.6
v.0.7 modified Xmas
v.0.8 modified in Jan 2012 for Waters support
"""
from Tkinter import *
from numpy import *
from math import fabs, acos, degrees, sqrt, e
from glob import glob
from string import strip
import os, getopt, re, fnmatch
import string
from sys import exc_info
#import VolumeOperators.trilinterp as ti # XXX PMV dependency!
import Volume.Operators as tp
import Pmw
# CONSTANTS
PI = 3.14159
PI2 = PI*2

# AutoDock vdW values from AD4_parameter.dat (Rii/2) -- despite the name
# these are *diameters*, not radii (see vdwRadii below for actual radii).
vdw_radii = { 'H': 1.00, 'HD': 1.00, 'HS': 1.00, 'C': 2.00,
         'A': 2.00, 'N': 1.75, 'NA': 1.75, 'NS': 1.75, 'OA': 1.60,
         'OS': 1.60, 'F': 1.54, 'Mg': 0.65, 'MG': 0.65, 'P': 2.10,
         'SA': 2.00, 'S': 2.00, 'Cl': 2.04, 'CL': 2.04, 'Ca': 0.99,
         'CA': 0.99, 'Mn': 0.65, 'MN': 0.65, 'Fe': 0.65, 'FE': 0.65,
         'Zn': 0.74, 'ZN': 0.74, 'Br': 2.165, 'BR':2.165, 'I':2.36,
         'Z' : 2.00, 'G' : 2.00, 'GA': 2.00, 'J' :2.00, 'Q' :2.00,
          'X': 2 } # default vdW for unknown atom

# actual vdW *radii* (the dict above holds diameters).
# BUGFIX: the duplicate 'X': 1.00 entry was removed -- it was silently
# shadowed by the 'X': 1.5 default at the end (last key wins in a dict
# literal), so the effective value is unchanged.
vdwRadii = { 'A' : 1.00, 'BR' : 1.08, 'Br' : 1.08, 'C' : 1.00,
             'CA' : 0.49, 'CL' : 1.02, 'Ca' : 0.49, 'Cl' : 1.02,
             'F' : 0.77, 'FE' : 0.33, 'Fe' : 0.33, 'G' : 1.00,
             'GA' : 1.00, 'H' : 0.50, 'HD' : 0.50, 'HS' : 0.50,
             'I' : 1.18, 'J' : 1.00, 'MG' : 0.33, 'MN' : 0.33,
             'Mg' : 0.33, 'Mn' : 0.33, 'N' : 0.88, 'NA' : 0.88,
             'NS' : 0.88, 'OA' : 0.80, 'OS' : 0.80, 'P' : 1.05,
             'Q' : 1.00, 'S' : 1.00, 'SA' : 1.00,
             'Z' : 1.00, 'ZN' : 0.37, 'Zn' : 0.37,
             'X' : 1.5 } # default vdW for unknown atom

# squared covalent-bond lengths per atom type (values below are the
# squares of the bond_lenghts entries, e.g. H: 0.50**2 == 0.25).
# NOTE: the name keeps the historical 'lenghts' typo for compatibility.
bond_lenghts_sq = { 'A' : 1.00, 'C' : 1.00, 'MN' : 0.11, 'GA' : 1.00,
        'Zn' : 0.14, 'F' : 0.59, 'ZN' : 0.14, 'H' : 0.25, 'CA' : 0.25,
        'Mn' : 0.11, 'Mg' : 0.11, 'N' : 0.77, 'Q' : 1.00, 'P' : 1.10,
        'S' : 1.00, 'FE' : 0.11, 'BR' : 1.17, 'X' : 1.00, 'Z' : 1.00,
        'HD' : 0.25, 'MG' : 0.11, 'G' : 1.00, 'Cl' : 1.04,
        'NA' : 0.77, 'Ca' : 0.25, 'CL' : 1.04, 'OA' : 0.64,
        'I' : 1.39, 'Fe' : 0.11, 'Br' : 1.17, 'HS' : 0.25,
        'SA' : 1.00, 'NS' : 0.77, 'OS' : 0.64, 'J' : 1.00,
        }

# covalent-bond lengths per atom type (same spelling caveat as above)
bond_lenghts = { 'A' : 1.00, 'C' : 1.00, 'MN' : 0.33,
                 'GA' : 1.00, 'Zn' : 0.37, 'F' : 0.77,
                 'ZN' : 0.37, 'H' : 0.50, 'CA' : 0.49,
                 'Mn' : 0.33, 'Mg' : 0.33, 'N' : 0.88,
                 'Q' : 1.00, 'P' : 1.05, 'S' : 1.00,
                 'FE' : 0.33, 'BR' : 1.08, 'X' : 1.00,
                 'Z' : 1.00, 'HD' : 0.50, 'MG' : 0.33,
                 'G' : 1.00, 'Cl' : 1.02, 'NA' : 0.88,
                 'Ca' : 0.49, 'CL' : 1.02, 'OA' : 0.80,
                 'I' : 1.18, 'Fe' : 0.33, 'Br' : 1.08,
                 'HS' : 0.50, 'SA' : 1.00, 'NS' : 0.88,
                 'OS' : 0.80, 'J' : 1.00,
                 }

ignore_at = [ 'HD', 'H', 'W' ] # TODO experimental, to allow handling special atoms, like waters
METALS = [ 'Mg', 'MG', 'Ca', 'CA', 'Mn', 'MN', 'Fe', 'FE', 'Zn', 'ZN' ]
HCOVBOND = 1.1  # hydrogen covalent bond length cutoff
# functional stuff and tricks
class QuickStop:
    """Do-nothing marker class.

    Used only to trigger an ``except`` branch, because a try/except is
    cheaper than repeated if/then checks on the fast path.
    """
# atom stuff
def pmvAtomStrip(atom, mol_name = None):
    """Convert a PDB(QT) ATOM/HETATM record to PMV selection syntax.

    "ATOM    455  N   GLY A  48 ..."  ->  "A:GLY48:N"
    When mol_name is given it is prepended: "xJ1_xtal:A:GLY48:N".
    """
    # fixed PDB column slices
    name = atom[12:16].strip()
    res = atom[16:21].strip()
    num = atom[22:26].strip()
    ch = atom[21].strip()
    sel = "%s:%s%s:%s" % (ch, res, num, name)
    if mol_name:
        sel = "%s:%s" % (mol_name, sel)
    return sel
"""
def matchThisInteraction(entry1, entry2, strict=True, DEBUG=False):
"" "checks if entry1 is equal or a subset of entry2
Allowed syntax entries are:
CHAIN:RES:AT
CHAIN:RES
:RES:AT
:RES
"" "
# TODO XXX XXX XXX XXX XXX
# add dictionary sets for residue types:
# polar-uncharged
# positively charged AA
# negatively charged
# aromatic
#
# XXX XXX XXX XXX XXX
# entry1 = ":TYR12:"
# entry2 = "A:TYR12:O"
# XXX XXX very fragile! warning with Pi interactions
# B:PHE149~~(66.904,71.516,40.205:70.416,70.616,38.595) <-- no atom level!
if DEBUG:
print "E1:", entry1
print "E2:", entry2
parts = entry1.count(":")
if parts == 2: # vdw, hb, metal (D:DA17:N1)
chain1, res1, at1 = entry1.split(":")
elif parts == 1: # pi interaction (A:MG396:MG)
chain1, res1 = entry1.split(":")
at1 = ""
chain2, res2, at2 = entry2.split(":")
#chain2, res2 = entry2.split(":")
if DEBUG:
print "chain, res, at: comparing..."
print "|%s:%s:%s|" %(chain1, res1, at1),
print "|%s:%s:%s|" %(chain2, res2, at2),
if strict:
if not chain1 or chain1 == chain2:
if not res1 or res1 == res2:
if not at1 or at1 == at2:
if DEBUG: print "[strict] found!"
return True
else:
if not chain1 or chain1 == chain2: # NOTE one letter...
if not res1 or res1 in res2:
if not at1 or at1 in at2:
if DEBUG: print "[no-strict] found!"
return True
return False
"""
def isAtom(l):
    """True if *l* is a PDB(QT) ATOM or HETATM record line."""
    return l.startswith(("ATOM", "HETATM"))
def getCoords(alist, include_hydrogens = True):
    """Fast extraction of atoms from PDBQT lines.

    Returns {'text': stripped ATOM/HETATM lines,
             'coord': numpy float32 array of xyz triples,
             'atype': AutoDock atom-type strings}.
    HD hydrogens are skipped when include_hydrogens is False; the three
    lists are filtered together so they stay the same length.

    BUGFIX: removed a dead `at = l.rsplit(None, 1)[1]` whose result was
    immediately overwritten by getAtype(l) -- it could also raise
    IndexError on a line with a single token.
    """
    coord = []
    atoms = []
    atype = []
    for l in alist:
        if isAtom(l):
            # AutoDock atom type, as resolved by the shared helper
            at = getAtype(l)
            if not at == "HD" or include_hydrogens: # by default, HD are included
                # xyz live in fixed PDB columns 31-54
                coord.append([float(l[30:38]),float(l[38:46]),float(l[46:54])])
                atype.append(at)
                atoms.append(l.strip())
    return { 'text' : atoms, 'coord' : array( coord, 'f'), 'atype': atype }
def getFlatAtoms(ligand, flat_only = True, skip_hd = True):
    """Collect ligand atoms as [atype, coords, line] triples, usually only
    the 'flat' (aromatic-capable) types, for aromatic ring detection.

    skip_hd controls whether HD/H hydrogens are dropped; flat_only=False
    returns every non-hydrogen atom.
    """
    # XXX the returned value order should be changed to match
    # XXX getCoords... text, coord, atype ?
    FLAT_TYPES = ("A", "OA", "NA", "N", "SA")
    HYDROGEN_TYPES = ("HD", "H")
    data = []
    for line in getAtoms(ligand):
        atype = getAtype(line)
        if atype in HYDROGEN_TYPES:
            # hydrogens are normally skipped
            if not skip_hd:
                data.append([atype, atomCoord(line), line])
        elif flat_only and atype not in FLAT_TYPES:
            continue
        else:
            data.append([atype, atomCoord(line), line])
    return data
def findHbAccepDon(atom_list):
    """Identify hydrogen-bond acceptors and donors in PDBQT atom lines.

    Acceptors are OA/NA/SA atoms.  Donors are N/O/OA/NA/SA atoms that have
    an HD hydrogen within covalent-bond distance (squared cutoff, since
    dist() is compared against the squared threshold here).
    Returns (acceptors, donors) as lists of stripped input lines.
    """
    H_COV_BOND = 1.1 ** 2
    ACCEPTOR_TYPES = ('OA', 'NA', 'SA')
    DONOR_TYPES = ('N', 'O', 'OA', 'NA', 'SA')
    acceptors = []
    candidates = []
    hydrogens = []
    for line in atom_list:
        if not (line.startswith("ATOM") or line.startswith("HETATM")):
            continue
        line = line.strip()
        atype = line.split()[-1]
        if atype in ACCEPTOR_TYPES:
            if not line in acceptors:
                acceptors.append(line)
        elif atype in DONOR_TYPES:
            if not line in candidates:
                candidates.append(line)
        elif atype == 'HD':
            if not line in hydrogens:
                hydrogens.append(line)
    donors = []
    for cand in candidates:
        # a donor needs at least one covalently bound HD hydrogen
        for hyd in hydrogens:
            if dist(cand, hyd) <= H_COV_BOND:
                donors.append(cand)
                break
    return acceptors, donors
def getAtoms(ligand, atomOnly=False, hetOnly=False):
    """Filter PDBQT lines and return the ATOM/HETATM records.

    atomOnly -> only ATOM lines; hetOnly -> only HETATM lines (atomOnly
    wins if both are set); default -> ATOM lines followed by HETATM lines,
    each group in input order.
    """
    atoms = [l for l in ligand if l.startswith("ATOM")]
    hetatm = [l for l in ligand if l.startswith("HETATM")]
    if atomOnly:
        return atoms
    if hetOnly:
        return hetatm
    return atoms + hetatm
def atomCoord(a): # works with pdb atoms
    """Return the xyz coordinates of a single PDB[QT] line as a float32
    numpy array (fixed columns 31-54 of the record).

    BUGFIX: removed two unreachable statements that followed the return
    (leftovers of an experiment noted in the old comments).
    """
    # numpy converts the fixed-column substrings straight to float
    return array( [a[30:38], a[38:46], a[46:54]], 'f')
def getPdbOccupancy(a):
    """Return the occupancy-column value of a PDB atom line as a float.

    Returns 0.0 when the field is missing, too short, or not numeric.
    NOTE(review): slice [60:67] is the temp-factor/charge region in
    standard PDB (occupancy is cols 55-60) -- presumably intentional for
    PDBQT records; confirm against callers.
    BUGFIX: narrowed the bare `except:` (which also swallowed SystemExit
    and KeyboardInterrupt) to the errors float() can actually raise.
    """
    try:
        return float(a[60:67])
    except (TypeError, ValueError):
        # short/malformed line: treat as zero occupancy
        return 0.0
def avgAtoms(atom_list):
    """Average position of a list of PDB(QT) atom lines."""
    return avgCoord([atomCoord(a) for a in atom_list])
def avgCoord(atom_list): # TODO change this to become avgAtomCoord
"""returns the average coordinates from a list of PDB(QT) atoms or coordinates"""
avg = [0., 0., 0.,]
#print "GOT | |
# -*- coding: utf-8 -*-
import django
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils import translation
from django.http import HttpResponseForbidden, HttpResponseRedirect, QueryDict
from urllib.parse import urlparse
from hvad.admin import InlineModelForm
from hvad.admin import translatable_modelform_factory
from hvad.forms import TranslatableModelForm
from hvad.test_utils.fixtures import NormalFixture, UsersFixture
from hvad.test_utils.data import NORMAL
from hvad.test_utils.testcase import HvadTestCase
from hvad.test_utils.project.app.models import Normal, Unique, SimpleRelated, AutoPopulated
class BaseAdminTests(object):
    """Mixin giving tests access to the ModelAdmin registered for a model."""
    def _get_admin(self, model):
        # look up the admin instance on the default admin site registry
        return admin.site._registry[model]
class AdminMethodsTests(HvadTestCase, BaseAdminTests, NormalFixture):
    """Tests for translation-aware admin helper methods
    (all_translations display column and language-aware get_object)."""
    normal_count = 1
    def test_all_translations(self):
        """all_translations() highlights the active language, links each
        translation, and is empty for an unsaved object."""
        myadmin = self._get_admin(Normal)
        # With English active, the English entry must be wrapped in
        # <strong></strong> by all_translations()
        obj = Normal.objects.language("en").get(pk=self.normal_id[1])
        with translation.override('en'):
            # prime the ContentType cache so this lookup does not add a
            # spurious query inside the assertNumQueries blocks below
            ContentType.objects.get_for_model(Normal)
            with self.assertNumQueries(1):
                self.assertTrue(myadmin.all_translations(obj).find("<strong>") != -1)
            with self.assertNumQueries(1):
                # Entries should be linked to the corresponding translation page
                self.assertTrue(myadmin.all_translations(obj).find("?language=en") != -1)
        with translation.override('th'):
            with self.assertNumQueries(1):
                # no 'th' translation exists, so nothing is highlighted
                self.assertTrue(myadmin.all_translations(obj).find("<strong>") == -1)
        # An unsaved object, shouldn't have any translations
        obj = Normal()
        self.assertEqual(myadmin.all_translations(obj), "")
    def test_all_translations_prefetch_related(self):
        """With translations prefetched, all_translations() must not hit the db."""
        myadmin = self._get_admin(Normal)
        qs = Normal.objects.untranslated().prefetch_related('translations')
        obj = qs.get(pk=self.normal_id[1])
        with translation.override('en'):
            # prime the ContentType cache so this lookup does not add a
            # spurious query inside the assertNumQueries blocks below
            ContentType.objects.get_for_model(Normal)
            with self.assertNumQueries(0):
                self.assertTrue(myadmin.all_translations(obj).find("<strong>") != -1)
            # Entries should be linked to the corresponding translation page
            self.assertTrue(myadmin.all_translations(obj).find("?language=en") != -1)
        with translation.override('th'):
            with self.assertNumQueries(0):
                self.assertTrue(myadmin.all_translations(obj).find("<strong>") == -1)
    def test_get_object(self):
        """get_object() loads the active language's translation; for a
        language with no translation the translated field comes back empty,
        and with no translations at all it returns None."""
        myadmin = self._get_admin(Normal)
        get_request = self.request_factory.get('/admin/app/normal/')
        obj = Normal.objects.language("en").get(pk=self.normal_id[1])
        with translation.override('en'):
            self.assertEqual(myadmin.get_object(get_request, obj.pk).pk,
                             self.normal_id[1])
            self.assertEqual(myadmin.get_object(get_request, obj.pk).shared_field,
                             NORMAL[1].shared_field)
            self.assertEqual(myadmin.get_object(get_request, obj.pk).language_code, 'en')
            self.assertEqual(myadmin.get_object(get_request, obj.pk).translated_field,
                             NORMAL[1].translated_field['en'])
        with translation.override('th'):
            self.assertEqual(myadmin.get_object(get_request, obj.pk).pk,
                             self.normal_id[1])
            self.assertEqual(myadmin.get_object(get_request, obj.pk).shared_field,
                             NORMAL[1].shared_field)
            self.assertEqual(myadmin.get_object(get_request, obj.pk).language_code, 'th')
            self.assertEqual(myadmin.get_object(get_request, obj.pk).translated_field, '')
        # Check what happens if there is no translations at all
        obj = Normal.objects.untranslated().create(shared_field="shared")
        Normal.objects.language('all').filter(pk=obj.pk).delete_translations()
        with translation.override('en'):
            self.assertIs(myadmin.get_object(get_request, obj.pk), None)
    def test_get_object_nonexisting(self):
        """get_object() returns None for a primary key that does not exist."""
        myadmin = self._get_admin(Normal)
        get_request = self.request_factory.get('/admin/app/normal/')
        self.assertEqual(myadmin.get_object(get_request, -1), None)
class NormalAdminTests(HvadTestCase, BaseAdminTests, UsersFixture, NormalFixture):
    """End-to-end admin view tests for the translatable Normal/Unique models:
    add/change forms, language tabs, redirects and popup handling."""
    normal_count = 1
    def test_admin_simple(self):
        """Posting the add form creates an object translated in the active language."""
        with translation.override('en'):
            with self.login_user_context('admin'):
                SHARED = 'shared_new'
                TRANS = 'trans_new'
                url = reverse('admin:app_normal_add')
                data = {
                    'shared_field': SHARED,
                    'translated_field': TRANS,
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, 302)
                self.assertEqual(Normal.objects.count(), self.normal_count + 1)
                obj = Normal.objects.language('en').get(shared_field=SHARED)
                self.assertEqual(obj.shared_field, SHARED)
                self.assertEqual(obj.translated_field, TRANS)
    def test_admin_duplicate_simple(self):
        """A duplicate translated_field must re-render the form with one error."""
        with translation.override('en'):
            Unique.objects.language('en').create(
                shared_field='shared',
                translated_field='translated_duplicate',
                unique_by_lang='unique_by_lang_1',
            )
            with self.login_user_context('admin'):
                response = self.client.post(reverse('admin:app_unique_add'), {
                    'shared_field': 'shared2',
                    'translated_field': 'translated_duplicate',
                    'unique_by_lang': 'unique_by_lang_2',
                })
                self.assertEqual(response.status_code, 200)
                self.assertEqual(len(response.context_data['errors']), 1)
                self.assertEqual(Unique.objects.count(), 1)
    def test_admin_duplicate_by_lang(self):
        """A duplicate per-language unique value must re-render with one error."""
        with translation.override('en'):
            Unique.objects.language('en').create(
                shared_field='shared',
                translated_field='translated',
                unique_by_lang='unique_by_lang_duplicate',
            )
            with self.login_user_context('admin'):
                response = self.client.post(reverse('admin:app_unique_add'), {
                    'shared_field': 'shared2',
                    'translated_field': 'translated2',
                    'unique_by_lang': 'unique_by_lang_duplicate',
                })
                self.assertEqual(response.status_code, 200)
                self.assertEqual(len(response.context_data['errors']), 1)
                self.assertEqual(Unique.objects.count(), 1)
    def test_admin_auto_populated(self):
        """
        This only works if we create the translation attribute before saving
        the instance. Otherwise the overridden save() method can't access the
        translated field during the initial save(), and it crashes.
        """
        with translation.override('en'):
            with self.login_user_context('admin'):
                danish_string = u"rød grød med fløde"
                url = reverse('admin:app_autopopulated_add')
                data = {
                    'translated_name': danish_string,
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, 302)
                self.assertEqual(AutoPopulated.objects.count(), 1)
                obj = AutoPopulated.objects.language('en').get()
                self.assertEqual(obj.translated_name, danish_string)
                # slugify drops the non-ascii characters
                self.assertEqual(obj.slug, "rd-grd-med-flde")
    def test_admin_change_form_title(self):
        """The rendered change form must mention the current language."""
        with translation.override('en'):
            with self.login_user_context('admin'):
                url = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
                self.assertTrue('en' in response.content.decode('utf-8'))
    def test_admin_change_form_language_tabs(self):
        """Language tabs must preserve changelist filters in their urls, point
        at the change view, carry a correct current/available/empty status,
        and expose a delete url only for existing translations."""
        with self.settings(LANGUAGES=(('en', 'English'),
                                      ('fr', 'French'),
                                      ('ja', 'Japanese'))):
            with translation.override('en'):
                with self.login_user_context('admin'):
                    url = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                    test_data = (
                        {},
                        {'_changelist_filters': 'q=searchparam'},
                        {'_changelist_filters': 'q=searchparam', 'language': 'fr'},
                    )
                    for data in test_data:
                        response = self.client.get(url, data=data)
                        self.assertEqual(response.status_code, 200)
                        tabs = response.context['language_tabs']
                        for actual_tab_url, name, key, status, del_url in tabs:
                            # tab url keeps the request's query plus its language
                            self.assertEqual(
                                QueryDict(urlparse(actual_tab_url).query).dict(),
                                dict(data, language=key)
                            )
                            self.assertEqual(url, urlparse(actual_tab_url).path)
                            self.assertEqual(status, 'current' if key == data.get('language', 'en') else
                                                     'available' if key in self.translations else
                                                     'empty')
                            expected_del_url = reverse('admin:app_normal_delete_translation',
                                                       args=(self.normal_id[1], key))
                            self.assertEqual(del_url, expected_del_url if key in self.translations else None)
    def test_admin_change_form_action_url(self):
        """The form's action url must carry over the request's query string."""
        with translation.override('en'):
            with self.login_user_context('admin'):
                url = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                tests = (
                    '',
                    'language=fr',
                    '_changelist_filters=q%3Dparam&language=fr',
                )
                for query_string in tests:
                    expected_dict = QueryDict(query_string)
                    full_url = '%s?%s' % (url, query_string) if query_string else url
                    response = self.client.get(full_url)
                    form_url = urlparse(response.context['form_url'])
                    self.assertEqual(expected_dict, QueryDict(form_url.query),
                                     'query_string=%r' % query_string)
    def test_admin_change_form_redirect_add_another(self):
        """Saving with _addanother must redirect to the add view, keeping the
        edited language in the query string."""
        lang = 'en'
        with translation.override('ja'):
            with self.login_user_context('admin'):
                url = '%s?language=%s' % (reverse('admin:app_normal_change',
                                                  args=(self.normal_id[1],)), lang)
                data = {
                    'translated_field': 'English NEW',
                    'shared_field': NORMAL[1].shared_field,
                    '_addanother': '1',
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, 302, response.content)
                expected_url = '%s?language=%s' % (reverse('admin:app_normal_add'), lang)
                self.assertTrue(response['Location'].endswith(expected_url))
                obj = Normal.objects.language('en').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "English NEW")
    def test_admin_change_form_redirect_continue_edit(self):
        """Saving with _continue must redirect back to the same change form
        (with and without an explicit ?language=), updating only the
        translation that was edited."""
        lang = 'en'
        with translation.override('ja'):
            with self.login_user_context('admin'):
                url = '%s?language=%s' % (reverse('admin:app_normal_change',
                                                  args=(self.normal_id[1],)), lang)
                data = {
                    'translated_field': 'English NEW',
                    'shared_field': NORMAL[1].shared_field,
                    '_continue': '1',
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, 302, response.content)
                self.assertTrue(response['Location'].endswith(url))
                obj = Normal.objects.language('en').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "English NEW")
                # without ?language=, the active language ('ja') is edited
                url2 = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                data = {
                    'translated_field': 'Japanese',
                    'shared_field': NORMAL[1].shared_field,
                    '_continue': '1',
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post(url2, data)
                self.assertEqual(response.status_code, 302, response.content)
                self.assertTrue(response['Location'].endswith(url2))
                obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "Japanese")
                # the English translation must be untouched by the 'ja' edit
                obj = Normal.objects.language('en').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "English NEW")
    def test_admin_change_form(self):
        """A plain save updates the translation and redirects to the changelist."""
        with translation.override('en'):
            with self.login_user_context('admin'):
                url = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                data = {
                    'translated_field': 'English NEW',
                    'shared_field': NORMAL[1].shared_field,
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post(url, data)
                expected_url = reverse('admin:app_normal_changelist')
                self.assertEqual(response.status_code, 302, response.content)
                self.assertTrue(response['Location'].endswith(expected_url))
                obj = Normal.objects.language('en').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "English NEW")
    def test_admin_dual(self):
        """Posting the add form under two active languages creates two objects,
        each carrying the translation of the language it was posted under."""
        SHARED = 'shared_new'
        TRANS_EN = 'English'
        TRANS_JA = u'日本語'
        with self.login_user_context('admin'):
            url = reverse('admin:app_normal_add')
            data_en = {
                'shared_field': SHARED,
                'translated_field': TRANS_EN,
                'simplerel-TOTAL_FORMS': '0',
                'simplerel-INITIAL_FORMS': '0',
                'simplerel-MAX_NUM_FORMS': '0',
            }
            data_ja = {
                'shared_field': SHARED,
                'translated_field': TRANS_JA,
                'simplerel-TOTAL_FORMS': '0',
                'simplerel-INITIAL_FORMS': '0',
                'simplerel-MAX_NUM_FORMS': '0',
            }
            with translation.override('en'):
                response = self.client.post(url, data_en)
                self.assertEqual(response.status_code, 302)
                self.assertEqual(Normal.objects.untranslated().count(), self.normal_count + 1)
            with translation.override('ja'):
                response = self.client.post(url, data_ja)
                self.assertEqual(response.status_code, 302)
                self.assertEqual(Normal.objects.untranslated().count(), self.normal_count + 2)
        en = Normal.objects.language('en').get(shared_field=SHARED)
        self.assertEqual(en.shared_field, SHARED)
        self.assertEqual(en.translated_field, TRANS_EN)
        ja = Normal.objects.language('ja').get(shared_field=SHARED)
        self.assertEqual(ja.shared_field, SHARED)
        self.assertEqual(ja.translated_field, TRANS_JA)
    def test_admin_with_param(self):
        """?language=en on the add view must override the active language."""
        with translation.override('ja'):
            with self.login_user_context('admin'):
                SHARED = 'shared_new'
                TRANS = 'trans'
                url = reverse('admin:app_normal_add')
                data = {
                    'shared_field': SHARED,
                    'translated_field': TRANS,
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                }
                response = self.client.post("%s?language=en" % url, data)
                self.assertEqual(response.status_code, 302)
                self.assertEqual(Normal.objects.untranslated().count(), self.normal_count + 1)
                obj = Normal.objects.language('en').get(shared_field=SHARED)
                self.assertEqual(obj.shared_field, SHARED)
                self.assertEqual(obj.translated_field, TRANS)
    def test_admin_change_popup(self):
        """Saving from an admin popup (_popup=1) must still apply the change."""
        from django.contrib.admin.options import IS_POPUP_VAR
        with translation.override('en'):
            with self.login_user_context('admin'):
                url = reverse('admin:app_normal_change', args=(self.normal_id[1],))
                data = {
                    'translated_field': 'English NEW',
                    'shared_field': NORMAL[1].shared_field,
                    'simplerel-TOTAL_FORMS': '0',
                    'simplerel-INITIAL_FORMS': '0',
                    'simplerel-MAX_NUM_FORMS': '0',
                    IS_POPUP_VAR: '1',
                }
                response = self.client.post(url, data)
                # popup responses vary by django version (200 script vs 302)
                self.assertIn(response.status_code, [200, 302], response.content)
                obj = Normal.objects.language('en').get(pk=self.normal_id[1])
                self.assertEqual(obj.translated_field, "English NEW")
class AdminEditTests(HvadTestCase, BaseAdminTests, NormalFixture, UsersFixture):
    """Changelist queryset behaviour of the translatable admin."""
    normal_count = 2
    def test_changelist(self):
        """get_queryset() must return one row per object (normal_count rows)."""
        url = reverse('admin:app_normal_changelist')
        request = self.request_factory.get(url)
        normaladmin = self._get_admin(Normal)
        with translation.override('en'):
            queryset = normaladmin.get_queryset(request)
            self.assertEqual(queryset.count(), self.normal_count)
class AdminDeleteTranslationsTests(HvadTestCase, BaseAdminTests, UsersFixture, NormalFixture):
    """Tests for the admin's delete-translation view."""
    normal_count = 1
    translations = ('en', 'ja')
    def test_delete_last_translation(self):
        """Deleting the only remaining translation must be refused."""
        # drop 'ja' so 'en' is the last translation left
        Normal.objects.language('ja').delete_translations()
        url = reverse('admin:app_normal_delete_translation', args=(self.normal_id[1], 'en'))
        with self.login_user_context('admin'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'admin/hvad/deletion_not_allowed.html')
        self.assertTrue(Normal.objects.language('en').filter(pk=self.normal_id[1]).exists())
    def test_delete_translation_get(self):
        """GET shows the confirmation page and deletes nothing."""
        url = reverse('admin:app_normal_delete_translation', args=(self.normal_id[1], 'en'))
        with self.login_user_context('admin'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'admin/delete_confirmation.html')
        self.assertTrue(Normal.objects.language('en').filter(pk=self.normal_id[1]).exists())
        self.assertTrue(Normal.objects.language('ja').filter(pk=self.normal_id[1]).exists())
    def test_delete_translation_post(self):
        """Confirmed POST deletes only the targeted translation."""
        url = reverse('admin:app_normal_delete_translation', args=(self.normal_id[1], 'en'))
        with self.login_user_context('admin'):
            response = self.client.post(url, {'post': 'yes'})
            self.assertEqual(response.status_code, HttpResponseRedirect.status_code)
        self.assertRaises(Normal.DoesNotExist,
                          Normal.objects.language('en').get, pk=self.normal_id[1])
        # the other language must survive
        self.assertTrue(Normal.objects.language('ja').filter(pk=self.normal_id[1]).exists())
    def test_delete_translation_no_obj(self):
        """An unknown object pk yields a 404."""
        url = reverse('admin:app_normal_delete_translation', args=(-1, 'en'))
        with self.login_user_context('admin'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
    def test_delete_no_perms(self):
        """The 'staff' user must be rejected with a 403 by this view."""
        url = reverse('admin:app_normal_delete_translation', args=(self.normal_id[1], 'en'))
        with self.login_user_context('staff'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
class AdminNoFixturesTests(HvadTestCase, BaseAdminTests):
    """Admin helper tests that need no database fixtures."""
    def test_get_change_form_base_template(self):
        """The default change-form base template is admin/change_form.html."""
        normaladmin = self._get_admin(Normal)
        template = normaladmin.get_change_form_base_template()
        #HACK navigate through incompatibility between django template engine
        # deprecation path and django extends tag in version 1.8
        if hasattr(template, 'template'):
            template = template.template
        self.assertEqual(template.name, 'admin/change_form.html')
    def test_translatable_modelform_factory(self):
        """The form factory honours fields/exclude and always adds
        'translations' to the excluded fields."""
        t = translatable_modelform_factory('en', Normal, fields=['shared_field'], exclude=['id'])
        self.assertCountEqual(t.Meta.fields, ['shared_field'])
        self.assertCountEqual(t.Meta.exclude, ['id', 'translations'])
        # NOTE(review): deliberately repeated call -- presumably checks the
        # factory can be invoked twice without accumulating state; confirm
        t = translatable_modelform_factory('en', Normal, fields=['shared_field'], exclude=['id'])
        self.assertCountEqual(t.Meta.fields, ['shared_field'])
        self.assertCountEqual(t.Meta.exclude, ['id', 'translations'])
        class TestForm(TranslatableModelForm):
            class Meta:
                fields = ['shared_field']
                exclude = ['id']
        # Meta may also come from a custom base form instead of kwargs
        t = translatable_modelform_factory('en', Normal, form=TestForm)
        self.assertCountEqual(t.Meta.fields, ['shared_field'])
        self.assertCountEqual(t.Meta.exclude, ['id', 'translations'])
class AdminRelationTests(HvadTestCase, BaseAdminTests, UsersFixture, NormalFixture):
normal_count = 1
    def create_fixtures(self):
        """Extend the base fixtures with one SimpleRelated object translated
        in 'en' plus extra 'fr' and 'da' translations."""
        super(AdminRelationTests, self).create_fixtures()
        self.simple = SimpleRelated.objects.language('en').create(
            normal_id=self.normal_id[1], translated_field='English inline'
        )
        self.simple.translated_fields.create(language_code='fr', translated_field='French inline')
        self.simple.translated_fields.create(language_code='da', translated_field='Danish inline')
    def test_correct_id_in_inline(self):
        """The inline form's initial 'id' must match the translated instance's id."""
        with translation.override('da'):
            instance = SimpleRelated.objects.get(pk=self.simple.pk)
            class ExampleInlineForm(InlineModelForm):
                class Meta:
                    model = SimpleRelated
                    exclude = []
            form = ExampleInlineForm(instance=instance)
            self.assertTrue(form.initial["id"] == instance.id)
def test_adding_related_object(self):
url = reverse('admin:app_simplerelated_add')
TRANS_FIELD = "English Content"
with translation.override('en'):
en = Normal.objects.get(pk=self.normal_id[1])
with self.login_user_context('admin'):
data = {
'normal': self.normal_id[1],
'translated_field': TRANS_FIELD,
'_continue': '1',
}
response = self.client.post(url, data)
simplerel = SimpleRelated.objects.language().get(translated_field=TRANS_FIELD)
self.assertEqual(simplerel.normal.pk, en.pk)
expected_url = reverse('admin:app_simplerelated_change', args=(simplerel.pk,))
self.assertRedirects(response, | |
fits.Header))
    def test_dir_with_no_fits_files(self, tmpdir):
        """A directory without FITS files must warn once, produce a None
        summary, and yield no headers at all."""
        empty_dir = tmpdir.mkdtemp()
        some_file = empty_dir.join('some_file.txt')
        some_file.dump('words')
        with catch_warnings() as w:
            collection = ImageFileCollection(location=empty_dir.strpath,
                                             keywords=['imagetyp'])
        assert len(w) == 1
        assert str(w[0].message) == "no FITS files in the collection."
        assert collection.summary is None
        for hdr in collection.headers():
            # this statement should not be reached if there are no FITS files
            assert 0
    def test_dir_with_no_keys(self, tmpdir):
        """With keywords=[], files are never opened, so a broken FITS file
        must cause neither an error nor a logged warning."""
        # This test should fail if the FITS files in the directory
        # are actually read.
        bad_dir = tmpdir.mkdtemp()
        not_really_fits = bad_dir.join('not_fits.fit')
        not_really_fits.dump('I am not really a FITS file')
        # make sure an error will be generated if the FITS file is read
        with pytest.raises(IOError):
            fits.getheader(not_really_fits.strpath)
        log = tmpdir.join('tmp.log')
        self._setup_logger(log.strpath)
        _ = ImageFileCollection(location=bad_dir.strpath, keywords=[])
        with open(log.strpath) as f:
            warnings = f.read()
        # If the file had been read, the suppressed IOError would have been
        # logged as a warning -- so the log must be empty.
        assert (len(warnings) == 0)
    def test_fits_summary_when_keywords_are_not_subset(self, triage_setup):
        """
        Catch the case when there is overlap between the keyword list
        passed to the ImageFileCollection and to files_filtered
        but the latter is not a subset of the former.
        """
        ic = ImageFileCollection(triage_setup.test_dir,
                                 keywords=['imagetyp', 'exptime'])
        n_files = len(ic.files)
        # filtering on a keyword no file has (monkeys=None) must not drop files
        files_missing_this_key = ic.files_filtered(imagetyp='*',
                                                   monkeys=None)
        assert(n_files > 0)
        assert(n_files == len(files_missing_this_key))
    def test_duplicate_keywords_in_setting(self, triage_setup):
        """Duplicate entries in the keyword list should be de-duplicated."""
        keywords_in = ['imagetyp', 'a', 'a']
        ic = ImageFileCollection(triage_setup.test_dir,
                                 keywords=keywords_in)
        for key in set(keywords_in):
            assert (key in ic.keywords)
        # one keyword gets added: file
        # NOTE(review): this bound is weak -- with the duplicate removed and
        # 'file' added, the expected length is len(set(keywords_in)) + 1 == 3;
        # the '<' comparison also passes for several wrong sizes. Consider
        # tightening to '=='.
        assert len(ic.keywords) < len(keywords_in) + 1
def test_keyword_includes_file(self, triage_setup):
keywords_in = ['file', 'imagetyp']
ic = ImageFileCollection(triage_setup.test_dir,
keywords=keywords_in)
assert 'file' in ic.keywords
file_keywords = [key for key in ic.keywords if key == 'file']
assert len(file_keywords) == 1
def test_setting_keywords_to_none(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
ic.keywords = None
assert ic.summary == []
def test_getting_value_for_keyword(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
# Does it fail if the keyword is not in the summary?
with pytest.raises(ValueError):
ic.values('filter')
# If I ask for unique values do I get them?
values = ic.values('imagetyp', unique=True)
assert values == list(set(ic.summary['imagetyp']))
assert len(values) < len(ic.summary['imagetyp'])
# Does the list of non-unique values match the raw column?
values = ic.values('imagetyp', unique=False)
assert values == list(ic.summary['imagetyp'])
# Does unique actually default to false?
values2 = ic.values('imagetyp')
assert values == values2
def test_collection_when_one_file_not_fits(self, triage_setup):
not_fits = 'foo.fit'
path_bad = os.path.join(triage_setup.test_dir, not_fits)
# create an empty file...
with open(path_bad, 'w'):
pass
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
assert not_fits not in ic.summary['file']
os.remove(path_bad)
def test_data_type_mismatch_in_fits_keyword_values(self, triage_setup):
# If one keyword has an unexpected type, do we notice?
img = np.uint16(np.arange(100))
bad_filter = fits.PrimaryHDU(img)
bad_filter.header['imagetyp'] = 'LIGHT'
bad_filter.header['filter'] = 15.0
path_bad = os.path.join(triage_setup.test_dir, 'bad_filter.fit')
bad_filter.writeto(path_bad)
ic = ImageFileCollection(triage_setup.test_dir, keywords=['filter'])
# dtype is object when there is a mix of types
assert ic.summary['filter'].dtype == np.dtype('O')
os.remove(path_bad)
def test_filter_by_numerical_value(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
should_be_zero = ic.files_filtered(naxis=2)
assert len(should_be_zero) == 0
should_not_be_zero = ic.files_filtered(naxis=1)
assert len(should_not_be_zero) == triage_setup.n_test['files']
def test_files_filtered_with_full_path(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
files = ic.files_filtered(naxis=1, include_path=True)
for f in files:
assert f.startswith(triage_setup.test_dir)
def test_unknown_generator_type_raises_error(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
with pytest.raises(ValueError):
for foo in ic._generator('not a real generator'):
pass
def test_setting_write_location_to_bad_dest_raises_error(self, tmpdir,
triage_setup):
new_tmp = tmpdir.mkdtemp()
bad_directory = new_tmp.join('foo')
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
with pytest.raises(IOError):
for hdr in ic.headers(save_location=bad_directory.strpath):
pass
    def test_initializing_from_table(self, triage_setup):
        """An ImageFileCollection can be seeded from a saved summary table."""
        keys = ['imagetyp', 'filter']
        ic = ImageFileCollection(triage_setup.test_dir, keywords=keys)
        table = ic.summary
        table_path = os.path.join(triage_setup.test_dir, 'input_tbl.csv')
        # alter the table before saving it so we can tell below that the
        # collection really was initialized from the table, not the files
        nonsense = 'forks'
        table['imagetyp'][0] = nonsense
        table.write(table_path, format='ascii', delimiter=',')
        with catch_warnings() as w:
            ic = ImageFileCollection(location=None, info_file=table_path)
        # By using location=None we don't have actual files in the collection.
        # Two warnings: the info_file deprecation, then the empty collection.
        assert len(w) == 2
        assert "'info_file' argument is deprecated" in str(w[0].message)
        assert str(w[1].message) == "no FITS files in the collection."
        # keywords can only have been set from saved table
        for key in keys:
            assert key in ic.keywords
        # no location, so should be no files
        assert len(ic.files) == 0
        # no location, so no way to iterate over files
        with pytest.raises((AttributeError, TypeError)):
            for h in ic.headers():
                pass
        with catch_warnings() as w:
            ic = ImageFileCollection(location=triage_setup.test_dir,
                                     info_file=table_path)
        # only the deprecation warning this time -- the directory has files
        assert len(w) == 1
        assert "'info_file' argument is deprecated" in str(w[0].message)
        # we now have a location, so did we get files?
        assert len(ic.files) == len(table)
        # Is the summary table masked?
        assert ic.summary.masked
        # can I loop over headers?
        for h in ic.headers():
            assert isinstance(h, fits.Header)
        # Does ImageFileCollection summary contain values from table?
        assert nonsense in ic.summary['imagetyp']
    def test_initializing_from_table_file_that_does_not_exist(
            self, triage_setup, tmpdir):
        """A bad info_file is a warning with a location, an error without one."""
        log = tmpdir.join('tmp.log')
        # _setup_logger is a helper on this test class that routes warnings
        # into the given log file
        self._setup_logger(log.strpath)
        # Do we get a warning if we try reading a file that doesn't exist,
        # but where we can initialize from a directory?
        with catch_warnings() as w:
            ic = ImageFileCollection(
                location=triage_setup.test_dir,
                info_file='iufadsdhfasdifre')
        assert len(w) == 1
        assert "'info_file' argument is deprecated" in str(w[0].message)
        with open(log.strpath) as f:
            warnings = f.readlines()
        assert (len(warnings) == 1)
        is_in = ['unable to open table file' in w for w in warnings]
        assert all(is_in)
        # Do we raise an error if the table name is bad AND the location
        # is None?
        with pytest.raises(IOError):
            # Because the location is None we get a Warning about "no files in
            # the collection".
            with catch_warnings() as w:
                ImageFileCollection(location=None, info_file='iufadsdhfasdifre')
            assert len(w) == 2
            assert "'info_file' argument is deprecated" in str(w[0].message)
            assert str(w[1].message) == "no FITS files in the collection."
        # Do we raise an error if the table name is bad AND
        # the location is given but is bad?
        with pytest.raises(OSError):
            with catch_warnings() as w:
                ic = ImageFileCollection(location='dasifjoaurun',
                                         info_file='iufadsdhfasdifre')
            assert len(w) == 1
            assert "'info_file' argument is deprecated" in str(w[0].message)
def test_no_fits_files_in_collection(self):
with catch_warnings(AstropyUserWarning) as warning_lines:
# FIXME: What exactly does this assert?
assert "no fits files in the collection."
def test_initialization_with_no_keywords(self, triage_setup):
# This test is primarily historical -- the old default for
# keywords was an empty list (it is now the wildcard '*').
ic = ImageFileCollection(location=triage_setup.test_dir, keywords=[])
# iteration below failed before bugfix...
execs = 0
for h in ic.headers():
execs += 1
assert not execs
def check_all_keywords_in_collection(self, image_collection):
lower_case_columns = [c.lower() for c in
image_collection.summary.colnames]
for h in image_collection.headers():
for k in h:
assert k.lower() in lower_case_columns
def test_tabulate_all_keywords(self, triage_setup):
ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*')
self.check_all_keywords_in_collection(ic)
def test_summary_table_is_always_masked(self, triage_setup):
# First, try grabbing all of the keywords
ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*')
assert ic.summary.masked
# Now, try keywords that every file will have
ic.keywords = ['bitpix']
assert ic.summary.masked
# What about keywords that include some that will surely be missing?
ic.keywords = ['bitpix', 'dsafui']
assert ic.summary.masked
def test_case_of_keywords_respected(self, triage_setup):
keywords_in = ['BitPix', 'instrume', 'NAXIS']
ic = ImageFileCollection(location=triage_setup.test_dir,
keywords=keywords_in)
for key in keywords_in:
assert key in ic.summary.colnames
def test_grabbing_all_keywords_and_specific_keywords(self, triage_setup):
keyword_not_in_headers = 'OIdn89!@'
ic = ImageFileCollection(triage_setup.test_dir,
keywords=['*', keyword_not_in_headers])
assert keyword_not_in_headers in ic.summary.colnames
self.check_all_keywords_in_collection(ic)
def test_grabbing_all_keywords_excludes_empty_key(self, triage_setup):
# This test needs a file with a blank keyword in it to ensure
# that case is handled correctly.
blank_keyword = fits.PrimaryHDU()
blank_keyword.data = np.zeros((100, 100))
blank_keyword.header[''] = 'blank'
blank_keyword.writeto(os.path.join(triage_setup.test_dir,
'blank.fits'))
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
assert 'col0' not in ic.summary.colnames
def test_header_with_long_history_roundtrips_to_disk(self, triage_setup):
# I tried combing several history comments into one table entry with
# '\n'.join(history), which resulted in a table that couldn't
# round trip to disk because on read the newline character was
# interpreted as...a new line! This test is a check against future
# foolishness.
from astropy.table import Table
img = np.uint16(np.arange(100))
long_history = fits.PrimaryHDU(img)
long_history.header['imagetyp'] = 'BIAS'
long_history.header['history'] = 'Something happened'
long_history.header['history'] = 'Then something else happened'
long_history.header['history'] = 'And then something odd happened'
path_history = os.path.join(triage_setup.test_dir, 'long_history.fit')
long_history.writeto(path_history)
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
ic.summary.write('test_table.txt', format='ascii.csv')
table_disk = Table.read('test_table.txt', format='ascii.csv')
assert len(table_disk) == len(ic.summary)
@pytest.mark.skipif("os.environ.get('APPVEYOR') or os.sys.platform == 'win32'",
reason="fails on Windows because file "
"overwriting fails")
def test_refresh_method_sees_added_keywords(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
# Add a keyword I know isn't already in the header to each file.
not_in_header = 'BARKARK'
for h in ic.headers(overwrite=True):
h[not_in_header] = True
assert not_in_header not in ic.summary.colnames
ic.refresh()
# After refreshing the odd keyword should be present.
assert not_in_header.lower() in ic.summary.colnames
def test_refresh_method_sees_added_files(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
# Compressed files don't get copied. Not sure why...
original_len = len(ic.summary) - triage_setup.n_test['compressed']
| |
as a numpy array. It specifies the number of stars in each
block in each zone to make it easy to seek to the right place.
"""
# noinspection SpellCheckingInspection
self.index = np.genfromtxt(self._index_file, dtype=[('start', np.uint32), ('nstars', np.uint16)],
usecols=[0, 1])
@staticmethod
def get_zone_block(ra: Real, dec: Real) -> Tuple[int, int]:
"""
This tells you the zone and block number that correspond to a specific ra dec pair
:param ra: the right ascension under consideration in units of degrees
:param dec: The declination under consideration in units of degrees
:return: a tuple containing the zone and block location for the ra/dec
"""
# zones are based on dec
zone = int(np.floor((dec + 90) / 0.2) + 1)
# blocks are based on ra
ra_ind = int(np.floor(ra / 0.25) + 1)
# need this check because ra is < 360, not <=
if ra_ind > 1440:
ra_ind = 1440
# need this check for when the max dec is 90, because this is included in the last file
if zone > 900:
zone = 900
return zone, ra_ind
@staticmethod
def get_index_ind(zone: int, ra_ind: int) -> int:
"""
This determines the location in the index that corresponds to the requested zone and block
:param zone: The zone number
:param ra_ind: The right ascension index
:return:
"""
return (zone - 1) * 1440 + ra_ind - 1
@staticmethod
def convert_to_giant_catalogue(ucac_records: pd.DataFrame) -> pd.DataFrame:
"""
This method converts records in the catalogue format into records in the GIANT format.
This is done by renaming columns and converting units.
:param ucac_records: The raw records from the catalogue as a pandas DataFrame
:return: The GIANT records as a Pandas DataFrame
"""
# prep the ucac data frame (set the full index)
ucac_records = ucac_records.assign(source='UCAC4').reset_index().rename(columns={'index': 'rnm'})
ucac_records = ucac_records.set_index(['source', 'zone', 'rnz', 'rnm'])
records = ucac_records.loc[:, _UCAC_COLS]
records.rename(columns=_UCAC_TO_GIANT, inplace=True)
records = records.assign(epoch=2000.0)
records.dtypes.loc[GIANT_COLUMNS] = GIANT_TYPES
# replace invalid magnitudes with the ucac model magnitude
invalid_mag = records['mag'] == 20000
records.loc[invalid_mag, 'mag'] = ucac_records.loc[invalid_mag, 'magm'].astype(np.float64)
# convert to giant units
records['ra'] /= DEG2MAS # MAS to DEG
records['dec'] /= DEG2MAS # MAS to DEG
records['dec'] -= 90. # SPD to DEC
records['distance_sigma'] /= records['distance'] ** 2 # convert parallax std to distance std
records['distance'] /= 1000 # MAS to arcsecond
records['distance'] **= -1 # parallax to distance (arcsecond to parsec)
records['distance'] *= PARSEC2KM # parsec to kilometers
records['mag'] /= 1000. # mMAG to MAG
records['ra_sigma'] += 128 # to uint
records['ra_sigma'] /= DEG2MAS # to deg
records['dec_sigma'] += 128 # to uint
records['dec_sigma'] /= DEG2MAS # to deg
records['ra_proper_motion'] /= 10 * DEG2MAS # 0.1 MAS/YR to DEG/YR
records['dec_proper_motion'] /= 10 * DEG2MAS # 0.1 MAS/YR to DEG/YR
records['ra_pm_sigma'] /= 10 * DEG2MAS # 0.1 MAS/YR to DEG/YR
records['dec_pm_sigma'] /= 10 * DEG2MAS # 0.1 MAS/YR to DEG/YR
records['distance_sigma'] *= 1000 # 1/MAS to parsec
records['distance_sigma'] *= PARSEC2KM # parsec to km
# convert the sigmas to J2000
# noinspection SpellCheckingInspection
ra_shift_time = 2000 - (ucac_records['cepra'] / 100 + 1900)
# noinspection SpellCheckingInspection
dec_shift_time = 2000 - (ucac_records['cepdc'] / 100 + 1900)
records['ra_sigma'] = np.sqrt(records['ra_sigma'] ** 2 + ra_shift_time ** 2 * records['ra_pm_sigma'] ** 2)
records['dec_sigma'] = np.sqrt(records['dec_sigma'] ** 2 + dec_shift_time ** 2 * records['dec_pm_sigma'] ** 2)
# fix for stars with no parallax -- The distance standard deviation seems wrong for these
default_distance_error = 20 / (STAR_DIST / PARSEC2KM / 1000) ** 2 * PARSEC2KM * 1000
records['distance_sigma'].fillna(value=default_distance_error, inplace=True)
records['distance'].replace([np.inf, -np.inf], STAR_DIST, inplace=True)
# fix for stars where the parallax is invalid
records.loc[records.distance < 0, 'distance'] = STAR_DIST
records.loc[records.distance < 0, 'distance_sigma'] = default_distance_error
# specify that the epoch of the stars is J2000
return records
    def cross_ref_tycho(self, ucac_labels: ARRAY_LIKE, tycho_cat: Optional[Tycho2] = None) -> pd.DataFrame:
        """
        This retrieves the Tycho 2 catalogue records for the requested UCAC4 star Zone, RNZ values.

        :param tycho_cat: The tycho catalogue instance to use. If None, a default instance will be created
        :param ucac_labels: The UCAC 4 labels as an iterable of Zone, RNZ pairs
        :return: The raw Tycho 2 star records.
        """
        if tycho_cat is None:
            tycho_cat = Tycho2()
        out = []
        # loop through all of the UCAC stars
        for label in ucac_labels:
            # compose the UCAC id as included in the u4xtycho file
            # (zone * 1e6 + running-number-in-zone; note the 1e6 literal makes
            # this a float -- presumably binary_search's column conversion
            # handles the comparison; verify if ids ever exceed float precision)
            query = label[0] * 1e6 + label[1]
            # search the u4xtycho file to see if this corresponds to a tycho star
            line = binary_search(self._tycho_cross_file, query, column=1)
            if line is not None:
                line = line.decode()
                # determine the tycho2 id from the u4xtycho file
                t2id = ' '.join([line[:4], line[5:10], line[11]])
                # get the tycho2 star record
                rec = tycho_cat.retrieve_record(t2id)
                out.append(rec)
            else:
                warnings.warn('The requested UCAC4 star does not have a tycho reference associated with it: {}'.format(
                    label
                ))
                # append a NaN placeholder so the output stays aligned with
                # the input labels
                # todo: may need to figure out how to get a unique id here so it doesn't get overwritten
                out.append(tycho_cat.nan_frame())
        return pd.concat(out)
    def dump_to_sqlite(self, database_connection: Connection, limiting_mag: Real = 20, use_tycho_mag: bool = False,
                       return_locations: bool = False, return_mag: Optional[Real] = None) -> Optional[pd.DataFrame]:
        """
        Use this to write the catalogue to a sqlite3 database in the GIANT format.

        You can control what stars/data are included using the key word argument inputs. You can also have this return
        the star magnitude/locations for doing blending stars.

        In general you should not use this directly. Instead you should use :func:`~.giant_catalogue.build_catalogue`
        or script :mod:`~.scripts.build_catalogue`.

        :param database_connection: The connection to the database the data is to be dumped to
        :param limiting_mag: The maximum magnitude to include in the catalogue. This is based off of the APASM_V
                             magnitude or the UCAC4 magm magnitude, depending on which is available
        :param use_tycho_mag: This flag stores the magnitude from the Tycho2 catalogue for each star that is in the
                              Tycho2 catalogue. Note that this will be very slow.
        :param return_locations: This flag specifies to return locations for stars for doing blending
        :param return_mag: This flag specifies to only return locations for stars that are brighter than this magnitude.
                           If ``None`` then all stars are returned.
        :return: A dataframe of the dumped stars that meet the ``return_mag`` condition or ``None`` if
                 ``return_locations`` is ``False``
        """
        # local import to avoid a circular import at module load time
        from .tycho import Tycho2
        # if we want to use the tycho information in place of UCAC4 then create the tycho catalogue interface
        if use_tycho_mag:
            tycho = Tycho2()
        else:
            tycho = None
        # list for returning the results if we are doing that
        out = []
        start = time.time()
        print('dumping zone {}'.format(1), flush=True)
        # loop through each zone file and dump it
        for ind, records in enumerate(self.query_catalogue_raw(max_visual_mag=limiting_mag, generator=True)):
            # convert into the GIANT format
            giant_records = self.convert_to_giant_catalogue(records)
            # if we are cross referencing the tycho catalogue do it
            if use_tycho_mag:
                tycho_recs = self.cross_ref_tycho(giant_records.index.droplevel(['source', 'rnm']).tolist(),
                                                  tycho_cat=tycho)
                # add a column so we can see where the tycho information came from
                giant_records.loc[:, 'tycho id'] = ''
                for star_index, magnitude_index in enumerate(giant_records.index):
                    # a NaN VTmag marks the placeholder frame appended when no
                    # tycho cross reference was found for the star
                    if not np.isnan(tycho_recs.iloc[star_index].VTmag):
                        # update the magnitude if we found a tycho record
                        giant_records.loc[magnitude_index, 'tycho id'] = '{}-{}-{}'.format(
                            *tycho_recs.iloc[star_index].name
                        )
                        giant_records.loc[magnitude_index, 'mag'] = tycho_recs.iloc[star_index].VTmag
            # set the index to be the rnm
            giant_records = giant_records.reset_index().set_index('rnm')
            # dump it out to the GIANT catalogue in the stars table
            giant_records.to_sql('stars', database_connection, if_exists='append')
            print('zone dumped in {:.3f} secs'.format(time.time() - start), flush=True)
            if return_locations:
                if return_mag is not None:
                    out.append(giant_records.loc[giant_records.mag <= return_mag, ["ra", "dec", "mag"]])
                else:
                    out.append(giant_records.loc[:, ["ra", "dec", "mag"]])
            # zones are 1-based and this reports the zone about to be
            # processed on the next pass, hence ind + 2
            zone = ind + 2
            start = time.time()
            print('dumping zone {}'.format(zone), flush=True)
        if return_locations:
            return pd.concat(out)
        # implicitly returns None when return_locations is False
class ColumnOrder(Enum):
    """
    Sort direction of a file column, intended as an input to :func:`binary_search`.
    """

    ASCENDING = "ASCENDING"
    """
    Column values increase down the file (smallest first).
    """

    DESCENDING = "DESCENDING"
    """
    Column values decrease down the file (largest first).
    """
def binary_search(file: BinaryIO, label: Any, column: int = 0,
separator: Optional[str] = None, column_conversion: Callable = float,
order: Union[ColumnOrder, str] = | |
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import appdirs
import configargparse
import datetime
import io
import json
import lxml.etree
import os
import pycountry
import six
import sys
# If this script is renamed to 'xml2rfc.py' on a Windows system, the import
# of the real xml2rfc module will break. In order to handle this, we remove
# the directory of the script file from the python system path:
script_dir = os.path.dirname(os.path.realpath(__file__))
if script_dir in sys.path:
    sys.path.remove(script_dir)
import xml2rfc
try:
    from xml2rfc import debug
    debug.debug = True
except ImportError:
    # the debug helper is optional; installs without it run normally
    pass
def get_missing_pdf_libs():
    """Return a newline-separated description of any missing PDF dependencies.

    Returns an empty string when everything needed for PDF output is present.
    """
    checks = (
        (xml2rfc.HAVE_WEASYPRINT, "\nCould not import weasyprint"),
        (xml2rfc.HAVE_PYCAIRO, "\nCould not import pycairo"),
        (xml2rfc.HAVE_CAIRO, "\nCould not find the cairo lib"),
        (xml2rfc.HAVE_PANGO, "\nCould not find the pango lib"),
    )
    return "".join(message for available, message in checks if not available)
def print_pi_help(options, parser):
    """Print the available v2 processing instructions with their defaults, then exit."""
    # dict.items() returns a view object in Python 3, which has no .sort()
    # method (the old code crashed with AttributeError there); sorted() works
    # on both Python 2 and Python 3.
    pis = sorted(xml2rfc.parser.XmlRfc(None, None).pis.items())
    print("Available processing instructions (PIs), with defaults:\n")
    for k, v in pis:
        if isinstance(v, type('')):
            print(' %-20s "%s"' % (k,v))
        else:
            print(' %-20s %s' % (k,v))
    sys.exit()
def print_country_help(options, parser):
    """Print every country code and name recognized in <country> elements, then exit."""
    from xml2rfc.util.postal import country_alias
    country_ids = {}
    # collect all identifiers pycountry knows for each country, keyed by the
    # two-letter (alpha_2) code
    for c in list(pycountry.countries):
        key = c.alpha_2
        country_ids[key] = []
        for a in ['alpha_2', 'alpha_3', 'name', 'official_name', ]:
            if hasattr(c, a):
                v = getattr(c, a)
                if not v in country_ids[key]:
                    country_ids[key].append(v)
    # fold in xml2rfc's own alias table on top of the pycountry identifiers
    for k, v in country_alias.items():
        c = pycountry.countries.lookup(v)
        if not k in country_ids[c.alpha_2]:
            country_ids[c.alpha_2].append(k)
    ids = list(country_ids.values())
    ids.sort()
    print('Known country codes and country names for use with <country>:\n')
    # Python 2 needs an explicit utf-8 encode for non-ASCII country names
    if six.PY3:
        print(('\n'.join([ ' '+' - '.join(v) for v in ids])))
    else:
        print(('\n'.join([ ' '+' - '.join(v) for v in ids])).encode('utf-8'))
    sys.exit()
def get_pdf_help(missing_libs=""):
    """Return the PDF-generation install instructions, with any missing-library notes appended."""
    pdf_requirements_info = """
In order to generate PDFs, xml2rfc uses the WeasyPrint library, which
depends on external libaries that must be installed as native packages.
1. First, install the Cairo, Pango, and GDK-PixBuf library files on your
system. See installation instructions on the WeasyPrint Docs:
    https://weasyprint.readthedocs.io/en/stable/install.html
(Python 3 is not needed if your system Python is 2.7, though).
2. Next, install the pycairo and weasyprint python modules using pip.
Depending on your system, you may need to use 'sudo' or install in
user-specific directories, using the --user switch. On OS X in
particular, you may also need to install a newer version of setuptools
using --user before weasyprint can be installed. If you install with
the --user switch, you may need to also set PYTHONPATH, e.g.,
    PYTHONPATH=/Users/username/Library/Python/2.7/lib/python/site-packages
for Python 2.7.
The basic pip commands (modify as needed according to the text above)
are:
    pip install 'pycairo>=1.18' 'weasyprint<=0.42.3'
3. Finally, install the full Noto Font and Roboto Mono packages:
* Download the full font file from:
    https://noto-website-2.storage.googleapis.com/pkgs/Noto-unhinted.zip
or follow the 'DOWNLOAD ALL FONTS' link on this page:
    https://www.google.com/get/noto/
* Follow the installation instructions at
    https://www.google.com/get/noto/help/install/
* Go to https://fonts.google.com/specimen/Roboto+Mono, and download the
font. Follow the installation instructions above, as applied to this
download.
With these libraries, modules, and fonts installed and available to
xml2rfc, the --pdf switch will be enabled.
"""
    return pdf_requirements_info + missing_libs
def print_pdf_help(options, parser):
    """Print the PDF generation requirements and exit."""
    help_text = get_pdf_help()
    print(help_text)
    sys.exit()
def print_version(options, parser):
    """Print the xml2rfc version; with --verbose also print dependency versions."""
    versions = xml2rfc.get_versions()
    # the first entry is xml2rfc itself as a (name, version) pair
    print('%s %s' % versions[0])
    if options.verbose:
        # the remaining entries are (name, version) pairs for dependencies
        for item in versions[1:]:
            print(' %s %s' % item)
def print_values(options, parser, config_paths):
    """Print the effective option values and the config-file search path."""
    print("\n"
          + parser.format_values()
          + "%s\n %s" % ('Config file search path:', config_paths))
def extract_anchor_info(xml):
    """Build a lookup of section anchors to section numbers from a prepped XML tree.

    Sections without an anchor or 'pn' label, and auto-generated anchors
    (those starting with 'anchor-'), are skipped.
    """
    sections = {}
    for section in xml.xpath('./middle//section'):
        anchor = section.get('anchor')
        label = section.get('pn')
        if anchor and label and not anchor.startswith('anchor-'):
            sections[anchor] = label.replace('section-', '')
    return {'version': 1, 'sections': sections}
optionparser = None
def main():
global optionparser
# Populate options
config_paths = ['/etc/xml2rfc.conf', '~/.xml2rfc.conf']
user_conf = os.path.join(appdirs.user_config_dir(), 'xml2rfc.conf')
if not user_conf in config_paths:
config_paths.append(user_conf)
optionparser = configargparse.ArgumentParser(usage='xml2rfc [OPTIONS] SOURCE [OPTIONS]'
'...\nExample: xml2rfc '
'draft.xml -b draft-foo-19 --text --html',
add_help=False,
add_config_file_help=False,
default_config_files=config_paths,
)
input_options = optionparser.add_argument_group('Positional arguments')
input_options.add_argument('source', nargs='?', help="Input XML file to render to one or more of the available formats.")
help_options = optionparser.add_argument_group('Documentation options',
'Some options to generate built-in documentation.')
help_options.add_argument('-h', '--help', action='help',
help='show a help message and exit')
help_options.add_argument('--docfile', action='store_true',
help='generate a documentation XML file ready for formatting')
help_options.add_argument('--manpage', action='store_true',
help='show paged text documentation')
help_options.add_argument('--country-help', action="store_true",
help='show the recognized <country> strings')
help_options.add_argument('--pdf-help', action="store_true",
help='show pdf generation requirements')
# help_options.add_argument('--pi-help', action="store_true",
# help='show the names and default values of PIs (for v2)')
help_options.add_argument('--template-dir',
help='directory to pull the doc.xml and doc.yaml templates from. '
'The default is the "templates" directory of the xml2rfc package')
help_options.add_argument('--values', action='store_true',
help='show option values and from where they come')
help_options.add_argument('-V', '--version', action='store_true',
help='display the version number and exit')
formatgroup = optionparser.add_argument_group('Format selection',
'One or more of the following output formats may be specified. '
'The default is --text. The destination filename will be based '
'on the input filename, unless --out=FILE or --basename=BASE '
'is used.')
formatgroup.add_argument('--text', action='store_true',
help='outputs formatted text to file, with proper page breaks')
formatgroup.add_argument('--html', action='store_true',
help='outputs formatted HTML to file')
formatgroup.add_argument('--nroff', action='store_true',
help='outputs formatted nroff to file (only v2 input)')
if xml2rfc.HAVE_CAIRO and xml2rfc.HAVE_PANGO:
formatgroup.add_argument('--pdf', action='store_true',
help='outputs formatted PDF to file')
else:
formatgroup.add_argument('--pdf', action='store_true',
help='(unavailable due to missing external library)')
formatgroup.add_argument('--raw', action='store_true',
help='outputs formatted text to file, unpaginated (only v2 input)')
formatgroup.add_argument('--expand', action='store_true',
help='outputs XML to file with all references expanded')
formatgroup.add_argument('--v2v3', action='store_true',
help='convert vocabulary version 2 XML to version 3')
formatgroup.add_argument('--preptool', action='store_true',
help='run preptool on the input')
formatgroup.add_argument('--unprep', action='store_true',
help='reduce prepped xml to unprepped')
formatgroup.add_argument('--info', action='store_true',
help='generate a JSON file with anchor to section lookup information')
plain_options = optionparser.add_argument_group('Generic Switch Options')
plain_options.add_argument('-C', '--clear-cache', action='store_true', default=False,
help='purge the cache and exit')
plain_options.add_argument( '--debug', action='store_true',
help='Show debugging output')
plain_options.add_argument('-n', '--no-dtd', action='store_true',
help='disable DTD validation step')
plain_options.add_argument('-N', '--no-network', action='store_true', default=False,
help='don\'t use the network to resolve references')
plain_options.add_argument('-O', '--no-org-info', dest='first_page_author_org', action='store_false', default=True,
help='don\'t show author orgainzation info on page one (legacy only)')
plain_options.add_argument('-q', '--quiet', action='store_true',
help="don't print anything while working")
plain_options.add_argument('--skip-config-files', action="store_true", default=False,
help='ignore config file settings')
plain_options.add_argument('--allow-local-file-access', action="store_true", default=False,
help='Allow local file system references')
plain_options.add_argument('-r', '--remove-pis', action='store_true', default=False,
help='Remove XML processing instructions')
plain_options.add_argument('-u', '--utf8', action='store_true',
help='generate utf8 output')
plain_options.add_argument('-v', '--verbose', action='store_true',
help='print extra information')
value_options = optionparser.add_argument_group('Generic Options with Values')
value_options.add_argument('-b', '--basename', dest='basename', metavar='NAME',
help='specify the base name for output files')
value_options.add_argument('-c', '--cache', dest='cache', metavar='PATH',
help='specify a primary cache directory to write to; default: try [ %s ]'%', '.join(xml2rfc.CACHES) )
value_options.add_argument( '--config-file', dest="config_file", metavar='FILE', is_config_file_arg=True,
help='specify a configuration file')
value_options.add_argument('-d', '--dtd', dest='dtd', metavar='DTDFILE', help='specify an alternate dtd file')
value_options.add_argument('-D', '--date', dest='datestring', metavar='DATE', default=None,
help="run as if the date is DATE (format: yyyy-mm-dd). Default: Today's date")
value_options.add_argument('-f', '--filename', dest='filename', metavar='FILE',
help='Deprecated. The same as -o')
value_options.add_argument('-i', '--indent', type=int, default=2, metavar='INDENT',
help='With some v3 formatters: Indentation to use when pretty-printing XML')
value_options.add_argument('-o', '--out', dest='output_filename', metavar='FILE',
help='specify an explicit output filename')
value_options.add_argument('-p', '--path', dest='output_path', metavar='PATH',
help='specify the directory path for output files')
value_options.add_argument('-s', '--silence', action='append', type=str, metavar='STRING',
help="Silence any warning beginning with the given string")
formatoptions = optionparser.add_argument_group('Generic Format Options')
formatoptions.add_argument('--v3', action='store_true', default=True,
help='with --text and --html: use the v3 formatter, rather than the legacy one')
formatoptions.add_argument('--legacy', '--v2', dest='v3', action='store_false',
help='with --text and --html: use the legacy output formatters, rather than the v3 ones')
formatoptions.add_argument('--id-is-work-in-progress', default=True, action='store_true',
help='in references, refer to Internet-Drafts as "Work in Progress"')
textoptions = optionparser.add_argument_group('Text Format Options')
textoptions.add_argument('--no-headers', dest='omit_headers', action='store_true',
help='calculate page breaks, and emit form feeds and page top'
' spacing, but omit headers and footers from the paginated format')
textoptions.add_argument('--legacy-list-symbols', default=False, action='store_true',
help='use the legacy list bullet symbols, rather than the new ones')
textoptions.add_argument('--legacy-date-format', default=False, action='store_true',
help='use the legacy date format, rather than the new one')
textoptions.add_argument('--no-legacy-date-format', dest='legacy_date_format', action='store_false',
help="don't use the legacy date format")
textoptions.add_argument('--list-symbols', metavar='4*CHAR',
help='use the characters given as list bullet symbols')
textoptions.add_argument('--bom', '--BOM', action='store_true', default=False,
help='Add a BOM (unicode byte order mark) to the start of text files')
textoptions.add_argument('--paginate', '--pagination', dest='pagination', action='store_true', default=None,
help='do pagination')
textoptions.add_argument('-P', '--no-pagination', dest='pagination', action='store_false', default=None,
help='don\'t do pagination of v3 draft text format')
textoptions.add_argument('--table-hyphen-breaks', action='store_true', default=False,
help='More easily do line breaks after hyphens in table cells to give a more compact table')
textoptions.add_argument('--table-borders', default='full', choices=['full', 'light', 'minimal', 'min', ],
help='The style of table borders to use for text output; one of full/light/minimal')
htmloptions = optionparser.add_argument_group('Html Format Options')
htmloptions.add_argument('--css', default=None, metavar="FILE",
help='Use the | |
<reponame>jonathanschilling/idf<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 22:49:47 2019
@author: <NAME> (<EMAIL>)
"""
#%% prepare for code generation
def indented(tabs, lines, indentationChar="\t"):
    """Prefix *lines* with *tabs* copies of *indentationChar*.

    Multi-line input (text whose stripped form still contains a newline) is
    indented line by line; empty lines are dropped and every kept line gets a
    trailing newline. Single-line input is returned with just the prefix and
    no added newline.
    """
    prefix = indentationChar * tabs
    if '\n' in lines.strip():
        return ''.join(prefix + line + '\n'
                       for line in lines.split('\n') if line != '')
    return prefix + lines
def indent(tabs, lines, indentationChar="\t"):
    """Indent *lines* at the current level and step the level up by one.

    NOTE(review): the text is indented at *tabs*, not tabs+1 — the increment
    applies to subsequent lines emitted by the caller.
    """
    body = indented(tabs, lines, indentationChar)
    return tabs + 1, body
def unindent(tabs, lines, indentationChar="\t"):
    """Indent *lines* at the current level and step the level down by one.

    NOTE(review): the text itself is still indented at *tabs*, not tabs-1 —
    only the returned level is decremented, matching the original call sites.
    """
    body = indented(tabs, lines, indentationChar)
    return tabs - 1, body
#%% create a Java reading routine for a HDF5 file
from .Hdf5File import Group, Dataset
def javaClassName(name):
    """Turn a name like "asdf_adsf" into camel case: every "_" is removed and
    the character following it upper-cased, and the first character is
    upper-cased as well (Java class naming convention)."""
    pieces = name.split("_")
    camel = ''.join(p[:1].upper() + p[1:] for p in pieces)
    return camel[0].upper() + camel[1:]
def javaVarName(name):
    """Turn a name like "asdf_adsf" into camel case at the locations of "_".

    The first character is deliberately left untouched so that the user can
    control the exact variable name from the definition."""
    pieces = name.split("_")
    return pieces[0] + ''.join(p[:1].upper() + p[1:] for p in pieces[1:])
def javaDtype(dtype):
    """Map a definition dtype onto a Java type name.

    The primitive dtypes 'int', 'double' and 'boolean' pass through verbatim;
    anything else is treated as a compound datatype and mapped onto the name
    of the Java class generated for it."""
    if dtype in ('int', 'double', 'boolean'):
        return dtype
    return javaClassName(dtype)
def javaGenClassFromGroup(tabs, group, static=True):
    """Generate Java source code defining a corresponding class from the definition of a Group, recursing into the items.

    tabs   -- number of indentation marks to prepend to every line of the source code
    group  -- Group definition of which a Java class should be generated
    static -- select if the generated class should be static or not (defaults to True)

    Returns (classCode, readCodes): the full class declaration text and a list
    of Java read statements, one per Dataset member encountered.
    """
    classCode = "\n"
    readCodes = []
    # the group description becomes the Javadoc of the generated class
    if group.description is not None:
        classCode += indented(tabs, "/** "+group.description+" */\n")
    # class name is derived from the full HDF5 path, e.g. "/a/b" -> "ABClass"-style
    if static:
        tabs, decl = indent(tabs, "public static class "+javaClassName(group.getFullName().replace("/", "_"))+" {\n")
    else:
        tabs, decl = indent(tabs, "public class "+javaClassName(group.getFullName().replace("/", "_"))+" {\n")
    classCode += decl
    constructorPart=''
    memberPart = ''
    numComplexMembers = 0
    for item in group.items:
        # member Javadoc from the item description, if any
        if item.description is not None:
            memberPart += indented(tabs, "/** "+item.description+" */\n")
        memberPart += indented(tabs, 'public ')
        if type(item)==Dataset:
            # plain dataset: primitive (or compound) typed field plus a read statement
            memberPart += javaDtype(item.dtype)
            readCodes.append(javaRead(tabs, item))
        else:
            # nested group: field of the generated class type, instantiated in the constructor
            memberPart += javaClassName(item.getFullName().replace("/", "_"))
            constructorPart += indented(tabs+1, item.name)+" = new "+javaClassName(item.getFullName().replace("/", "_"))+"();\n"
            numComplexMembers+=1
        # append one pair of array brackets per dimension of the dataset
        if type(item) == Dataset and item.getRank()>0:
            for i in range(item.getRank()):
                memberPart += '[]'
        memberPart += ' '+item.name+';\n'
    # only emit a constructor when there are nested-group members to initialize
    if numComplexMembers>0:
        classCode += indented(tabs, "/** initialize complex datatypes */\n")
        classCode += indented(tabs, 'public '+javaClassName(group.getFullName().replace("/", "_"))+'() {\n')
        classCode += constructorPart
        classCode += indented(tabs, '}\n\n')
    classCode += memberPart
    tabs -= 1
    classCode += indented(tabs, '} // end of '+javaClassName(group.getFullName().replace("/", "_")))
    return classCode, readCodes
def javaRead(tabs, dataset):
    """Generate one Java statement that reads *dataset* from a NetcdfFile 'file'.

    tabs    -- indentation level (currently unused; kept for interface stability)
    dataset -- Dataset definition to generate the read statement for

    Scalars use readScalarInt()/readScalarDouble(); rank-1 arrays go through
    get1DJavaArray, higher ranks through copyToNDJavaArray. Boolean arrays and
    compound datatypes are not implemented yet (a notice is printed and a
    placeholder comment / empty statement is emitted).
    """
    varName = dataset.getFullName()
    javaName = varName[1:].replace("/", ".")
    rank = dataset.getRank()
    dims = '[]' * rank
    stmt = ''
    if dataset.dtype == 'int':
        if rank == 0:
            stmt = '{javaName} = file.findVariable("{varName}").readScalarInt()'
        else:
            stmt = '{javaName} = (int' + dims + ')file.findVariable("{varName}").read()'
            stmt += '.get1DJavaArray(DataType.INT)' if rank == 1 else '.copyToNDJavaArray()'
    elif dataset.dtype == 'double':
        if rank == 0:
            stmt = '{javaName} = file.findVariable("{varName}").readScalarDouble()'
        else:
            stmt = '{javaName} = (double' + dims + ')file.findVariable("{varName}").read()'
            stmt += '.get1DJavaArray(DataType.DOUBLE)' if rank == 1 else '.copyToNDJavaArray()'
    elif dataset.dtype == 'boolean':
        if rank == 0:
            # NetCDF has no boolean; scalars are stored as int and mapped to true/false
            stmt = '{javaName} = (file.findVariable("{varName}").readScalarInt() > 0 ? true : false)'
        else:
            # boolean arrays would need an int->boolean conversion loop; not done yet
            print(dataset.getFullName()+" reading not implemented yet")
            stmt = '// read {varName} into {javaName}'
    else:
        # custom (compound) datatype
        print(dataset.getFullName()+" reading not implemented yet")
    return stmt.format(javaName=javaName, varName=varName)+';\n'
#%% document who created the reading routines when on which machine
from datetime import datetime
import getpass
import platform
# dd/mm/YY H:M:S in UTC
now_string = datetime.utcnow().strftime('%d/%m/%Y %H:%M:%S UTC')
# user and host that ran the generator, recorded for provenance
username = getpass.getuser()
hostname = platform.node()
# provenance line embedded as a comment in every generated source file
creation_tag = 'auto-created by a user called \''+username+'\' on a machine called \''+hostname+'\' at '+now_string
#%% actually generate Java class for reading SPEC output files
def genJavaReader(outdir, packageName, className, s):
# we need to reverse the definition order so that types which are used inside other types
# are already defined when used
reverse_rootStack = []
rootStack = []
rootStack.append(s.rootGroup)
while len(rootStack)>0:
currentItem = rootStack[-1]
rootStack = rootStack[:-1]
if currentItem is not s.rootGroup:
reverse_rootStack.append(currentItem)
if type(currentItem)==Group:
for item in currentItem.items:
rootStack.append(item)
javaFilename = outdir+className+".java"
print("creating Java reading class into '"+javaFilename+"'")
# begin code for root group (== enclosing class)
f=open(javaFilename, "w")
tabs=0
f.write("""package """+packageName+""";
// AUTO-GENERATED; DO NOT COMMIT CHANGES TO THIS FILE !
// """+creation_tag+"""
import java.io.IOException;
import java.util.Locale;
import ucar.ma2.DataType;
import ucar.nc2.NetcdfFile;
""")
rootClassCode = ""
if s.rootGroup.description is not None:
rootClassCode += indented(tabs, "/** "+s.rootGroup.description+" */\n")
tabs, decl = indent(tabs, "public class "+className+" {\n")
rootClassCode += decl
numComplexMembers = 0
f.write(rootClassCode)
readParts=[]
# add nested groups
while len(reverse_rootStack)>0:
currentItem = reverse_rootStack[-1]
reverse_rootStack = reverse_rootStack[:-1]
if type(currentItem)==Group:
defCode, readCodes = javaGenClassFromGroup(tabs, currentItem)
f.write(defCode+'\n')
for readCode in readCodes:
readParts.append(readCode)
numComplexMembers+=1
# end code for root group (== enclosing class)
constructorPart=''
memberPart = ''
rootClassCode = ""
for item in s.rootGroup.items:
if item.description is not None:
memberPart += indented(tabs, "/** "+item.description+" */\n")
memberPart += indented(tabs, "public ")
if type(item)==Dataset:
memberPart += javaDtype(item.dtype)
readParts.append(javaRead(tabs, item))
else:
memberPart += javaClassName(item.getFullName().replace("/", "_"))
constructorPart += indented(tabs+1, item.name+" = new "+javaClassName(item.getFullName().replace("/", "_"))+"();\n")
numComplexMembers+=1
if type(item) == Dataset and item.getRank()>0:
for i in range(item.getRank()):
memberPart += '[]'
memberPart += ' '+item.name+';\n'
rootClassCode += "\n"
# constructor to initialize complex members
if numComplexMembers>0:
rootClassCode += indented(tabs, "/** Initialize complex datatypes. */\n")
rootClassCode += indented(tabs, 'public '+className+'() {\n')
rootClassCode += constructorPart
rootClassCode += indented(tabs, '}\n')
# constructors to load data from file
rootClassCode += "\n"
rootClassCode += indented(tabs, "/**\n")
rootClassCode += indented(tabs, " * Initalize complex datatypes and load "+className+" contents from a HDF5 file identified by {@code filename}.\n")
rootClassCode += indented(tabs, " * @param filename path to the HDF5 file to load\n")
rootClassCode += indented(tabs, " */\n")
tabs, line = indent(tabs, "public "+className+"(String filename) {\n")
rootClassCode += line
rootClassCode += indented(tabs, "this();\n")
tabs, line = indent(tabs, "try {\n")
rootClassCode += line
rootClassCode += indented(tabs, "NetcdfFile file = NetcdfFile.open(filename);\n")
rootClassCode += indented(tabs, "loadFrom(file);\n")
rootClassCode += indented(tabs, "file.close();\n")
rootClassCode += indented(tabs-1, "} catch (IOException e) {\n")
rootClassCode += indented(tabs, "e.printStackTrace();\n")
tabs -= 1
rootClassCode += indented(tabs, "}\n")
tabs -= 1
rootClassCode += indented(tabs, "}\n")
rootClassCode += "\n"
rootClassCode += indented(tabs, "/**\n")
rootClassCode += indented(tabs, " * Initalize complex datatypes and load "+className+" contents from an already-open NetCDF file identified by {@code file}.\n")
rootClassCode += indented(tabs, " * @param file open file to load the data from\n")
rootClassCode += indented(tabs, " */\n")
tabs, line = indent (tabs, "public "+className+"(NetcdfFile file) {\n") ; rootClassCode += line
rootClassCode += indented(tabs, "this();\n")
tabs, line = indent (tabs, "try {\n") ; rootClassCode += line
tabs, line = unindent (tabs, "loadFrom(file);\n") ; rootClassCode += line
tabs, line = indent (tabs, "} catch (IOException e) {\n") ; rootClassCode += line
tabs, line = unindent (tabs, "e.printStackTrace();\n") ; rootClassCode += line
tabs, line = unindent (tabs, "}\n") ; rootClassCode += line
rootClassCode += indented (tabs, "}\n")
rootClassCode += "\n"
rootClassCode += memberPart
f.write(rootClassCode)
# definitions part is done; now for the reading routines
rootClassCode = "\n"
rootClassCode += indented(tabs, "/**\n")
rootClassCode += indented(tabs, " * Load "+className+" contents from an already-open NetCDF file identified by {@code file}.\n")
rootClassCode += indented(tabs, " * @param file open file to load the data from\n")
rootClassCode += indented(tabs, " * @return initialized "+className+" object\n")
rootClassCode += indented(tabs, " */\n")
tabs, line = indent(tabs, "public "+className+" loadFrom(NetcdfFile file) throws IOException {\n")
rootClassCode += line
# here goes the magic that actually loads the data from the file
for readPart in readParts:
rootClassCode += indented(tabs, readPart)
tabs, line | |
<reponame>paddybu/xsbs<filename>src/pyscripts/sqlalchemy/databases/postgres.py
# postgres.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009 <NAME> <EMAIL>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database.
Driver
------
The psycopg2 driver is supported, available at http://pypi.python.org/pypi/psycopg2/ .
The dialect has several behaviors which are specifically tailored towards compatibility
with this module.
Note that psycopg1 is **not** supported.
Connecting
----------
URLs are of the form `postgres://user:password@host:port/dbname[?key=value&key=value...]`.
PostgreSQL-specific keyword arguments which are accepted by :func:`~sqlalchemy.create_engine()` are:
* *server_side_cursors* - Enable the usage of "server side cursors" for SQL statements which support
this feature. What this essentially means from a psycopg2 point of view is that the cursor is
created using a name, e.g. `connection.cursor('some name')`, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but are instead left
on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.base.ResultProxy`
uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows
at a time are fetched over the wire to reduce conversational overhead.
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means of creating
new primary key values for integer-based primary key columns. When creating tables,
SQLAlchemy will issue the ``SERIAL`` datatype for integer-based primary key columns,
which generates a sequence corresponding to the column and associated with it based on
a naming convention.
To specify a specific named sequence to be used for primary key generation, use the
:func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
Currently, when SQLAlchemy issues a single insert statement, to fulfill the contract of
having the "last insert identifier" available, the sequence is executed independently
beforehand and the new value is retrieved, to be used in the subsequent insert. Note
that when an :func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the sequence is not pre-executed and normal PG SERIAL behavior
is used.
PostgreSQL 8.3 supports an ``INSERT...RETURNING`` syntax which SQLAlchemy supports
as well. A future release of SQLA will use this feature by default in lieu of
sequence pre-execution in order to retrieve new primary key values, when available.
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.3's ``INSERT..RETURNING`` and ``UPDATE..RETURNING`` syntaxes,
but must be explicitly enabled on a per-statement basis::
# INSERT..RETURNING
result = table.insert(postgres_returning=[table.c.col1, table.c.col2]).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update(postgres_returning=[table.c.col1, table.c.col2]).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
Indexes
-------
PostgreSQL supports partial indexes. To create them pass a postgres_where
option to the Index constructor::
Index('my_index', my_table.c.id, postgres_where=tbl.c.value > 10)
Transactions
------------
The PostgreSQL dialect fully supports SAVEPOINT and two-phase commit operations.
"""
import decimal, random, re, string
from sqlalchemy import sql, schema, exc, util
from sqlalchemy.engine import base, default
from sqlalchemy.sql import compiler, expression
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import types as sqltypes
class PGInet(sqltypes.TypeEngine):
    """PostgreSQL INET (host address) type."""
    def get_col_spec(self):
        # DDL type name emitted in CREATE TABLE
        return "INET"
class PGCidr(sqltypes.TypeEngine):
    """PostgreSQL CIDR (network specification) type."""
    def get_col_spec(self):
        return "CIDR"
class PGMacAddr(sqltypes.TypeEngine):
    """PostgreSQL MACADDR (MAC address) type."""
    def get_col_spec(self):
        return "MACADDR"
class PGNumeric(sqltypes.Numeric):
    """PostgreSQL NUMERIC type with optional precision and scale."""
    def get_col_spec(self):
        # bare NUMERIC when no precision was configured
        if not self.precision:
            return "NUMERIC"
        else:
            return "NUMERIC(%(precision)s, %(scale)s)" % {'precision': self.precision, 'scale' : self.scale}
    def bind_processor(self, dialect):
        # no inbound conversion needed: the DBAPI accepts Decimal directly
        return None
    def result_processor(self, dialect):
        if self.asdecimal:
            # asdecimal=True: pass through the DBAPI's Decimal values unchanged
            return None
        else:
            # asdecimal=False: downcast Decimal results to float
            def process(value):
                if isinstance(value, decimal.Decimal):
                    return float(value)
                else:
                    return value
            return process
class PGFloat(sqltypes.Float):
    """PostgreSQL FLOAT type with optional precision."""
    def get_col_spec(self):
        if not self.precision:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': self.precision}
class PGInteger(sqltypes.Integer):
    """PostgreSQL INTEGER type."""
    def get_col_spec(self):
        return "INTEGER"
class PGSmallInteger(sqltypes.Smallinteger):
    """PostgreSQL SMALLINT type."""
    def get_col_spec(self):
        return "SMALLINT"
class PGBigInteger(PGInteger):
    """PostgreSQL BIGINT type (8-byte integer variant of PGInteger)."""
    def get_col_spec(self):
        return "BIGINT"
class PGDateTime(sqltypes.DateTime):
    """TIMESTAMP WITH/WITHOUT TIME ZONE, selected by the ``timezone`` flag."""
    def get_col_spec(self):
        return "TIMESTAMP " + (self.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
class PGDate(sqltypes.Date):
    """PostgreSQL DATE type."""
    def get_col_spec(self):
        return "DATE"
class PGTime(sqltypes.Time):
    """TIME WITH/WITHOUT TIME ZONE, selected by the ``timezone`` flag."""
    def get_col_spec(self):
        return "TIME " + (self.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
class PGInterval(sqltypes.TypeEngine):
    """PostgreSQL INTERVAL (time span) type."""
    def get_col_spec(self):
        return "INTERVAL"
class PGText(sqltypes.Text):
    """PostgreSQL TEXT (unbounded string) type."""
    def get_col_spec(self):
        return "TEXT"
class PGString(sqltypes.String):
    """VARCHAR, with the length emitted only when one was configured."""
    def get_col_spec(self):
        if self.length:
            return "VARCHAR(%(length)d)" % {'length' : self.length}
        else:
            return "VARCHAR"
class PGChar(sqltypes.CHAR):
    """CHAR, with the length emitted only when one was configured."""
    def get_col_spec(self):
        if self.length:
            return "CHAR(%(length)d)" % {'length' : self.length}
        else:
            return "CHAR"
class PGBinary(sqltypes.Binary):
    """PostgreSQL BYTEA (binary string) type."""
    def get_col_spec(self):
        return "BYTEA"
class PGBoolean(sqltypes.Boolean):
    """PostgreSQL BOOLEAN type."""
    def get_col_spec(self):
        return "BOOLEAN"
class PGBit(sqltypes.TypeEngine):
    """PostgreSQL BIT (fixed-length bit string) type."""
    def get_col_spec(self):
        return "BIT"
class PGUuid(sqltypes.TypeEngine):
    """PostgreSQL UUID type."""
    def get_col_spec(self):
        return "UUID"
class PGArray(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
    """PostgreSQL ARRAY type, parameterized with an item type.

    With mutable=True (the default) the ORM copies list values so that
    in-place changes can be detected; mutable=False skips the copy.
    """
    def __init__(self, item_type, mutable=True):
        # accept either a type class or an instance; instantiate classes
        if isinstance(item_type, type):
            item_type = item_type()
        self.item_type = item_type
        self.mutable = mutable
    def copy_value(self, value):
        # shallow copy only when mutability tracking is on
        if value is None:
            return None
        elif self.mutable:
            return list(value)
        else:
            return value
    def compare_values(self, x, y):
        return x == y
    def is_mutable(self):
        return self.mutable
    def dialect_impl(self, dialect, **kwargs):
        # clone this type with the item type adapted to the target dialect
        impl = self.__class__.__new__(self.__class__)
        impl.__dict__.update(self.__dict__)
        impl.item_type = self.item_type.dialect_impl(dialect)
        return impl
    def bind_processor(self, dialect):
        # delegate per-item conversion to the item type's bind processor
        item_proc = self.item_type.bind_processor(dialect)
        def process(value):
            if value is None:
                return value
            # recurse because PG arrays can be nested (lists/tuples of lists)
            def convert_item(item):
                if isinstance(item, (list, tuple)):
                    return [convert_item(child) for child in item]
                else:
                    if item_proc:
                        return item_proc(item)
                    else:
                        return item
            return [convert_item(item) for item in value]
        return process
    def result_processor(self, dialect):
        # mirror of bind_processor for the outbound direction; only list
        # (not tuple) marks a nested dimension here
        item_proc = self.item_type.result_processor(dialect)
        def process(value):
            if value is None:
                return value
            def convert_item(item):
                if isinstance(item, list):
                    return [convert_item(child) for child in item]
                else:
                    if item_proc:
                        return item_proc(item)
                    else:
                        return item
            return [convert_item(item) for item in value]
        return process
    def get_col_spec(self):
        # e.g. "INTEGER[]"
        return self.item_type.get_col_spec() + '[]'
# Generic SQLAlchemy types -> their PG-specific implementations; consumed by
# PGDialect.type_descriptor via sqltypes.adapt_type.
colspecs = {
    sqltypes.Integer : PGInteger,
    sqltypes.Smallinteger : PGSmallInteger,
    sqltypes.Numeric : PGNumeric,
    sqltypes.Float : PGFloat,
    sqltypes.DateTime : PGDateTime,
    sqltypes.Date : PGDate,
    sqltypes.Time : PGTime,
    sqltypes.String : PGString,
    sqltypes.Binary : PGBinary,
    sqltypes.Boolean : PGBoolean,
    sqltypes.Text : PGText,
    sqltypes.CHAR: PGChar,
}
# PG catalog / information-schema type names -> dialect type classes,
# used when reflecting existing tables.
ischema_names = {
    'integer' : PGInteger,
    'bigint' : PGBigInteger,
    'smallint' : PGSmallInteger,
    'character varying' : PGString,
    'character' : PGChar,
    '"char"' : PGChar,
    'name': PGChar,
    'text' : PGText,
    'numeric' : PGNumeric,
    'float' : PGFloat,
    'real' : PGFloat,
    'inet': PGInet,
    'cidr': PGCidr,
    'uuid':PGUuid,
    'bit':PGBit,
    'macaddr': PGMacAddr,
    'double precision' : PGFloat,
    'timestamp' : PGDateTime,
    'timestamp with time zone' : PGDateTime,
    'timestamp without time zone' : PGDateTime,
    'time with time zone' : PGTime,
    'time without time zone' : PGTime,
    'date' : PGDate,
    'time': PGTime,
    'bytea' : PGBinary,
    'boolean' : PGBoolean,
    'interval':PGInterval,
}
# TODO: filter out 'FOR UPDATE' statements
# Raw-SQL statements matching this (leading-whitespace SELECT, case-insensitive)
# are eligible for a server-side cursor when the feature is enabled.
SERVER_SIDE_CURSOR_RE = re.compile(
    r'\s*SELECT',
    re.I | re.UNICODE)
class PGExecutionContext(default.DefaultExecutionContext):
    """Execution context that switches to psycopg2 named (server-side)
    cursors for eligible SELECT statements when the dialect was created
    with server_side_cursors=True."""
    def create_cursor(self):
        # TODO: coverage for server side cursors + select.for_update()
        # Eligible: a compiled selectable that is not FOR UPDATE, or raw/text
        # SQL matching SERVER_SIDE_CURSOR_RE.
        is_server_side = \
            self.dialect.server_side_cursors and \
            ((self.compiled and isinstance(self.compiled.statement, expression.Selectable)
              and not getattr(self.compiled.statement, 'for_update', False)) \
            or \
            (
                (not self.compiled or isinstance(self.compiled.statement, expression._TextClause))
                and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
            )
        # remembered so get_result_proxy can pick the buffering proxy
        self.__is_server_side = is_server_side
        if is_server_side:
            # use server-side cursors:
            # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
            # naming the cursor is what makes psycopg2 create it server-side;
            # the ident just needs to be unique per cursor
            ident = "c_%s_%s" % (hex(id(self))[2:], hex(random.randint(0, 65535))[2:])
            return self._connection.connection.cursor(ident)
        else:
            return self._connection.connection.cursor()
    def get_result_proxy(self):
        # server-side cursors fetch rows lazily; BufferedRowResultProxy
        # batches those fetches
        if self.__is_server_side:
            return base.BufferedRowResultProxy(self)
        else:
            return base.ResultProxy(self)
class PGDialect(default.DefaultDialect):
name = 'postgres'
supports_alter = True
supports_unicode_statements = False
max_identifier_length = 63
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
preexecute_pk_sequences = True
supports_pk_autoincrement = False
default_paramstyle = 'pyformat'
supports_default_values = True
supports_empty_insert = False
def __init__(self, server_side_cursors=False, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
def dbapi(cls):
import psycopg2 as psycopg
return psycopg
dbapi = classmethod(dbapi)
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def type_descriptor(self, typeobj):
return sqltypes.adapt_type(typeobj, colspecs)
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("PREPARE TRANSACTION :tid", bindparams=[sql.bindparam('tid', xid)]))
def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
if is_prepared:
if recover:
#FIXME: ugly hack to get out of transaction context when commiting recoverable transactions
# Must find out a way how to make the dbapi not open a transaction.
connection.execute(sql.text("ROLLBACK"))
connection.execute(sql.text("ROLLBACK PREPARED :tid", bindparams=[sql.bindparam('tid', xid)]))
connection.execute(sql.text("BEGIN"))
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute(sql.text("ROLLBACK"))
connection.execute(sql.text("COMMIT PREPARED :tid", bindparams=[sql.bindparam('tid', xid)]))
connection.execute(sql.text("BEGIN"))
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def get_default_schema_name(self, connection):
return connection.scalar("select current_schema()", None)
get_default_schema_name = base.connection_memoize(
('dialect', 'default_schema_name'))(get_default_schema_name)
def last_inserted_ids(self):
if self.context.last_inserted_ids is None:
raise exc.InvalidRequestError("no INSERT executed, or can't use cursor.lastrowid without PostgreSQL OIDs enabled")
else:
return self.context.last_inserted_ids
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute("""select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where n.nspname=current_schema() and lower(relname)=%(name)s""", {'name':table_name.lower().encode(self.encoding)});
else:
cursor = connection.execute("""select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where n.nspname=%(schema)s and lower(relname)=%(name)s""", {'name':table_name.lower().encode(self.encoding), 'schema':schema});
return bool( not not cursor.rowcount )
def has_sequence(self, connection, sequence_name):
cursor = connection.execute('''SELECT relname FROM pg_class WHERE relkind = 'S' AND relnamespace IN ( SELECT oid FROM pg_namespace WHERE nspname NOT LIKE 'pg_%%' AND nspname != 'information_schema' AND relname = %(seqname)s);''', {'seqname': sequence_name.encode(self.encoding)})
return bool(not not cursor.rowcount)
def is_disconnect(self, e):
if isinstance(e, self.dbapi.OperationalError):
return 'closed the connection' in str(e) or 'connection not open' in str(e)
elif isinstance(e, self.dbapi.InterfaceError):
return 'connection already | |
# print('download OMNI2 data from')
# omni2_url='ftp://nssdcftp.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat'
# print(omni2_url)
# try: urllib.request.urlretrieve(omni2_url, 'data/omni2_all_years.dat')
# except urllib.error.URLError as e:
# print(' ', omni2_url,' ',e.reason)
else:
#if omni2 hourly data is not yet converted and saved as pickle, do it:
if not os.path.exists('data/omni2_all_years_pickle.p'):
#load OMNI2 dataset from .dat file with a function from dst_module.py
omni = ps.get_omni_data()
#contains: omni time,day,hour,btot,bx,by,bz,bygsm,bzgsm,speed,speedx,den,pdyn,dst,kp
#save for faster loading later
pickle.dump(omni, open('data/omni2_all_years_pickle.p', 'wb') )
else:
omni = pickle.load(open('data/omni2_all_years_pickle.p', 'rb') )
#interpolate to 1 hour steps: make an array from last time in hour steps backwards for 24 hours, then interpolate
#this is the last 24 hours in 1 hour timesteps, 25 data points
# NOTE(review): times appear to be matplotlib datenums (days), so 1/24 is one
# hour — consistent with the date2num/num2date calls used elsewhere; confirm.
#for field
rbtimes24=np.arange(dscovr['time'][-1]-1,dscovr['time'][-1]+1/24,1/24)
btot24=np.interp(rbtimes24,dscovr['time'],dscovr['btot'])
bzgsm24=np.interp(rbtimes24,dscovr['time'],dscovr['bz'])
bygsm24=np.interp(rbtimes24,dscovr['time'],dscovr['by'])
bxgsm24=np.interp(rbtimes24,dscovr['time'],dscovr['bx'])
#for plasma
rptimes24=np.arange(dscovr['time'][-1]-1,dscovr['time'][-1]+1/24,1/24)
rpv24=np.interp(rptimes24,dscovr['time'],dscovr['speed'])
rpn24=np.interp(rptimes24,dscovr['time'],dscovr['density'])
#define times of the future wind, deltat hours after current time
timesfp=np.arange(rptimes24[-1],rptimes24[-1]+1+1/24,1/24)
timesfb=np.arange(rbtimes24[-1],rbtimes24[-1]+1+1/24,1/24)
###calculate Dst for DSCOVR last 7 day data with Burton and OBrien
#this is the last 24 hours in 1 hour timesteps, 25 data points
#start on next day 0 UT, so rbtimes7 contains values at every full hour like the real Dst
rtimes7=np.arange(np.ceil(dscovr['time'])[0],dscovr['time'][-1],1.0000/24)
btot7=np.interp(rtimes7,dscovr['time'],dscovr['btot'])
bzgsm7=np.interp(rtimes7,dscovr['time'],dscovr['bz'])
bygsm7=np.interp(rtimes7,dscovr['time'],dscovr['by'])
bxgsm7=np.interp(rtimes7,dscovr['time'],dscovr['bx'])
rpv7=np.interp(rtimes7,dscovr['time'],dscovr['speed'])
rpn7=np.interp(rtimes7,dscovr['time'],dscovr['density'])
#interpolate NaN values in the hourly interpolated data ******* to add
print('Loaded Kyoto Dst from NOAA for last 7 days.')
#make Dst index from solar wind data
#make_dst_from_wind(btot_in,bx_in, by_in,bz_in,v_in,vx_in,density_in,time_in):#
# three empirical Dst models driven by the hourly-interpolated solar wind
rdst_temerin_li=ps.predict.calc_dst_temerin_li(rtimes7,btot7,bxgsm7,bygsm7,bzgsm7,rpv7,rpv7,rpn7)
rdst_obrien = ps.predict.calc_dst_obrien(rtimes7, bzgsm7, rpv7, rpn7)
rdst_burton = ps.predict.calc_dst_burton(rtimes7, bzgsm7, rpv7, rpn7)
##################### plot DSCOVR data
sns.set_context("talk")
sns.set_style("darkgrid")
fig=plt.figure(1,figsize=(12,10)) #fig=plt.figure(1,figsize=(14,14))
# shared plot style parameters: line width, font size, marker size
weite=1
fsize=11
msize=5
#panel 1: magnetic field (B total and Bz GSM)
ax4 = fig.add_subplot(411)
plt.plot_date(dscovr['time'], dscovr['btot'],'-k', label='B total', linewidth=weite)
if showinterpolated: plt.plot_date(rbtimes24, btot24,'ro', label='B total interpolated last 24 hours',linewidth=weite,markersize=msize)
plt.plot_date(dscovr['time'], dscovr['bz'],'-g', label='Bz GSM',linewidth=weite)
if showinterpolated: plt.plot_date(rbtimes24, bzgsm24,'go', label='Bz GSM interpolated last 24 hours',linewidth=weite,markersize=msize)
#indicate 0 level for Bz
plt.plot_date([rtimes7[0], rtimes7[-1]], [0,0],'--k', alpha=0.5, linewidth=1)
#test interpolation
#plt.plot_date(rtimes7, dscovr['bz']7,'-ko', label='B7',linewidth=weite)
plt.ylabel('Magnetic field [nT]', fontsize=fsize+2)
myformat = DateFormatter('%Y %b %d %Hh')
ax4.xaxis.set_major_formatter(myformat)
ax4.legend(loc='upper left', fontsize=fsize-2,ncol=4)
plt.xlim([np.ceil(dscovr['time'])[0],dscovr['time'][-1]])
plt.ylim(np.nanmin(dscovr['bz'])-10, np.nanmax(dscovr['btot'])+10)
plt.title('L1 DSCOVR real time solar wind provided by NOAA SWPC for '+ str(num2date(timenow))[0:16]+ ' UT', fontsize=16)
plt.xticks(fontsize=fsize)
plt.yticks(fontsize=fsize)
#panel 2: solar wind speed with slow/fast guide lines
ax5 = fig.add_subplot(412)
#add speed levels
plt.plot_date([rtimes7[0], rtimes7[-1]], [400,400],'--k', alpha=0.3, linewidth=1)
plt.annotate('slow',xy=(rtimes7[0],400),xytext=(rtimes7[0],400),color='k', fontsize=10)
plt.plot_date([rtimes7[0], rtimes7[-1]], [800,800],'--k', alpha=0.3, linewidth=1)
plt.annotate('fast',xy=(rtimes7[0],800),xytext=(rtimes7[0],800),color='k', fontsize=10 )
plt.plot_date(dscovr['time'], dscovr['speed'],'-k', label='V observed',linewidth=weite)
if showinterpolated: plt.plot_date(rptimes24, rpv24,'ro', label='V interpolated last 24 hours',linewidth=weite,markersize=msize)
plt.xlim([np.ceil(dscovr['time'])[0],dscovr['time'][-1]])
#plt.plot_date(rtimes7, rpv7,'-ko', label='B7',linewidth=weite)
plt.ylabel('Speed $\mathregular{[km \\ s^{-1}]}$', fontsize=fsize+2)
ax5.xaxis.set_major_formatter(myformat)
ax5.legend(loc=2,fontsize=fsize-2,ncol=2)
plt.xlim([np.ceil(dscovr['time'])[0],dscovr['time'][-1]])
plt.ylim([np.nanmin(dscovr['speed'])-50,np.nanmax(dscovr['speed'])+100])
plt.xticks(fontsize=fsize)
plt.yticks(fontsize=fsize)
#panel 3: proton density
ax6 = fig.add_subplot(413)
plt.plot_date(dscovr['time'], dscovr['density'],'-k', label='N observed',linewidth=weite)
if showinterpolated: plt.plot_date(rptimes24, rpn24,'ro', label='N interpolated last 24 hours',linewidth=weite,markersize=msize)
plt.ylabel('Density $\mathregular{[ccm^{-3}]}$',fontsize=fsize+2)
ax6.xaxis.set_major_formatter(myformat)
ax6.legend(loc=2,ncol=2,fontsize=fsize-2)
plt.ylim([0,np.nanmax(dscovr['density'])+10])
plt.xlim([np.ceil(dscovr['time'])[0],dscovr['time'][-1]])
plt.xticks(fontsize=fsize)
plt.yticks(fontsize=fsize)
#panel 4: modeled vs. observed Dst
ax6 = fig.add_subplot(414)
#model Dst
#******* added timeshift of 1 hour for L1 to Earth! This should be different for each timestep to be exact
#plt.plot_date(rtimes7+1/24, rdst_burton,'-b', label='Dst Burton et al. 1975',markersize=3, linewidth=1)
#plt.plot_date(rtimes7+1/24, rdst_obrien,'-k', label='Dst OBrien & McPherron 2000',markersize=3, linewidth=1)
plt.plot_date(rtimes7+1/24, rdst_temerin_li,'-r', label='Dst Temerin Li 2002',markersize=3, linewidth=1)
#**** This error is only a placeholder
error=15#
plt.fill_between(rtimes7+1/24, rdst_temerin_li-error, rdst_temerin_li+error, alpha=0.2)
#real Dst
#for AER
#plt.plot_date(rtimes7, rdst7,'ko', label='Dst observed',markersize=4)
#for Kyoto
plt.plot_date(dst['time'], dst['dst'],'ko', label='Dst observed',markersize=4)
plt.ylabel('Dst [nT]', fontsize=fsize+2)
ax6.xaxis.set_major_formatter(myformat)
ax6.legend(loc=2,ncol=3,fontsize=fsize-2)
plt.xlim([np.ceil(dscovr['time'])[0],dscovr['time'][-1]])
# NOTE(review): y-limit uses rdst_burton although only rdst_temerin_li is
# drawn (the Burton curve is commented out above) — confirm this is intended
plt.ylim([np.nanmin(rdst_burton)-50,50])
plt.xticks(fontsize=fsize)
plt.yticks(fontsize=fsize)
#add geomagnetic storm levels
plt.plot_date([rtimes7[0], rtimes7[-1]], [-50,-50],'--k', alpha=0.3, linewidth=1)
plt.annotate('moderate',xy=(rtimes7[0],-50+2),xytext=(rtimes7[0],-50+2),color='k', fontsize=10)
plt.plot_date([rtimes7[0], rtimes7[-1]], [-100,-100],'--k', alpha=0.3, linewidth=1)
plt.annotate('intense',xy=(rtimes7[0],-100+2),xytext=(rtimes7[0],-100+2),color='k', fontsize=10)
plt.plot_date([rtimes7[0], rtimes7[-1]], [-250,-250],'--k', alpha=0.3, linewidth=1)
plt.annotate('super-storm',xy=(rtimes7[0],-250+2),xytext=(rtimes7[0],-250+2),color='k', fontsize=10)
#save plot, timestamped file name in the 'real' output directory
filename='real/predstorm_realtime_input_1_'+timenowstr[0:10]+'-'+timenowstr[11:13]+'_'+timenowstr[14:16]+'.jpg'
plt.savefig(filename)
#filename='real/predstorm_realtime_input_1_'+timenowstr[0:10]+'-'+timenowstr[11:13]+'_'+timenowstr[14:16]+'.eps'
#plt.savefig(filename)
################################# (1b) get OMNI training data ##############################
#download from ftp://nssdcftp.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat
# if not here download OMNI2 data (only needed first time running the program, currently 155 MB)
#######################
### slice data for comparison of solar wind to Dst conversion
print()
print()
print('OMNI2 1 hour training data, number of points available: ', np.size(omni['speed']))
print('start date:',str(num2date(np.min(omni['time']))))
print('end date:',str(num2date(np.max(omni['time']))))
# convert the configured training window to datenums, shifted back by the
# observing/forecast window deltat (hours -> days via /24)
trainstartnum=date2num(datetime.strptime(trainstart, "%Y-%m-%d %H:%M"))-deltat/24
trainendnum=date2num(datetime.strptime(trainend, "%Y-%m-%d %H:%M"))-deltat/24
print('Training data start and end interval: ', trainstart, ' ', trainend)
####### "now-wind" is 24 hour data ist rptimes24, rpv24, rbtimes24, btot24
#rename for plotting and analysis:
timesnp=rptimes24
speedn=rpv24
timesnb=rbtimes24
btotn=btot24
bzgsmn=bzgsm24
bygsmn=bygsm24
bxn=bxgsm24
denn=rpn24
print()
print()
print('Number of data points in now-wind:', np.size(btotn))
print('Observing and forecasting window delta-T: ',deltat,' hours')
print('Time now: ', str(num2date(timenow)))
print()
print('-------------------------------------------------')
print()
#================================== (2) SLIDING window pattern recognition ==============
# search for matches of the now wind with the training data
calculation_start=time.time()
#---------- sliding window analysis start
#select array from OMNI data as defined by training start and end time
# startindex/endindex: first and one-past-last OMNI sample of the training interval
startindex=np.max(np.where(trainstartnum > omni['time']))+1
endindex=np.max(np.where(trainendnum > omni['time']))+1
trainsize=endindex-startindex
print('Data points in training data set: ', trainsize)
#these are the arrays for the correlations between now wind and training data
# (the correlation method is currently disabled; see the commented corrcoef lines)
corr_count_b=np.zeros(trainsize)
corr_count_bz=np.zeros(trainsize)
corr_count_by=np.zeros(trainsize)
corr_count_bx=np.zeros(trainsize)
corr_count_v=np.zeros(trainsize)
corr_count_n=np.zeros(trainsize)
#these are the arrays for the squared distances between now wind and training data
dist_count_b=np.zeros(trainsize)
dist_count_bz=np.zeros(trainsize)
dist_count_by=np.zeros(trainsize)
dist_count_bx=np.zeros(trainsize)
dist_count_v=np.zeros(trainsize)
dist_count_n=np.zeros(trainsize)
## sliding window analysis
# For each start hour i of the training set, compare the next deltat hours of
# OMNI data against the observed now-wind with a per-variable RMS distance
# (smaller = better match).
for i in np.arange(0,trainsize):
    #go forward in time from start of training set in 1 hour increments
    #timeslidenum=trainstartnum+i/24
    #print(str(num2date(timeslidenum)))
    #*** this can be optimized with the startindex from above (so where is not necessary)
    #look this time up in the omni data and extract the next deltat hours
    #inds=np.where(timeslidenum==times1)[0][0]
    #simpler method:
    inds=startindex+i
    #for btotal field
    btots=omni['btot'][inds:inds+deltat+1]
    #get correlation of training data btots with now-wind btotn
    #corr_count_b[i]=np.corrcoef(btotn,btots)[0][1]
    dist_count_b[i]=np.sqrt(np.sum((btotn-btots)**2))/np.size(btotn)
    #same for bzgsm
    bzgsms=omni['bz'][inds:inds+deltat+1]
    #corr_count_bz[i]=np.corrcoef(bzgsmn,bzgsms)[0][1]
    dist_count_bz[i]=np.sqrt(np.sum((bzgsmn-bzgsms)**2))/np.size(bzgsmn)
    #same for bygsm
    bygsms=omni['by'][inds:inds+deltat+1]
    dist_count_by[i]=np.sqrt(np.sum((bygsmn-bygsms)**2))/np.size(bygsmn)
    #same for bx
    bxs=omni['bx'][inds:inds+deltat+1]
    dist_count_bx[i]=np.sqrt(np.sum((bxn-bxs)**2))/np.size(bxn)
    #same for speed
    speeds=omni['speed'][inds:inds+deltat+1]
    #when there is no nan:
    #if np.sum(np.isnan(speeds)) == 0:
    dist_count_v[i]=np.sqrt(np.sum((speedn-speeds)**2))/np.size(speedn)
    #corr_count_v[i]=np.corrcoef(speedn,speeds)[0][1]
    #see Riley et al. 2017 equation 1 but divided by size
    #so this measure is the average rms error
    #same for density
    dens=omni['density'][inds:inds+deltat+1]
    #corr_count_n[i]=np.corrcoef(denn,dens)[0][1]
    dist_count_n[i]=np.sqrt(np.sum((denn-dens)**2))/np.size(denn)
### done
#for Btot
#method with minimum rms distance
# NOTE: use the NaN-aware nanmin/nanargmin (consistent with the V and N panels
# below, which already use nanmin "because nan's might show up"): np.argmin
# would return the position of the first NaN if data gaps are present, and
# np.min would return NaN itself.
maxval_b=np.nanmin(dist_count_b)
maxpos_b=np.nanargmin(dist_count_b)
#the 49 best matches (smallest distances sit at the start of argsort's result)
top50_b=np.argsort(dist_count_b)[0:49]
print('find minimum of B distance at index:')
print(round(maxval_b,1), ' nT index: ',maxpos_b)
#go forward in time from training data set start to the position of the best
#match + deltat hours (take the future part coming after the best match)
indp_b=startindex+maxpos_b+deltat
#select array from OMNI data for predicted wind - all with p at the end
btotp=omni['btot'][indp_b:indp_b+deltat+1]
#for Bx
#method with minimum rms distance (NaN-aware, see note at the Btot panel:
#np.argmin returns the index of the first NaN when NaNs are present)
maxval_bx=np.nanmin(dist_count_bx)
maxpos_bx=np.nanargmin(dist_count_bx)
top50_bx=np.argsort(dist_count_bx)[0:49]
# BUGFIX: this message previously said 'BzGSM' (copy-paste) although it
# reports the Bx match
print('find minimum of Bx distance at index:')
print(round(maxval_bx,1), ' nT index: ',maxpos_bx)
#go forward in time from training data set start to the position of the best match + deltat hours
#(so you take the future part coming after wind where the best match is seen)
indp_bx=startindex+maxpos_bx+deltat
#select array from OMNI data for predicted wind - predictions all have a p at the end
bxp=omni['bx'][indp_bx:indp_bx+deltat+1]
#for ByGSM
#method with minimum rms distance (NaN-aware, see note at the Btot panel)
maxval_by=np.nanmin(dist_count_by)
maxpos_by=np.nanargmin(dist_count_by)
top50_by=np.argsort(dist_count_by)[0:49]
# BUGFIX: this message previously said 'BzGSM' (copy-paste) although it
# reports the ByGSM match
print('find minimum of ByGSM distance at index:')
print(round(maxval_by,1), ' nT index: ',maxpos_by)
#go forward in time from training data set start to the position of the best match + deltat hours
#(so you take the future part coming after wind where the best match is seen)
indp_by=startindex+maxpos_by+deltat
#select array from OMNI data for predicted wind - predictions all have a p at the end
byp=omni['by'][indp_by:indp_by+deltat+1]
#for BzGSM
#method with minimum rms distance
# BUGFIX: nanmin was already used for the minimum value, but the *position*
# was taken with np.argmin, which returns the index of the first NaN when
# NaNs are present -- use np.nanargmin so value and index agree.
maxval_bz=np.nanmin(dist_count_bz)
maxpos_bz=np.nanargmin(dist_count_bz)
top50_bz=np.argsort(dist_count_bz)[0:49]
print('find minimum of BzGSM distance at index:')
print(round(maxval_bz,1), ' nT index: ',maxpos_bz)
#go forward in time from training data set start to the position of the best match + deltat hours
#(so you take the future part coming after wind where the best match is seen)
indp_bz=startindex+maxpos_bz+deltat
#select array from OMNI data for predicted wind - predictions all have a p at the end
bzp=omni['bz'][indp_bz:indp_bz+deltat+1]
#for V
#use nanmin/nanargmin because nan's might show up in dist_count
maxval_v=np.nanmin(dist_count_v)
maxpos_v=np.nanargmin(dist_count_v)
top50_v=np.argsort(dist_count_v)[0:49]
print('find minimum of V distance at index:')
print(round(maxval_v), ' km/s index: ',maxpos_v)
#select array from OMNI data for predicted wind - all with p at the end
indp_v=startindex+maxpos_v+deltat
speedp=omni['speed'][indp_v:indp_v+deltat+1]
#for N
#use nanmin/nanargmin because nan's might show up in dist_count_n
maxval_n=np.nanmin(dist_count_n)
maxpos_n=np.nanargmin(dist_count_n)
top50_n=np.argsort(dist_count_n)[0:49]
print('find minimum of N distance at index:')
print(round(maxval_n,1), ' ccm-3 index: ',maxpos_n)
#select array from OMNI data for predicted wind - all with p at the end
indp_n=startindex+maxpos_n+deltat
denp=omni['density'][indp_n:indp_n+deltat+1]
#---------- sliding window analysis end
calculation_time=round(time.time()-calculation_start,2)
print('Calculation Time in seconds: ', calculation_time)
#================================== (3) plot FORECAST results ========================================
sns.set_context("talk")
sns.set_style("darkgrid")
#fig=plt.figure(3,figsize=(15,13))
#for testing
fig=plt.figure(3,figsize=(13,11))
weite=1   # line width used throughout the panels
fsize=11  # base font size
#------------------- Panel 1 Btotal
ax1 = fig.add_subplot(411)
#for previous plot best 50 correlations
for j in np.arange(49):
    #search for index in OMNI data for each of the top50 entries
    indp_b50=startindex+top50_b[j]
    btot50=omni['btot'][indp_b50:indp_b50+deltat+1]
    #plot for previous times
    plt.plot_date(timesnb,btot50, 'lightgrey', linewidth=weite, alpha=0.9)
#plot the now wind
plt.plot_date(timesnb,btotn, 'k', linewidth=weite, label='observation')
#dummy plots at (0,0): legend entries only
plt.plot_date(0,0, 'lightgrey', linewidth=weite, alpha=0.8)#,label='50 best B matches')
plt.plot_date(0,0, 'g', linewidth=weite, alpha=0.8)#,label='B predictions from 50 matches')
#for future plot best 50 correlations
for j in np.arange(49):
    #search for index in OMNI data for each of the top50 entries,
    #add a deltat for selecting the deltat after the data
    indp_b50=startindex+top50_b[j]+deltat
    btot50=omni['btot'][indp_b50:indp_b50+deltat+1]
    #plot for future time
    plt.plot_date(timesfb,btot50, 'g', linewidth=weite, alpha=0.4)
#predicted wind best match
plt.plot_date(timesfb,btotp, 'b', linewidth=weite+1, label='prediction')
plt.ylabel('Magnetic field B [nT]', fontsize=fsize+2)
plt.xlim((timesnb[0], timesfb[-1]))
#indicate average level of training data btot
btraining_mean=np.nanmean(omni['btot'][startindex:endindex])
plt.plot_date([timesnp[0], timesfp[-1]], [btraining_mean,btraining_mean],'--k', alpha=0.5, linewidth=1)
plt.annotate('average',xy=(timesnp[0],btraining_mean),xytext=(timesnp[0],btraining_mean),color='k', fontsize=10)
#add *** make ticks in 6h distances starting with 0, 6, 12 UT
myformat = DateFormatter('%Y %b %d %Hh')
ax1.xaxis.set_major_formatter(myformat)
#red vertical line marks the boundary between observation and prediction ("now")
plt.plot_date([timesnb[-1],timesnb[-1]],[0,100],'-r', linewidth=3)
plt.ylim(0,max(btotp)+12)
#ax1.legend(loc=2, fontsize=fsize-2, ncol=2)
plt.annotate('now',xy=(timenow,max(btotp)+12-3),xytext=(timenow+0.01,max(btotp)+12-3),color='r', fontsize=15)
plt.annotate('observation',xy=(timenow,max(btotp)+12-3),xytext=(timenow-0.55,max(btotp)+12-3),color='k', fontsize=15)
plt.annotate('prediction',xy=(timenow,max(btotp)+12-3),xytext=(timenow+0.45,max(btotp)+12-3),color='b', fontsize=15)
plt.yticks(fontsize=fsize)
plt.xticks(fontsize=fsize)
plt.title('PREDSTORM L1 solar wind and magnetic storm prediction with unsupervised pattern recognition for '+ str(num2date(timenow))[0:16]+ ' UT', fontsize=15)
#------------------------ Panel 2 BZ
ax2 = fig.add_subplot(412)
#plot best 50 correlations for now wind (grey background traces)
for j in np.arange(49):
    #search for index in OMNI data for each of the top50 entries
    indp_bz50=startindex+top50_bz[j]
    bz50=omni['bz'][indp_bz50:indp_bz50+deltat+1]
    #plot for previous times
    plt.plot_date(timesnb,bz50, 'lightgrey', linewidth=weite, alpha=0.9)
#this is the observed now wind
plt.plot_date(timesnb,bzgsmn, 'k', linewidth=weite, label='Bz observed by DSCOVR')
#dummy plots at (0,0): legend entries only
plt.plot_date(0,0, 'lightgrey', linewidth=weite, alpha=0.8,label='50 best Bz matches')
plt.plot_date(0,0, 'g', linewidth=weite, alpha=0.8,label='Bz predictions from 50 matches')
for j in np.arange(49):
#search for index in OMNI data for each of the top50 entries, add a deltat for selecting | |
if pos >= len(s):
raise IndexError("`pos` %d not in string" % pos)
# *don't* count last '\n', if it is at pos!
line = s.count('\n',0,pos)
if line:
return line + 1, pos - s.rfind('\n',0,pos) - 1
else:
return 1, pos
#_. TERMINAL OUTPUT FUNCTIONS
def prin(*args, **kwargs):
    r"""Like ``print``, but a function. I.e. prints out all arguments as
    ``print`` would do. Specify output stream like this::

        prin('ERROR', out=sys.stderr)
    """
    # Python 2 print statement; out=None means sys.stdout.
    print >> kwargs.get('out',None), " ".join([str(arg) for arg in args])

# extra doctest for prin, collected by doctest via the module-level __test__ dict
__test__['prin'] = r"""
>>> prin(1,2,3,out=None)
1 2 3
"""
def underline(s, lineC='-'):
    r"""Return `s` followed by a newline, a ruler of ``len(s)`` `lineC`
    characters (default ``'-'``) and a final newline.

    Example:

    >>> print underline("TOP SECRET", '*'),
    TOP SECRET
    **********
    """
    ruler = lineC * len(s)
    return "%s\n%s\n" % (s, ruler)
def fitString(s, maxCol=79, newlineReplacement=None):
    r"""Truncate `s` if necessary to fit into a line of width `maxCol`
    (default: 79), also replacing newlines with `newlineReplacement` (default
    `None`: in which case everything after the first newline is simply
    discarded).

    Examples:

    >>> fitString('12345', maxCol=5)
    '12345'
    >>> fitString('123456', maxCol=5)
    '12...'
    >>> fitString('a line\na second line')
    'a line'
    >>> fitString('a line\na second line', newlineReplacement='\\n')
    'a line\\na second line'
    """
    assert isString(s)
    # first collapse the string to a single line
    if '\n' in s:
        if newlineReplacement is None:
            s = s.partition('\n')[0]
        else:
            s = s.replace("\n", newlineReplacement)
    # then truncate, reserving three columns for the ellipsis
    if maxCol is not None and len(s) > maxCol:
        s = s[:maxCol-3] + "..."
    return s
#_. EVIL THINGS
def magicGlobals(level=1):
    r"""Return the globals of the *caller*'s caller (default), or `level`
    callers up."""
    # frame [0] is magicGlobals itself, so [1+level] reaches the caller's caller
    return inspect.getouterframes(inspect.currentframe())[1+level][0].f_globals
def magicLocals(level=1):
    r"""Return the locals of the *caller*'s caller (default) , or `level`
    callers up.
    """
    # frame [0] is magicLocals itself, so [1+level] reaches the caller's caller
    return inspect.getouterframes(inspect.currentframe())[1+level][0].f_locals
def thisModule():
    # Return the module object of the caller three frames up the stack.
    # NOTE(review): the hard-coded depth of 3 assumes a specific call chain;
    # confirm before reusing this helper elsewhere.
    return sys.modules[sys._getframe(3).f_globals['__name__']]
#_. PERSISTENCE
def __saveVarsHelper(filename, varNamesStr, outOf, extension='.bpickle', **opts):
    # Shared argument preprocessing for saveVars/addVars: expand '~', resolve
    # the source namespace, split the names string and append the default
    # extension when none is given.
    filename = os.path.expanduser(filename)
    if outOf is None: outOf = magicGlobals(2)
    if not varNamesStr or not isString(varNamesStr):
        raise ValueError, "varNamesStr must be a string!"
    varnames = varNamesStr.split()
    if not splitext(filename)[1]: filename += extension
    # refuse to clobber an existing file only if overwrite=0 was passed
    if opts.get("overwrite") == 0 and os.path.exists(filename):
        raise RuntimeError("File already exists")
    return filename, varnames, outOf
def saveVars(filename, varNamesStr, outOf=None, **opts):
    r"""Pickle name and value of all those variables in `outOf` (default: all
    global variables (as seen from the caller)) that are named in
    `varNamesStr` into a file called `filename` (if no extension is given,
    '.bpickle' is appended). Overwrites file without asking, unless you
    specify `overwrite=0`. Load again with `loadVars`.

    Thus, to save the global variables ``bar``, ``foo`` and ``baz`` in the
    file 'savedVars' do::

        saveVars('savedVars', 'bar foo baz')
    """
    filename, varnames, outOf = __saveVarsHelper(
        filename, varNamesStr, outOf, **opts)
    print "pickling:\n", "\n".join(sorted(varnames))
    try:
        f = None
        f = open(filename, "wb")
        cPickle.dump(dict(zip(varnames, atIndices(outOf, varnames))),
                     f, 1) # UGH: cPickle, unlike pickle doesn't accept bin=1
    finally:
        if f: f.close()
#FIXME untested
def saveDict(filename, d, **kwargs):
    # Convenience wrapper: pickle every key/value of dict `d` via saveVars.
    saveVars(filename, " ".join(d.keys()), outOf=d, **kwargs)
def addVars(filename, varNamesStr, outOf=None):
    r"""Like `saveVars`, but appends additional variables to file."""
    filename, varnames, outOf = __saveVarsHelper(filename, varNamesStr, outOf)
    f = None
    try:
        # read the existing pickle, merge in the new variables, write back
        f = open(filename, "rb")
        h = cPickle.load(f)
        f.close()
        h.update(dict(zip(varnames, atIndices(outOf, varnames))))
        f = open(filename, "wb")
        cPickle.dump( h, f , 1 )
    finally:
        if f: f.close()
def loadDict(filename):
    """Return the variables pickled into `filename` with `saveVars`
    as a dict."""
    filename = os.path.expanduser(filename)
    # default extension matches the one saveVars appends
    if not splitext(filename)[1]: filename += ".bpickle"
    f = None
    try:
        f = open(filename, "rb")
        varH = cPickle.load(f)
    finally:
        if f: f.close()
    return varH
def loadVars(filename, ask=True, into=None, only=None):
    r"""Load variables pickled with `saveVars`.

    Parameters:

    - `ask`: If `True` then don't overwrite existing variables without
      asking.
    - `only`: A list to limit the variables to or `None`.
    - `into`: The dictionary the variables should be loaded into (defaults
      to global dictionary).
    """
    filename = os.path.expanduser(filename)
    if into is None: into = magicGlobals()
    varH = loadDict(filename)
    toUnpickle = only or varH.keys()
    # names that would be overwritten in the target namespace
    alreadyDefined = filter(into.has_key, toUnpickle)
    if alreadyDefined and ask:
        print "The following vars already exist; overwrite (yes/NO)?\n",\
              "\n".join(alreadyDefined)
        if raw_input() != "yes":
            toUnpickle = without(toUnpickle, alreadyDefined)
    if not toUnpickle:
        print "nothing to unpickle"
        return None
    print "unpickling:\n",\
          "\n".join(sorted(toUnpickle))
    # drop everything that was not selected before merging into the namespace
    for k in varH.keys():
        if k not in toUnpickle:
            del varH[k]
    into.update(varH)
#_. SCRIPTING
def runInfo(prog=None,vers=None,date=None,user=None,dir=None,args=None):
    r"""Create a short info string detailing how a program was invoked. This is
    meant to be added to a history comment field of a data file were it is
    important to keep track of what programs modified it and how.

    Every field defaults to the current process's value (sys.argv[0], the
    caller's ``__version__``, current time, user and working directory).

    !!!:`args` should be a **``list``** not a ``str``."""
    # BUGFIX: the format string previously ended in "'n" -- a typo for the
    # newline escape "\n".
    return "%(prog)s %(vers)s;" \
           " run %(date)s by %(usr)s in %(dir)s with: %(args)s\n" % \
           mkDict(prog=prog or sys.argv[0],
                  vers=vers or magicGlobals().get("__version__", ""),
                  date=date or isoDateTimeStr(),
                  usr=user or getpass.getuser(),
                  dir=dir or os.getcwd(),
                  args=" ".join(args or sys.argv))
class DryRun:
    """A little class that is usefull for debugging and testing and for
    programs that should have a "dry run" option.

    Examples:

    >>> import sys
    >>> from os import system
    >>> dry = True
    >>> # that's how you can switch the programms behaviour between dry run
    >>>
    >>> if dry:
    ...     # (`out` defaults to stdout, but we need it here for doctest)
    ...     run = DryRun(dry=True, out=sys.stdout)
    ... else:
    ...     run = DryRun(dry=False, out=sys.stdout)
    ...
    >>> ## IGNORE 2 hacks to get doctest working, please ignore
    >>> def system(x): print "hallo"
    ...
    >>> run.__repr__ = lambda : "<awmstools.DryRun instance at 0x8222d6c>"
    >>> ## UNIGNORE
    >>> run(system, 'echo "hallo"')
    system('echo "hallo"')
    >>> # now let's get serious
    >>> run.dry = False
    >>> run(system, 'echo "hallo"')
    hallo
    >>> # just show what command would be run again
    >>> run.dry = True
    >>> # You can also specify how the output for a certain function should be
    ... # look like, e.g. if you just want to print the command for all system
    ... # calls, do the following:
    >>> run.addFormatter(system, lambda x,*args, **kwargs: args[0])
    <awmstools.DryRun instance at 0x8222d6c>
    >>> run(system, 'echo "hallo"')
    echo "hallo"
    >>> # Other functions will still be formated with run.defaultFormatter, which
    ... # gives the following results
    >>> run(int, "010101", 2)
    int('010101', 2)
    >>> # Switch to wet run again:
    >>> run.dry = False
    >>> run(int, "010101", 2)
    21
    >>>

    Caveats:

    - remember that arguments might look different from what you specified in
      the source-code, e.g::

      >>> run.dry = True
      >>> run(chr, 0x50)
      chr(80)
      >>>

    - 'DryRun(showModule=True)' will try to print the module name where func
      was defined, this might however *not* produce the results you
      expected. For example, a function might be defined in another module
      than the one from which you imported it:

      >>> run = DryRun(dry=True, showModule=True)
      >>> run(os.path.join, "path", "file")
      posixpath.join('path', 'file')
      >>>

    see `DryRun.__init__` for more details."""
    def __init__(self, dry=True, out=None, showModule=False):
        """
        Parameters:

        - `dry` : specifies whether to do a try run or not.
        - `out` : is the stream to which all dry runs will be printed
          (default stdout).
        - `showModule` : specifies whether the name of a module in which a
          function was defined is printed (if known).
        """
        self.dry = dry
        self.formatterDict = {}  # per-function custom formatters (see addFormatter)
        self.out = out
        self.showModule = showModule
        # default textual rendering of a call: "funcName(arg1, ..., kw=val)"
        def defaultFormatter(func, *args, **kwargs):
            if self.showModule and inspect.getmodule(func):
                funcName = inspect.getmodule(func).__name__ + '.' + func.__name__
            else:
                funcName = func.__name__
            return "%s(%s)" % (funcName,
                               ", ".join(map(repr,args) + map(lambda x: "%s=%s" %
                                             tuple(map(repr,x)), kwargs.items())))
        self.defaultFormatter = defaultFormatter
    def __call__(self, func, *args, **kwargs):
        """Shorcut for self.run."""
        return self.run(func, *args, **kwargs)
    def run(self, func, *args, **kwargs):
        """Same as ``self.dryRun`` if ``self.dry``, else same as ``self.wetRun``."""
        if self.dry:
            return self.dryRun(func, *args, **kwargs)
        else:
            return self.wetRun(func, *args, **kwargs)
    def addFormatter(self, func, formatter):
        """Sets the function that is used to format calls to func. formatter is a
        function that is supposed to take these arguments: `func`, `*args` and
        `**kwargs`."""
        self.formatterDict[func] = formatter
        return self
    def dryRun(self, func, *args, **kwargs):
        """Instead of running function with `*args` and `**kwargs`, just print
        out the function call."""
        # Python 2 print statement; self.out=None means sys.stdout.
        print >> self.out, \
              self.formatterDict.get(func, self.defaultFormatter)(func, *args, **kwargs)
    def wetRun(self, func, *args, **kwargs):
        """Run function with ``*args`` and ``**kwargs``."""
        return func(*args, **kwargs)
#_. DEBUGGING/INTERACTIVE DEVELOPMENT
def makePrintReturner(pre="", post="" ,out=None):
r"""Creates functions that print out their argument, (between optional
`pre` and `post` strings) and return it unmodified. This is usefull for
debugging e.g. parts of expressions, without having to modify the behavior
of the program.
Example:
>>> makePrintReturner(pre="The value is:", post="[returning]")(3)
The value is: 3 [returning]
| |
set consumer to read from start.
etl_config_path = os.path.join(_get_project_dir_path(project_name), 'working_dir/etl_config.json')
if not os.path.exists(etl_config_path):
etl_config = {
"input_args": {
"auto_offset_reset": "earliest",
"fetch_max_bytes": 52428800,
"max_partition_fetch_bytes": 10485760,
"max_poll_records": 10
},
"output_args": {
"max_request_size": 10485760,
"compression_type": "gzip"
}
}
write_to_file(json.dumps(etl_config, indent=2), etl_config_path)
# 3. sandpaper
# 3.1 delete previous index
url = '{}/{}'.format(
config['es']['sample_url'],
project_name
)
try:
resp = requests.delete(url, timeout=10)
except:
pass # ignore no index error
# 3.2 create new index
url = '{}/mapping?url={}&project={}&index={}&endpoint={}'.format(
config['sandpaper']['url'],
config['sandpaper']['ws_url'],
project_name,
data[project_name]['master_config']['index']['sample'],
config['es']['sample_url']
)
resp = requests.put(url, timeout=10)
if resp.status_code // 100 != 2:
return rest.internal_error('failed to create index in sandpaper')
# 3.3 switch index
url = '{}/config?url={}&project={}&index={}&endpoint={}'.format(
config['sandpaper']['url'],
config['sandpaper']['ws_url'],
project_name,
data[project_name]['master_config']['index']['sample'],
config['es']['sample_url']
)
resp = requests.post(url, timeout=10)
if resp.status_code // 100 != 2:
return rest.internal_error('failed to switch index in sandpaper')
# 4. clean up added data status
logger.info('re-add data')
with data[project_name]['locks']['status']:
if 'added_docs' not in data[project_name]['status']:
data[project_name]['status']['added_docs'] = dict()
for tld in data[project_name]['status']['added_docs'].iterkeys():
data[project_name]['status']['added_docs'][tld] = 0
with data[project_name]['locks']['data']:
for tld in data[project_name]['data'].iterkeys():
for doc_id in data[project_name]['data'][tld]:
data[project_name]['data'][tld][doc_id]['add_to_queue'] = False
set_status_dirty(project_name)
# 5. restart extraction
data[project_name]['data_pushing_worker'].stop_adding_data = False
return Actions.etk_extract(project_name)
@staticmethod
def reload_blacklist(project_name):
    """Stop ETK, regenerate its config and re-apply field blacklists by
    re-adding the affected documents in a background thread."""
    if project_name not in data:
        return rest.not_found('project {} not found'.format(project_name))
    # ETK must be fully stopped before its config is regenerated.
    if not Actions._etk_stop(project_name):
        return rest.internal_error('failed to kill_etk in ETL')
    Actions._generate_etk_config(project_name)
    # Fetch and re-add the blacklisted documents asynchronously.
    worker = threading.Thread(
        target=Data._reload_blacklist_worker,
        args=(project_name,),
        name='reload_blacklist')
    worker.start()
    data[project_name]['threads'].append(worker)
    return rest.accepted()
@staticmethod
def _reload_blacklist_worker(project_name):
    """For every field with a blacklist, find all sample-index docs whose
    extracted key matches a blacklisted term and re-add them to the input
    queue, then restart extraction."""
    # copy here to avoid modification while iteration
    for field_name, field_obj in data[project_name]['master_config']['fields'].items():
        if 'blacklists' not in field_obj or len(field_obj['blacklists']) == 0:
            continue
        # get all stop words and generate query
        # only use the last blacklist if there are multiple blacklists
        blacklist = data[project_name]['master_config']['fields'][field_name]['blacklists'][-1]
        file_path = os.path.join(_get_project_dir_path(project_name),
                                 'glossaries', '{}.txt'.format(blacklist))
        query_conditions = []
        # one term-query per non-empty line of the blacklist glossary
        with codecs.open(file_path, 'r') as f:
            for line in f:
                key = line.strip()
                if len(key) == 0:
                    continue
                query_conditions.append(
                    '{{ "term": {{"knowledge_graph.{field_name}.key": "{key}"}} }}'
                    .format(field_name=field_name, key=key))
        query = """
        {{
            "size": 1000,
            "query": {{
                "bool": {{
                    "should": [{conditions}]
                }}
            }},
            "_source": ["doc_id", "tld"]
        }}
        """.format(conditions=','.join(query_conditions))
        logger.debug(query)
        # init query; use the scroll API to page through all matches
        scroll_alive_time = '1m'
        es = ES(config['es']['sample_url'])
        r = es.search(project_name, data[project_name]['master_config']['root_name'], query,
                      params={'scroll': scroll_alive_time}, ignore_no_index=False)
        if r is None:
            return
        scroll_id = r['_scroll_id']
        Actions._re_add_docs(r, project_name)
        # scroll queries until the result pages are exhausted
        while True:
            # use the es object here directly
            r = es.es.scroll(scroll_id=scroll_id, scroll=scroll_alive_time)
            if r is None:
                break
            if len(r['hits']['hits']) == 0:
                break
            Actions._re_add_docs(r, project_name)
    Actions.etk_extract(project_name)
@staticmethod
def _re_add_docs(resp, project_name):
    """Publish every doc in an ES search response back onto the project's
    Kafka input topic; failures are logged and skipped (best effort)."""
    input_topic = project_name + '_in'
    for obj in resp['hits']['hits']:
        doc_id = obj['_source']['doc_id']
        tld = obj['_source']['tld']
        try:
            logger.info('re-add doc %s (%s)', doc_id, tld)
            ret, msg = Actions._publish_to_kafka_input_queue(
                doc_id, data[project_name]['data'][tld][doc_id], kafka_producer, input_topic)
            if not ret:
                logger.error('Error of re-adding data to Kafka: %s', msg)
        except Exception as e:
            # a malformed/missing catalog entry must not abort the whole batch
            logger.exception('error in re_add_docs')
@staticmethod
def etk_extract(project_name, clean_up_queue=False):
    """Ask the ETL service to start ETK extraction for `project_name`.

    `clean_up_queue=True` makes the workers skip any backlog by seeking
    both Kafka offsets to the end. Returns a REST response object.
    """
    if Actions._is_etk_running(project_name):
        return rest.exists('already running')
    # etk_config_file_path = os.path.join(
    #     _get_project_dir_path(project_name), 'working_dir/etk_config.json')
    # if not os.path.exists(etk_config_file_path):
    #     return rest.not_found('No etk config')
    # recreate etk config every time
    Actions._generate_etk_config(project_name)
    url = '{}/{}'.format(
        config['es']['sample_url'],
        project_name
    )
    # extraction requires the project's sample index to exist
    try:
        resp = requests.get(url, timeout=10)
        if resp.status_code // 100 != 2:
            return rest.not_found('No es index')
    except Exception as e:
        return rest.not_found('No es index')
    url = config['etl']['url'] + '/run_etk'
    payload = {
        'project_name': project_name,
        'number_of_workers': config['etl']['number_of_workers']
    }
    if clean_up_queue:
        payload['input_offset'] = 'seek_to_end'
        payload['output_offset'] = 'seek_to_end'
    resp = requests.post(url, json.dumps(payload), timeout=config['etl']['timeout'])
    if resp.status_code // 100 != 2:
        return rest.internal_error('failed to run_etk in ETL')
    return rest.accepted()
@staticmethod
def _etk_stop(project_name, wait_till_kill=True, clean_up_queue=False):
    """Ask the ETL service to kill the ETK workers for `project_name`.

    With `wait_till_kill` (default) this polls every 5 seconds until the
    workers are really gone. Returns True on success, False otherwise.
    """
    url = config['etl']['url'] + '/kill_etk'
    payload = {
        'project_name': project_name
    }
    if clean_up_queue:
        payload['input_offset'] = 'seek_to_end'
        payload['output_offset'] = 'seek_to_end'
    resp = requests.post(url, json.dumps(payload), timeout=config['etl']['timeout'])
    if resp.status_code // 100 != 2:
        logger.error('failed to kill_etk in ETL')
        return False
    if wait_till_kill:
        # block until the ETL service reports that ETK has stopped
        while True:
            time.sleep(5)
            if not Actions._is_etk_running(project_name):
                break
    return True
@staticmethod
def _publish_to_kafka_input_queue(doc_id, catalog_obj, producer, topic):
    """Load a document from the catalog (metadata JSON + raw content) and
    publish it to the given Kafka topic.

    Returns (True, '') on success or (False, error_message) on failure.
    """
    try:
        with codecs.open(catalog_obj['json_path'], 'r') as f:
            doc_obj = json.loads(f.read())
        with codecs.open(catalog_obj['raw_content_path'], 'r') as f:
            doc_obj['raw_content'] = f.read()  # .decode('utf-8', 'ignore')
    except Exception as e:
        logger.exception('error in reading file from catalog')
        return False, 'error in reading file from catalog'
    try:
        r = producer.send(topic, doc_obj)
        r.get(timeout=60)  # wait till sent
        logger.info('sent %s to topic %s', doc_id, topic)
    except Exception as e:
        logger.exception('error in sending data to kafka queue')
        return False, 'error in sending data to kafka queue'
    return True, ''
class DataPushingWorker(threading.Thread):
    """Background thread that feeds catalog documents into the project's
    Kafka input topic until each TLD reaches its desired document count."""
    def __init__(self, project_name, sleep_interval):
        super(DataPushingWorker, self).__init__()
        self.project_name = project_name
        self.exit_signal = False       # set by the owner to stop the thread
        self.stop_adding_data = False  # pause switch, toggled externally
        self.is_adding_data = False    # True while a push round is in progress
        self.sleep_interval = sleep_interval
        # set up input kafka
        self.producer = kafka_producer
        self.input_topic = project_name + '_in'
    def get_status(self):
        # Snapshot of the worker's flags for status endpoints.
        return {
            'stop_adding_data': self.stop_adding_data,
            'is_adding_data': self.is_adding_data,
            'sleep_interval': self.sleep_interval
        }
    def run(self):
        logger.info('thread DataPushingWorker running... %s', self.project_name)
        while not self.exit_signal:
            if not self.stop_adding_data:
                self._add_data_worker(self.project_name, self.producer, self.input_topic)
            # wait interval, in 0.1 s slices so exit_signal is noticed quickly
            t = self.sleep_interval * 10
            while t > 0 and not self.exit_signal:
                time.sleep(0.1)
                t -= 1
    def _add_data_worker(self, project_name, producer, input_topic):
        # Non-blocking acquire: if another thread holds the data lock,
        # simply skip this round instead of waiting.
        got_lock = data[project_name]['locks']['data'].acquire(False)
        try:
            # print '_add_data_worker got data lock?', got_lock
            if not got_lock or self.stop_adding_data:
                return
            for tld in data[project_name]['data'].iterkeys():
                if self.stop_adding_data:
                    break
                # read (and lazily initialize) the per-TLD counters under the
                # status lock
                with data[project_name]['locks']['status']:
                    if tld not in data[project_name]['status']['added_docs']:
                        data[project_name]['status']['added_docs'][tld] = 0
                    if tld not in data[project_name]['status']['desired_docs']:
                        data[project_name]['status']['desired_docs'][tld] = \
                            data[project_name]['master_config'].get('default_desired_num', 0)
                    if tld not in data[project_name]['status']['total_docs']:
                        data[project_name]['status']['total_docs'][tld] = 0
                    added_num = data[project_name]['status']['added_docs'][tld]
                    total_num = data[project_name]['status']['total_docs'][tld]
                    desired_num = data[project_name]['status']['desired_docs'][tld]
                # cannot queue more documents than exist for this TLD
                desired_num = min(desired_num, total_num)
                # only add docs to queue if desired num is larger than added num
                if desired_num > added_num:
                    self.is_adding_data = True
                    # update mark in catalog
                    num_to_add = desired_num - added_num
                    added_num_this_round = 0
                    for doc_id in data[project_name]['data'][tld].iterkeys():
                        if not self.stop_adding_data:
                            # finished
                            if num_to_add <= 0:
                                break
                            # already added
                            if data[project_name]['data'][tld][doc_id]['add_to_queue']:
                                continue
                            # mark data before publishing, roll back on failure
                            data[project_name]['data'][tld][doc_id]['add_to_queue'] = True
                            num_to_add -= 1
                            added_num_this_round += 1
                            # publish to kafka queue
                            ret, msg = Actions._publish_to_kafka_input_queue(
                                doc_id, data[project_name]['data'][tld][doc_id], producer, input_topic)
                            if not ret:
                                logger.error('Error of pushing data to Kafka: %s', msg)
                                # roll back
                                data[project_name]['data'][tld][doc_id]['add_to_queue'] = False
                                num_to_add += 1
                                added_num_this_round -= 1
                    self.is_adding_data = False
                    if added_num_this_round > 0:
                        with data[project_name]['locks']['status']:
                            data[project_name]['status']['added_docs'][tld] = added_num + added_num_this_round
                        set_catalog_dirty(project_name)
                        set_status_dirty(project_name)
        except Exception as e:
            logger.exception('exception in Actions._add_data_worker() data lock')
        finally:
            if got_lock:
                data[project_name]['locks']['data'].release()
class MemoryDumpWorker(threading.Thread):
    """Background thread that periodically persists in-memory state.

    `function` (called with `kwargs`) performs the actual dump; it is invoked
    only when `memory_timestamp` is newer than `file_timestamp`, i.e. when the
    in-memory state is dirty. A final dump is performed on exit so no dirty
    state is lost.
    """
    def __init__(self, project_name, sleep_interval, function, kwargs=None):
        """
        Parameters:
        - `project_name`: project this worker serves.
        - `sleep_interval`: seconds to wait between dump attempts.
        - `function`: callable that writes the state to disk.
        - `kwargs`: keyword arguments passed to `function` (default: none).
          BUGFIX: was a mutable default argument (`kwargs=dict()`) shared
          across all instances; now created per instance.
        """
        super(MemoryDumpWorker, self).__init__()
        self.project_name = project_name
        self.exit_signal = False  # set by the owner to stop the thread
        init_time = time.time()
        # file_timestamp: when state was last written to file;
        # memory_timestamp: when state last changed in memory.
        self.file_timestamp = init_time
        self.memory_timestamp = init_time
        self.sleep_interval = sleep_interval
        self.function = function
        self.kwargs = kwargs if kwargs is not None else {}
    def get_status(self):
        # Snapshot of the worker's bookkeeping for status endpoints.
        return {
            'sleep_interval': self.sleep_interval,
            'file_timestamp': self.file_timestamp,
            'memory_timestamp': self.memory_timestamp,
            'is_dirty': self.file_timestamp != self.memory_timestamp
        }
    def run_function(self):
        # Snapshot memory_timestamp first: if the state changes while the
        # dump runs, the next round will see it as dirty again.
        memory_timestamp = self.memory_timestamp
        if self.file_timestamp < memory_timestamp:
            self.function(**self.kwargs)
            self.file_timestamp = memory_timestamp
    def run(self):
        logger.info('thread MemoryDumpWorker (%s) running... %s', self.function.__name__, self.project_name)
        while not self.exit_signal:
            self.run_function()
            # wait interval, in 0.1 s slices so exit_signal is noticed quickly
            t = self.sleep_interval * 10
            while t > 0 and not self.exit_signal:
                time.sleep(0.1)
                t -= 1
        # make sure memory data is dumped
        self.run_function()
def ensure_sandpaper_is_on():
    """Block until both Elasticsearch and Sandpaper answer HTTP requests.

    Retries in a loop every 5 seconds.  (Previously this recursed on every
    retry, which could exhaust the interpreter's recursion limit if the
    services stayed offline long enough.)
    """
    while True:
        try:
            # make sure es is on
            requests.get(config['es']['sample_url'])
            # make sure sandpaper is on
            requests.get(config['sandpaper']['url'])
            return
        except requests.exceptions.ConnectionError:
            # not online yet, retry after a short pause
            time.sleep(5)
def ensure_etl_engine_is_on():
    """Block until the ETL engine answers within its configured timeout.

    Retries in a loop every 5 seconds (previously recursed on every retry).
    Also retries on Timeout: the original passed ``timeout=`` but only caught
    ConnectionError, so a slow-but-starting ETL engine crashed startup
    instead of being retried.
    """
    while True:
        try:
            requests.get(config['etl']['url'], timeout=config['etl']['timeout'])
            return
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            # not online yet, retry after a short pause
            time.sleep(5)
def ensure_kafka_is_on():
    """Create the global Kafka producer, retrying until a broker is reachable.

    Retries in a loop every 5 seconds (previously recursed on every retry).
    """
    global kafka_producer
    while True:
        try:
            kafka_producer = KafkaProducer(
                bootstrap_servers=config['kafka']['servers'],
                max_request_size=10485760,  # allow messages up to 10 MiB
                value_serializer=lambda v: json.dumps(v).encode('utf-8'),
                compression_type='gzip'
            )
            return
        except NoBrokersAvailable:
            # no broker yet, retry after a short pause
            time.sleep(5)
def graceful_killer(signum, frame):
    """Signal handler: stop every project's worker threads, then exit.

    Best-effort shutdown: a failure for one project is logged (instead of
    being silently swallowed) and the remaining projects are still stopped.
    """
    logger.info('SIGNAL #%s received, notifying threads to exit...', signum)
    # .keys() works on both Python 2 and 3 (the original .iterkeys() is Py2-only).
    for project_name in data.keys():
        try:
            stop_threads_and_locks(project_name)
        except Exception:
            logger.exception('failed to stop threads of project %s', project_name)
    logger.info('threads exited, exiting main thread...')
    sys.exit()
def start_threads_and_locks(project_name):
    """Create the per-project locks and start its three worker threads."""
    project = data[project_name]
    project['locks']['data'] = threading.Lock()
    project['locks']['status'] = threading.Lock()
    project['locks']['catalog_log'] = threading.Lock()
    # Pushes marked documents into the Kafka input queue.
    pusher = DataPushingWorker(
        project_name, config['data_pushing_worker_backoff_time'])
    project['data_pushing_worker'] = pusher
    pusher.start()
    # One MemoryDumpWorker per on-disk artifact (status file, catalog file).
    dump_workers = (
        ('status_memory_dump_worker',
         config['status_memory_dump_backoff_time'], update_status_file),
        ('catalog_memory_dump_worker',
         config['catalog_memory_dump_backoff_time'], update_catalog_file),
    )
    for slot, backoff_time, dump_function in dump_workers:
        worker = MemoryDumpWorker(project_name, backoff_time, dump_function,
                                  kwargs={'project_name': project_name})
        project[slot] = worker
        worker.start()
def stop_threads_and_locks(project_name):
    """Signal all worker threads of a project to exit and wait for them.

    Best-effort: failures (e.g. a worker that was never started) are logged
    instead of being silently swallowed by a bare ``except: pass``.
    """
    worker_keys = ('data_pushing_worker',
                   'status_memory_dump_worker',
                   'catalog_memory_dump_worker')
    try:
        for key in worker_keys:
            worker = data[project_name][key]
            worker.exit_signal = True
            worker.join()
        logger.info('threads of project %s exited', project_name)
    except Exception:
        logger.exception('failed to stop threads of project %s', project_name)
if __name__ == '__main__':
try:
# prerequisites
logger.info('ensure sandpaper is on...')
ensure_sandpaper_is_on()
logger.info('ensure etl engine is on...')
ensure_etl_engine_is_on()
logger.info('ensure kafka is on...')
ensure_kafka_is_on()
logger.info('register signal handler...')
signal.signal(signal.SIGINT, graceful_killer)
signal.signal(signal.SIGTERM, graceful_killer)
# init
for project_name in os.listdir(config['repo']['local_path']):
project_dir_path = _get_project_dir_path(project_name)
if os.path.isdir(project_dir_path) and \
not (project_name.startswith('.') or project_name.startswith('_')):
data[project_name] = templates.get('project')
logger.info('loading project %s...', project_name)
# master config
master_config_file_path = os.path.join(project_dir_path, 'master_config.json')
if not os.path.exists(master_config_file_path):
logger.error('Missing master_config.json file for ' + project_name)
| |
<filename>src/game/engine.py
import asyncio
import math
import random
import time
import traceback
import pygame
import pymunk
import pymunk.pygame_util
from . import category, collision_type
from .entities.ball import Ball
from .entities.border import Border
from .entities.entity_type import EntityType
from .entities.player import Player
from .entities.spawner import Spawner
from .events import Error, MoveBar, MovePlayer, Sound
class Engine:
def __init__(
self, debug=False, is_server=True, is_client=True, ignore_self_control=False
):
"""Does not run until `#run` is called"""
self.created_timestamp = time.time()
self.running = True
self.debug_render = None
self.ignore_self_control = ignore_self_control
self.is_server = is_server
self.is_client = True
self.tickcount = 0
self._hooks = {}
self._hooks[self.process_event] = self.process_event
self.entities = {}
self.walls = []
self.key_map = {}
# Set initial keymap
self.key_map[MovePlayer.UP] = False
self.key_map[MovePlayer.DOWN] = False
self.key_map[MovePlayer.LEFT] = False
self.key_map[MovePlayer.RIGHT] = False
self.key_map[MoveBar.UP] = False
self.key_map[MoveBar.DOWN] = False
self.key_map[MoveBar.LEFT] = False
self.key_map[MoveBar.RIGHT] = False
self.playerUUID = ""
# def c(n, v):
# if n == Sound.ID:
# print(v)
# self._hooks[c] = c
self.width = 600
self.height = 600
# Game engine tick rate
self.tps = 100
# Controller tick rate
# ticks to controller
# a value of 5 would mean it will poll the controller every 5th tick
self.ttc = 5
self.ttc_tick = 0
self.ball_update_tick = 0
self.space = pymunk.Space()
self.space.gravity = 0, 0
self.coroutine = None
self.space.static_body.filter = pymunk.ShapeFilter(
categories=category.WALL, mask=category.MASK.WALL
)
self.space.static_body
self.player = None
# b = pymunk.Body(1, 1)
# self.space.add(b)
# c = pymunk.Circle(b, 10)
# b.position = (100, 100)
# self.space.add(c)
# print(b, c)
# p = Player()
# self.player = p
# p.position = (100, 200)
# p.add_space(self.space)
# self.register_entity(p)
s = Spawner(100)
s.position = (self.width // 2, self.height // 2)
s.add_space(self.space)
self.register_entity(s)
# try:
# self.ball = Ball()
# self.ball.position = (10 * 2 + 10, 300)
# self.ball.velocity = (0, 100)
# self.ball.angular_velocity = random.random() * 1000
# self.ball.add_space(self.space)
# self.register_entity(self.ball)
# except Exception as e:
# print(e)
# Test bounding box
bb = pymunk.BB(50, 300, 150, 150)
# self.space.add(bb)
self.load_mapdata()
# Add custom tick method so debug has an option to use it
self.control = lambda _=0: 0
# FIXME remove this as this is only used for checking stuff
self.temp = None
if debug:
# Process callbacks
def process_key(key):
"""Process key events passed from pygame window"""
if self.player is None:
return
if key == pygame.K_SPACE:
print("space pressed")
self.temp = self.player.dump_data()
elif key == pygame.K_l:
print("loading data")
self.player.load_data(self.temp)
# Process routines
def routine():
try:
p = pygame.key.get_pressed()
keys = {}
keys[MovePlayer.UP] = p[pygame.K_w]
keys[MovePlayer.DOWN] = p[pygame.K_s]
keys[MovePlayer.LEFT] = p[pygame.K_a]
keys[MovePlayer.RIGHT] = p[pygame.K_d]
keys[MoveBar.UP] = p[pygame.K_UP]
keys[MoveBar.DOWN] = p[pygame.K_DOWN]
keys[MoveBar.LEFT] = p[pygame.K_LEFT]
keys[MoveBar.RIGHT] = p[pygame.K_RIGHT]
self._emit(MovePlayer.ID, keys)
self._emit(MoveBar.ID, keys)
except Exception as e:
print(e)
self._emit(Error.ID, f"{e}")
self.control = routine
self.debug_render = DebugRender(self.space, self.destroy, process_key)
def on_hitbox_ball_hit(arbiter, space, data):
"""`arbiter.shapes[0]` is hitbox, `arbiter.shapes[1]` is ball"""
if not (
(arbiter.shapes[0].horizontal and arbiter.shapes[0].body.horizontal)
or (
not arbiter.shapes[0].horizontal
and not arbiter.shapes[0].body.horizontal
)
):
self._emit(Sound.ID, Sound.PLAYER_DAMAGE)
self.space.remove(
*arbiter.shapes[1].body.tuple,
*arbiter.shapes[0].body.bcb,
*arbiter.shapes[0].body.bb,
)
self.remove_entity(arbiter.shapes[0].body)
self.remove_entity(arbiter.shapes[1].body)
return True
return False
ch = self.space.add_collision_handler(collision_type.BALL, collision_type.WALL)
ch = self.space.add_collision_handler(
collision_type.HITBOX, collision_type.BALL
)
ch.pre_solve = on_hitbox_ball_hit
def on_collision_ball_hit(arbiter, space, data):
self._emit(Sound.ID, Sound.PADDLE_BOUNCE)
ball = arbiter.shapes[0].body
player = arbiter.shapes[1].body
ball.ownerUUID = player.uuid
def _map(p, x1, x2, dx1, dx2) -> float: # A simple range mapper
return ((dx2 - dx1) * ((p - x1) / (x2 - x1))) + dx1
poly = arbiter.shapes[0]
collided = arbiter.shapes[1]
# print("colided: ", collided)
space_vertices = []
for v in poly.get_vertices():
x, y = v.rotated(poly.body.angle) + poly.body.position
space_vertices.append((x, y))
cx, cy = arbiter.contact_point_set.points[0].point_a # Contact points
# print("Contact Points: (x, y): ", cx, cy)
# Actual shape corners
x1, y1 = space_vertices[0]
x2, y2 = space_vertices[2]
# print("World Corners (x1, x2): ", x1, x2)
# print("World Corners (y1, y2): ", y1, y2)
# Local shpe corners
lx1, ly1 = x1 - x1, y1 - y1
lx2, ly2 = x2 - x1, y2 - y1
lcx, lcy = cx - x1, cy - y1
# print("Local Corners (x1, x2): ", lx1, lx2)
# print("Local Corners (y1, y2): ", ly1, ly2)
# print("Local Contact (x, y): ", lcx, lcy)
w = abs(lx2 - lx1) # Width
h = abs(ly2 - ly1) # Height
# print("Width & Height: ", w, h)
# Biased coordinates
tx1, tx2 = lx1 - w // 2, lx2 - w // 2
ty1, ty2 = ly1 - h // 2, ly2 - h // 2
tcx, tcy = lcx - w // 2, lcy - h // 2
# print("Biased Range (x1, x2): ", tx1, tx2)
# print("Biased Range (y1, y2): ", ty1, ty2)
# print("Biased Contact (x, y): ", tcx, tcy)
# Angular range
a1, a2 = -90, 90
# print("Angular range: ", a1, a2)
# Interpolated coordinates
mx, my = _map(tcx, tx1, tx2, a1, a2), _map(tcy, ty1, ty2, a1, a2)
# print("Mapped: ", mx, my)
# Setting vector directions manually
dirx = 1 if mx > 0 else -1
if x2 - cx == 0: # Right collision
mx *= dirx # Make mx > 0
elif x2 - cx < 0: # Left collision
mx *= -dirx # Make mx < 0
diry = 1 if my > 0 else -1
if y2 - cy == 0: # Lower collision
my *= diry # Make my > 0
elif y2 - cy < 0: # Upper collision
my *= -diry # Make my < 0
# print("Directed: ", mx, my)
magnitude = 70 # In order to get real velocity data
nx, ny = (
math.sin(math.radians(mx)) * magnitude,
math.sin(math.radians(my)) * magnitude,
)
# print("Velocity: ", nx, ny)
collided.body.velocity = nx, ny
ch_collision_box = self.space.add_collision_handler(
collision_type.BALL_COLLISION_BOX, collision_type.BALL
)
ch_collision_box.post_solve = on_collision_ball_hit
def on_collision_ball_bounce(arbiter, space, data):
ball = arbiter.shapes[0].body
if ball.is_last_bounce():
self.space.remove(*ball.tuple)
self.remove_entity(ball)
# self.space.add_post_step_callback(
# self.space._remove_body, self.entities[ball.uuid]
# )
# remove the ball
ball.bounce_count += 1
ch_collision_wall = self.space.add_collision_handler(
collision_type.WALL, collision_type.BALL
)
ch_collision_wall.post_solve = on_collision_ball_bounce
def on_collision_ball_strike(arbiter, space, data): # FIXME change the name
# print("huura")
return True
ch_collision_border = self.space.add_collision_handler(
collision_type.BORDER, collision_type.BALL
)
ch_collision_border.post_solve = on_collision_ball_strike
# def reset(self):
# for body in self.entities.values():
# body.reset()
# if body.type == "player":
# self.space.remove(*list(set.union(*body.tuple)))
# else:
# self.space.remove(*body.tuple)
def dump(self):
# Dumps all entities
li = {}
for entity in list(self.entities.values()):
li[str(entity.uuid)] = entity.dump_data()
return li
    def load(self, data):
        """Sync the local entity registry from a serialized snapshot.

        ``data`` maps entity uuid -> dumped entity dict (the shape produced
        by ``dump``).  Existing entities are updated in place, unknown ones
        are created, and entities absent from the snapshot are removed from
        the physics space.
        """
        # return
        # Cycles 1..10; used below to throttle ball creation: only balls
        # whose update_id matches the current tick are instantiated this call.
        self.ball_update_tick = self.ball_update_tick % 10 + 1
        # Loads all entities
        # Track which known entities were NOT mentioned in the snapshot.
        processed_entities = set(self.entities.keys())
        # print(len(processed_entities))
        for entity_uuid, entity in data.items():
            processed_entities.discard(entity_uuid)
            if entity_uuid in self.entities:
                # Known entity: refresh its state in place.
                self.entities[entity_uuid].load_data(entity)
                continue
            # Create the entity
            # continue
            if entity["type"] == EntityType.PLAYER:
                self.add_player(entity_uuid)
            elif entity["type"] == EntityType.BALL:
                # Throttle: defer creation until this ball's update slot.
                if entity["update_id"] != self.ball_update_tick:
                    continue
                ball = Ball(entity_uuid)
                ball.load_data(entity)
                ball.add_space(self.space)
                self.register_entity(ball)
        # Anything left in processed_entities no longer exists server-side:
        # drop it from the registry and from the physics space.
        for d in processed_entities:
            entity = self.entities.get(d, None)
            self.entities.pop(d, None)
            if entity is None:
                continue
            try:
                if entity.type == EntityType.PLAYER:
                    # Players are compound: remove their shape collections.
                    self.space.remove(*entity.bb, *entity.bcb)
                    self.remove_entity(entity)
                elif entity.type == EntityType.BALL:
                    self.space.remove(*entity.tuple)
                    self.remove_entity(entity)
            except Exception as e:
                # Best-effort cleanup; removal races are only reported.
                print(e)
        # Just loop everything
# Just loop everything
def update_keymap(self, a, b, c, d, e, f, g, h):
# Set initial keymap
self.key_map[MovePlayer.UP] = a
self.key_map[MovePlayer.DOWN] = b
self.key_map[MovePlayer.LEFT] = c
self.key_map[MovePlayer.RIGHT] = d
self.key_map[MoveBar.UP] = e
self.key_map[MoveBar.DOWN] = f
self.key_map[MoveBar.LEFT] = g
self.key_map[MoveBar.RIGHT] = h
# def stop(self):
# for body in self.entities.values():
# if body.body_type != 2: # if not static
# body.sleep()
# return True
# def start(self):
# for body in self.entities.values():
# if body.body_type != 2: # if not static
# body.activate()
# return True
def get_player_count(self):
return sum(1 for body in self.entities.values() if body.type == "player")
def remove_player(self, uuid):
if uuid not in self.entities: # check if entity.type == player
return
self.space.remove(self.entities[uuid])
self.remove_entity(self.entities[uuid])
return True
    def is_player_bordered(self):
        """Whether the player touches a border — not yet implemented (TODO)."""
        pass
def add_player(self, uuid=None, owner=False):
p = Player(uuid)
p.position = (100, 200)
self.player = p
if owner:
self.playerUUID = uuid
p.add_space(self.space)
self.register_entity(p)
return p.uuid
def is_dead(self):
if self.entities.get(self.playerUUID, None) is None:
return True
return False
def load_mapdata(self):
"""
We are NOT going to pass this through websockets. TLDR; downloading maps is impossible
This is a method for dividing up modules so code does not get clogged up
"""
from .default_map import data
for obj in data:
| |
import re
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.asa.show_interface import ShowInterfaceSummary, \
ShowInterfaceIpBrief, \
ShowInterfaceDetail
# =============================================
# Parser for 'show interface summary'
# =============================================
class test_show_interface_summary(unittest.TestCase):
    """Unit tests for the ShowInterfaceSummary parser ('show interface summary')."""
    device = Device(name='aDevice')
    # Simulated empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Expected structure parsed from golden_output (single interface).
    golden_parsed_output = {
        'interfaces': {
            'Vlan100': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod10',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            }
        }
    }
    # Raw device output corresponding to golden_parsed_output.
    golden_output = {'execute.return_value': '''
        ciscoasa/admin# show interface summary
        Interface Vlan100 "pod10", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        '''}
    # Expected structure for golden_output_2 (many interfaces, including one
    # unconfigured interface Vlan500 and one down interface Vlan1600).
    golden_parsed_output_2 = {
        'interfaces': {
            'Vlan300': {
                'interface_state': True,
                'config_status': True,
                'name': 'admin-out',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan400': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod10-in',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan500': {
                'interface_state': True,
                'config_status': False,
                'config_issue': 'nameif',
                'name': '',
                'link_status': True,
                'line_protocol': True,
            },
            'Vlan600': {
                'interface_state': True,
                'config_status': True,
                'name': 'adminTEST',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan700': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod1',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan902': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod2',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan900': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod3',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1000': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod4',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1100': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod5',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1200': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod6',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1300': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod7',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1400': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod8',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1500': {
                'interface_state': True,
                'config_status': True,
                'name': 'pod9',
                'link_status': True,
                'line_protocol': True,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            },
            'Vlan1600': {
                'interface_state': False,
                'config_status': True,
                'name': 'pod249',
                'link_status': False,
                'line_protocol': False,
                'mac_address': 'aa11.bbff.ee55',
                'mtu': 1500,
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'subnet': '255.255.255.0'
            }
        }
    }
    # Raw device output corresponding to golden_parsed_output_2.
    golden_output_2 = {'execute.return_value': '''
        ciscoasa/admin# show interface summary
        Interface Vlan300 "admin-out", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan400 "pod10-in", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan500 "", is up, line protocol is up
          Available but not configured via nameif
        Interface Vlan600 "adminTEST", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan700 "pod1", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan902 "pod2", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan900 "pod3", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1000 "pod4", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1100 "pod5", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1200 "pod6", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1300 "pod7", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1400 "pod8", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1500 "pod9", is up, line protocol is up
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        Interface Vlan1600 "pod249", is down, line protocol is down
          MAC address aa11.bbff.ee55, MTU 1500
          IP address 10.10.1.1, subnet mask 255.255.255.0
        '''}
    def test_empty(self):
        # Empty CLI output must raise instead of returning an empty dict.
        self.device1 = Mock(**self.empty_output)
        interface_obj = ShowInterfaceSummary(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse()
    def test_golden(self):
        # Single-interface golden output parses to golden_parsed_output.
        self.device = Mock(**self.golden_output)
        interface_obj = ShowInterfaceSummary(device=self.device)
        parsed_output = interface_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_output)
    def test_golden_2(self):
        # Multi-interface golden output parses to golden_parsed_output_2.
        self.device = Mock(**self.golden_output_2)
        interface_obj = ShowInterfaceSummary(device=self.device)
        parsed_output = interface_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_output_2)
# =============================================
# Parser for 'show interface ip brief'
# =============================================
class test_show_interface_ip_brief(unittest.TestCase):
    """Unit tests for the ShowInterfaceIpBrief parser ('show interface ip brief')."""
    device = Device(name='aDevice')
    # Simulated empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Expected structure parsed from golden_output; unassigned addresses are
    # represented as an 'unnumbered' ipv4 entry.
    golden_parsed_output = {
        'interfaces': {
            'Control0/0': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'CONFIG',
                'link_status': 'up',
                'line_protocol': 'up'
            },
            'GigabitEthernet0/0': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'CONFIG',
                'link_status': 'up',
                'line_protocol': 'up'
            },
            'GigabitEthernet0/1': {
                'ipv4': {
                    'unnumbered': {
                        'unnumbered_intf_ref': 'unassigned'
                    },
                },
                'check': 'YES',
                'method': 'unset admin',
                'link_status': 'down',
                'line_protocol': 'down'
            },
            'GigabitEthernet0/2': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'manual admin',
                'link_status': 'down',
                'line_protocol': 'down'
            },
            'GigabitEthernet0/3': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'DHCP admin',
                'link_status': 'down',
                'line_protocol': 'down'
            },
            'Management0/0': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'CONFIG',
                'link_status': 'up'
            },
            'Vlan150': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'CONFIG',
                'link_status': 'up',
                'line_protocol': 'up'
            },
            'Vlan160': {
                'ipv4': {
                    '10.10.1.1': {
                        'ip': '10.10.1.1'
                    },
                },
                'check': 'YES',
                'method': 'CONFIG',
                'link_status': 'down',
                'line_protocol': 'down'
            },
            'Vlan170': {
                'ipv4': {
                    'unnumbered': {
                        'unnumbered_intf_ref': 'unassigned'
                    },
                },
                'check': 'YES',
                'method': 'unset',
                'link_status': 'up',
                'line_protocol': 'up'
            }
        }
    }
    # Raw device output corresponding to golden_parsed_output.
    golden_output = {'execute.return_value': '''
        Interface IP-Address OK? Method Status Protocol
        Control0/0 10.10.1.1 YES CONFIG up up
        GigabitEthernet0/0 10.10.1.1 YES CONFIG up up
        GigabitEthernet0/1 unassigned YES unset admin down down
        GigabitEthernet0/2 10.10.1.1 YES manual admin down down
        GigabitEthernet0/3 10.10.1.1 YES DHCP admin down down
        Management0/0 10.10.1.1 YES CONFIG up
        Vlan150 10.10.1.1 YES CONFIG up up
        Vlan160 10.10.1.1 YES CONFIG down down
        Vlan170 unassigned YES unset up up
        '''}
    def test_empty(self):
        # Empty CLI output must raise instead of returning an empty dict.
        self.device1 = Mock(**self.empty_output)
        interface_obj = ShowInterfaceIpBrief(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse()
    def test_golden(self):
        # Golden output parses to golden_parsed_output.
        self.device = Mock(**self.golden_output)
        interface_obj = ShowInterfaceIpBrief(device=self.device)
        parsed_output = interface_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =============================================
# Parser for 'show interface detail'
# =============================================
class test_show_interface_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'interfaces': {
'Vlan5000': {
'interface_state': True,
'config_status': True,
'name': 'pod248',
'link_status': True,
'line_protocol': True,
'mac_address': 'aa11.bbff.ee55',
'mtu': 1500,
'ipv4': {
'10.10.1.1': {
'ip': '10.10.1.1'
},
},
'subnet': '255.255.255.0',
'traffic_statistics': {
'packets_input': 34354322,
'bytes_input': 32443242111,
'packets_output': 92739172,
'bytes_output': 4309803982,
'packets_dropped': 2551519
},
'control_point_states': {
'interface': {
'interface_number': 899,
'interface_config_status': 'active',
'interface_state': 'active'
},
'Vlan5000':{
'interface_vlan_config_status': 'active',
'interface_vlan_state': 'UP'
},
}
}
}
}
golden_output = {'execute.return_value': '''
DevNet-asa-sm-1/admin# show interface detail
Interface Vlan5000 "pod248", is up, line protocol is up
MAC address aa11.bbff.ee55, MTU 1500
IP address 10.10.1.1, subnet mask 255.255.255.0
Traffic Statistics for "pod248":
34354322 packets input, 32443242111 bytes
92739172 packets output, 4309803982 bytes
2551519 packets dropped
Control Point Interface States:
Interface number is 899
Interface config status is active
Interface state is active
Control Point Vlan5000 States:
Interface vlan config status is active
Interface vlan state is UP
'''}
golden_parsed_output_2 = {
'interfaces': {
'Vlan6000': {
'interface_state': False,
'config_status': True,
'name': 'pod555',
'link_status': False,
'line_protocol': False,
'mac_address': 'aa11.bbff.ee55',
'mtu': 1500,
'ipv4': {
'10.10.1.1': {
'ip': '10.10.1.1'
},
},
'subnet': '255.255.255.0',
'traffic_statistics': {
'packets_input': 0,
'bytes_input': 0,
'packets_output': 0,
'bytes_output': 0,
'packets_dropped': 0
},
'control_point_states': {
'interface': {
'interface_number': 612,
'interface_config_status': 'active',
'interface_state': 'not active'
},
'Vlan6000':{
'interface_vlan_config_status': 'not active',
'interface_vlan_state': 'DOWN'
},
}
}
}
}
golden_output_2 = {'execute.return_value': '''
DevNet-asa-sm-1/admin# show interface detail
Interface Vlan6000 "pod555", is down, line protocol is down
MAC address aa11.bbff.ee55, MTU 1500
IP address 10.10.1.1, subnet mask 255.255.255.0
Traffic Statistics for "pod555":
0 packets input, 0 bytes
0 | |
adj_list and in_degree.
# For each pair of adjacent words...
for first_word, second_word in zip(words, words[1:]):
for c, d in zip(first_word, second_word):
if c != d:
if d not in adj_list[c]:
adj_list[c].add(d)
in_degree[d] += 1
break
else: # Check that second word isn't a prefix of first word.
if len(second_word) < len(first_word):
return ""
# Step 2: We need to repeatedly pick off nodes with an indegree of 0.
output = []
queue = deque([c for c in in_degree if in_degree[c] == 0])
while queue:
c = queue.popleft()
output.append(c)
for d in adj_list[c]:
in_degree[d] -= 1
if in_degree[d] == 0:
queue.append(d)
# If not all letters are in output, that means there was a cycle and so
# no valid ordering. Return "" as per the problem description.
if len(output) < len(in_degree):
return ""
# Otherwise, convert the ordering we found into a string and return it.
return "".join(output)
class UnionFind:
    """Disjoint-set (union by size + path compression)."""

    def __init__(self, n: int):
        # O(n) space: every node starts as its own singleton set.
        self.parent = list(range(n))
        self.size = [1] * n

    def find(self, A: int) -> int:
        """Return the representative root of A's set.

        Amortized O(inverse Ackermann function) time.
        """
        root = A
        while self.parent[root] != root:
            root = self.parent[root]
        # Path compression: point every node on the walk directly at the root.
        while self.parent[A] != root:
            self.parent[A], A = root, self.parent[A]
        return root

    def union(self, A: int, B: int) -> bool:
        """Merge the sets containing A and B.

        True if a merge happened, False otherwise.
        Amortized O(inverse Ackermann function) time.
        """
        big, small = self.find(A), self.find(B)
        if big == small:
            return False
        # Union by size: hang the smaller tree under the larger one.
        if self.size[big] < self.size[small]:
            big, small = small, big
        self.parent[small] = big
        self.size[big] += self.size[small]
        return True
class GraphUnion:
    """
    # - Graph Valid Tree (Leetcode Premium) -
    # https://leetcode.com/problems/graph-valid-tree/
    You have a graph of `n` nodes labeled from `0` to `n - 1`. You are given an
    integer `n` and a list of edges where `edges[i] = [ai, bi]` indicates that there
    is an undirected edge between nodes `ai` and `bi` in the graph.
    Return `true` if the edges of the given graph make up a valid tree, and
    `false` otherwise.
    Example 1:
    Input: n = 5, edges = [[0,1],[0,2],[0,3],[1,4]]
    Output: true
    Example 2:
    Input: n = 5, edges = [[0,1],[1,2],[2,3],[1,3],[1,4]]
    Output: false
    """
    """
    Recall that a graph, G, is a tree iff the following two conditions are met:
    - G is fully connected. In other words, for every pair of nodes in G, there is
      a path between them.
    - G contains no cycles. In other words, there is exactly one path between each
      pair of nodes in G.
    """

    def validTree(self, n: int, edges: List[List[int]]) -> bool:
        # Iterative DFS, O(n) time, O(n) space.
        # A graph with exactly n - 1 edges is a tree iff it is connected.
        if len(edges) != n - 1:
            return False
        neighbours: List[List[int]] = [[] for _ in range(n)]
        for a, b in edges:
            neighbours[a].append(b)
            neighbours[b].append(a)
        visited = {0}  # guards against walking cycles forever
        stack = [0]
        while stack:
            current = stack.pop()
            for nxt in neighbours[current]:
                if nxt not in visited:
                    visited.add(nxt)
                    stack.append(nxt)
        return len(visited) == n

    def validTree_(self, n: int, edges: List[List[int]]) -> bool:
        # Union-Find variant, O(N * inverse Ackermann function) time, O(N) space.
        if len(edges) != n - 1:
            return False
        dsu = UnionFind(n)
        # Every edge must merge two previously-separate sets;
        # a failed merge means a cycle.
        return all(dsu.union(a, b) for a, b in edges)

    """
    # - Number of Connected Components in an Undirected Graph (Leetcode Premium) -
    # https://leetcode.com/problems/number-of-connected-components-in-an-undirected-graph/
    You have a graph of n nodes. You are given an integer n and an array edges where
    edges[i] = [ai, bi] indicates that there is an edge between ai and bi in the graph.
    Return the number of connected components in the graph.
    Example 1:
    0 - 1 3
    | |
    2 4
    Input: n = 5, edges = [[0,1],[1,2],[3,4]]
    Output: 2
    Example 2:
    0 - 1 3
    |/|
    2 4
    Input: n = 5, edges = [[0,1],[1,2],[2,3],[3,4]]
    Output: 1
    """

    def countComponents(self, n: int, edges: List[List[int]]) -> int:
        # O(N * inverse Ackermann function) time, O(N) space.
        dsu = UnionFind(n)
        for a, b in edges:
            dsu.union(a, b)
        # Number of distinct roots == number of components.
        return len({dsu.find(node) for node in range(n)})
class Interval:
    """Interval-scheduling problems (LeetCode) collected as methods.

    Every method treats an interval as a ``[start, end]`` pair.  Unlike the
    previous revision, no method mutates its input lists (the old ``insert``
    mutated and aliased ``newInterval``; the meeting-room methods sorted the
    caller's list in place).
    """

    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        """Minimum number of intervals to remove so the rest don't overlap.

        https://leetcode.com/problems/non-overlapping-intervals/
        Example: [[1,2],[2,3],[3,4],[1,3]] -> 1 (remove [1,3]).
        Greedy by earliest end time: O(n log n) time, O(1) extra space.
        """
        removed = 0
        prev_end = -inf
        for start, end in sorted(intervals, key=lambda iv: iv[1]):
            if start < prev_end:
                # Overlaps the interval we kept; dropping this one is
                # optimal because it ends no earlier than the kept one.
                removed += 1
            else:
                prev_end = end
        return removed

    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals; result is sorted ascending.

        https://leetcode.com/problems/merge-intervals/
        Example: [[1,3],[2,6],[8,10],[15,18]] -> [[1,6],[8,10],[15,18]].
        Touching intervals ([1,4],[4,5]) are merged. O(n log n) time.
        """
        merged: List[List[int]] = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                # Overlaps (or touches) the last merged interval.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged

    def insert(
        self, intervals: List[List[int]], newInterval: List[int]
    ) -> List[List[int]]:
        """Insert newInterval into sorted non-overlapping intervals, merging.

        https://leetcode.com/problems/insert-interval/
        Example: [[1,3],[6,9]] + [2,5] -> [[1,5],[6,9]].
        Fixed: no longer mutates ``newInterval`` nor aliases it into the
        result. O(n) time.
        """
        start, end = newInterval
        result: List[List[int]] = []
        i, n = 0, len(intervals)
        # 1) Intervals entirely before the new one.
        while i < n and intervals[i][1] < start:
            result.append(intervals[i])
            i += 1
        # 2) Absorb every interval that overlaps [start, end].
        while i < n and intervals[i][0] <= end:
            start = min(start, intervals[i][0])
            end = max(end, intervals[i][1])
            i += 1
        result.append([start, end])
        # 3) Intervals entirely after the (merged) new one.
        result.extend(intervals[i:])
        return result

    def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
        """True iff no two meetings overlap.

        https://leetcode.com/problems/meeting-rooms/
        Example: [[0,30],[5,10],[15,20]] -> False. O(n log n) time;
        does not mutate the input (previously sorted in place).
        """
        ordered = sorted(intervals)
        return all(
            ordered[i - 1][1] <= ordered[i][0] for i in range(1, len(ordered))
        )

    def canAttendMeetings_(self, intervals: List[List[int]]) -> bool:
        """Explicit-loop variant of :meth:`canAttendMeetings`."""
        ordered = sorted(intervals)
        for prev, cur in zip(ordered, ordered[1:]):
            if prev[1] > cur[0]:
                return False
        return True

    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        """Minimum number of conference rooms to host all meetings.

        https://leetcode.com/problems/meeting-rooms-ii/
        Example: [[0,30],[5,10],[15,20]] -> 2.
        Min-heap of end times of occupied rooms: O(n log n) time,
        O(n) space. Does not mutate the input.
        """
        if not intervals:
            return 0
        end_times: List[int] = []  # min-heap: end times of busy rooms
        for start, end in sorted(intervals):
            if end_times and end_times[0] <= start:
                # Earliest-ending meeting is over; reuse its room.
                heappop(end_times)
            heappush(end_times, end)
        return len(end_times)
class ListNode:
"""LinkedList helper"""
def __init__(self, val: int = 0, next: Optional[ListNode] = None):
self.val = val
self.next | |
tmp5[i] = 1 {id=insn6,conflicts=g1}
""", name="nosync")
orig_prog = lp.set_temporary_scope(orig_prog, "tmp3", "local")
orig_prog = lp.set_temporary_scope(orig_prog, "tmp5", "local")
# No dependency present - don't add nosync
prog = lp.add_nosync(orig_prog, "any", "writes:tmp", "writes:tmp2",
empty_ok=True)
assert frozenset() == (
prog["nosync"].id_to_insn["insn2"].no_sync_with)
# Dependency present
prog = lp.add_nosync(orig_prog, "local", "writes:tmp3", "reads:tmp3")
assert frozenset() == (
prog["nosync"].id_to_insn["insn3"].no_sync_with)
assert frozenset([("insn3", "local")]) == (
prog["nosync"].id_to_insn["insn4"].no_sync_with)
# Bidirectional
prog = lp.add_nosync(
orig_prog, "local", "writes:tmp3", "reads:tmp3", bidirectional=True)
assert frozenset([("insn4", "local")]) == (
prog["nosync"].id_to_insn["insn3"].no_sync_with)
assert frozenset([("insn3", "local")]) == (
prog["nosync"].id_to_insn["insn4"].no_sync_with)
# Groups
prog = lp.add_nosync(orig_prog, "local", "insn5", "insn6")
assert frozenset([("insn5", "local")]) == (
prog["nosync"].id_to_insn["insn6"].no_sync_with)
def test_uniquify_instruction_ids():
    """Duplicate/None instruction ids must come out unique and string-typed."""
    from loopy.transform.instruction import uniquify_instruction_ids

    insns = [
        lp.Assignment("b", 1, id=None),
        lp.Assignment("b", 1, id=None),
        lp.Assignment("b", 1, id=lp.UniqueName("b")),
        lp.Assignment("b", 1, id=lp.UniqueName("b")),
    ]
    prog = lp.make_kernel("{[i]: i = 1}", [], name="lpy_knl")
    prog = prog.with_kernel(prog["lpy_knl"].copy(instructions=insns))

    prog = uniquify_instruction_ids(prog)

    ids = {insn.id for insn in prog["lpy_knl"].instructions}
    assert len(ids) == 4
    assert all(isinstance(insn_id, str) for insn_id in ids)
def test_split_iname_only_if_in_within():
    """split_iname with a 'within' filter must leave other statements alone."""
    prog = lp.make_kernel(
        "{[i]: 0<=i<10}",
        """
        c[i] = 3*d[i] {id=to_split}
        a[i] = 2*b[i] {id=not_to_split}
        """, name="splitter")

    prog = lp.split_iname(prog, "i", 4, within="id:to_split")

    expected_inames = {
        "to_split": frozenset({"i_outer", "i_inner"}),
        "not_to_split": frozenset({"i"}),
    }
    for insn in prog["splitter"].instructions:
        if insn.id in expected_inames:
            assert insn.within_inames == expected_inames[insn.id]
def test_nested_substs_in_insns(ctx_factory):
    """Nested substitution rules must fully expand out of all callables."""
    ctx = ctx_factory()
    import loopy as lp

    ref_prg = lp.make_kernel(
        "{[i]: 0<=i<10}",
        """
        a(x) := 2 * x
        b(x) := x**2
        c(x) := 7 * x
        f[i] = c(b(a(i)))
        """
    )

    t_unit = lp.expand_subst(ref_prg)

    # After expansion, no callable may retain substitution rules.
    assert all(
        not cknl.subkernel.substitutions
        for cknl in t_unit.callables_table.values())

    lp.auto_test_vs_ref(ref_prg, ctx, t_unit)
# {{{ test_map_domain_vs_split_iname
def _ensure_dim_names_match_and_align(obj_map, tgt_map):
    """Align obj_map's space to tgt_map's after checking dim names agree.

    (This function is also defined in the independent, unmerged branch
    new-dependency-and-nest-constraint-semantics-development and used in
    child branches thereof.  Once those branches are merged, it may make
    sense to move it somewhere for more general-purpose machinery.  In
    the other branches the name has no leading underscore.)
    """
    from islpy import align_spaces
    from islpy import dim_type as dt

    # Alignment is only meaningful if the name sets agree per dim class.
    for dim_class in (dt.in_, dt.out, dt.param):
        obj_names = set(obj_map.get_var_names(dim_class))
        tgt_names = set(tgt_map.get_var_names(dim_class))
        if obj_names != tgt_names:
            raise ValueError(
                "Cannot align spaces; names don't match:\n%s\n%s"
                % (obj_map, tgt_map))

    return align_spaces(obj_map, tgt_map)
def test_map_domain_vs_split_iname(ctx_factory):
    """Check that lp.map_domain with a 32-way split map reproduces
    lp.split_iname(knl, "t", 32): same domains, same linearization,
    and numerically matching generated code.
    """
    # {{{ Make kernel

    knl = lp.make_kernel(
        [
            "[nx,nt] -> {[x, t]: 0 <= x < nx and 0 <= t < nt}",
            "[ni] -> {[i]: 0 <= i < ni}",
        ],
        """
        a[x,t] = b[x,t] {id=stmta}
        c[x,t] = d[x,t] {id=stmtc}
        e[i] = f[i]
        """,
        lang_version=(2018, 2),
        )
    knl = lp.add_and_infer_dtypes(knl, {"b,d,f": np.float32})
    # Keep a pristine copy so both transform paths start identically.
    ref_knl = knl

    # }}}

    # {{{ Apply domain change mapping

    knl_map_dom = ref_knl

    # Create map_domain mapping:
    import islpy as isl
    # t -> (t_outer, t_inner): the same 32-way split split_iname performs.
    transform_map = isl.BasicMap(
        "[nt] -> {[t] -> [t_outer, t_inner]: "
        "0 <= t_inner < 32 and "
        "32*t_outer + t_inner = t and "
        "0 <= 32*t_outer + t_inner < nt}")

    # Call map_domain to transform kernel
    knl_map_dom = lp.map_domain(knl_map_dom, transform_map)

    # Prioritize loops (prio should eventually be updated in map_domain?)
    loop_priority = "x, t_outer, t_inner"
    knl_map_dom = lp.prioritize_loops(knl_map_dom, loop_priority)

    # Get a linearization
    proc_knl_map_dom = lp.preprocess_kernel(knl_map_dom)
    lin_knl_map_dom = lp.get_one_linearized_kernel(
        proc_knl_map_dom["loopy_kernel"], proc_knl_map_dom.callables_table)

    # }}}

    # {{{ Split iname and see if we get the same result

    knl_split_iname = ref_knl
    knl_split_iname = lp.split_iname(knl_split_iname, "t", 32)
    knl_split_iname = lp.prioritize_loops(knl_split_iname, loop_priority)
    proc_knl_split_iname = lp.preprocess_kernel(knl_split_iname)
    lin_knl_split_iname = lp.get_one_linearized_kernel(
        proc_knl_split_iname["loopy_kernel"], proc_knl_split_iname.callables_table)

    # Domains must agree pairwise once their spaces are aligned.
    for d_map_domain, d_split_iname in zip(
            knl_map_dom["loopy_kernel"].domains,
            knl_split_iname["loopy_kernel"].domains):
        d_map_domain_aligned = _ensure_dim_names_match_and_align(
            d_map_domain, d_split_iname)
        assert d_map_domain_aligned == d_split_iname

    # Linearizations must agree item by item.
    for litem_map_domain, litem_split_iname in zip(
            lin_knl_map_dom.linearization, lin_knl_split_iname.linearization):
        assert litem_map_domain == litem_split_iname

    # Can't easily compare instructions because equivalent subscript
    # expressions may have different orders

    # Final numerical cross-check of the two generated kernels.
    lp.auto_test_vs_ref(proc_knl_split_iname, ctx_factory(), proc_knl_map_dom,
                        parameters={"nx": 128, "nt": 128, "ni": 128})

    # }}}
# }}}
# {{{ test_map_domain_transform_map_validity_and_errors
def test_map_domain_transform_map_validity_and_errors(ctx_factory):
# {{{ Make kernel
knl = lp.make_kernel(
[
"[nx,nt] -> {[x, y, z, t]: 0 <= x,y,z < nx and 0 <= t < nt}",
"[m] -> {[j]: 0 <= j < m}",
],
"""
a[y,x,t,z] = b[y,x,t,z] {id=stmta}
for j
<>temp = j {dep=stmta}
end
""",
lang_version=(2018, 2),
)
knl = lp.add_and_infer_dtypes(knl, {"b": np.float32})
ref_knl = knl
# }}}
# {{{ Make sure map_domain *succeeds* when map includes 2 of 4 dims in one
# domain.
# {{{ Apply domain change mapping that splits t and renames y; (similar to
# split_iname test above, but doesn't hurt to test this slightly different
# scenario)
knl_map_dom = ref_knl
# Create map_domain mapping that only includes t and y
# (x and z should be unaffected)
import islpy as isl
transform_map = isl.BasicMap(
"[nx,nt] -> {[t, y] -> [t_outer, t_inner, y_new]: "
"0 <= t_inner < 16 and "
"16*t_outer + t_inner = t and "
"0 <= 16*t_outer + t_inner < nt and "
"y = y_new"
"}")
# Call map_domain to transform kernel; this should *not* produce an error
knl_map_dom = lp.map_domain(knl_map_dom, transform_map)
# Prioritize loops
desired_prio = "x, t_outer, t_inner, z, y_new"
# Use constrain_loop_nesting if it's available
cln_attr = getattr(lp, "constrain_loop_nesting", None)
if cln_attr is not None:
knl_map_dom = lp.constrain_loop_nesting( # noqa pylint:disable=no-member
knl_map_dom, desired_prio)
else:
knl_map_dom = lp.prioritize_loops(knl_map_dom, desired_prio)
# Get a linearization
proc_knl_map_dom = lp.preprocess_kernel(knl_map_dom)
lin_knl_map_dom = lp.get_one_linearized_kernel(
proc_knl_map_dom["loopy_kernel"], proc_knl_map_dom.callables_table)
# }}}
# {{{ Use split_iname and rename_iname, and make sure we get the same result
knl_split_iname = ref_knl
knl_split_iname = lp.split_iname(knl_split_iname, "t", 16)
knl_split_iname = lp.rename_iname(knl_split_iname, "y", "y_new")
try:
# Use constrain_loop_nesting if it's available
knl_split_iname = lp.constrain_loop_nesting(knl_split_iname, desired_prio)
except AttributeError:
knl_split_iname = lp.prioritize_loops(knl_split_iname, desired_prio)
proc_knl_split_iname = lp.preprocess_kernel(knl_split_iname)
lin_knl_split_iname = lp.get_one_linearized_kernel(
proc_knl_split_iname["loopy_kernel"], proc_knl_split_iname.callables_table)
for d_map_domain, d_split_iname in zip(
knl_map_dom["loopy_kernel"].domains,
knl_split_iname["loopy_kernel"].domains):
d_map_domain_aligned = _ensure_dim_names_match_and_align(
d_map_domain, d_split_iname)
assert d_map_domain_aligned == d_split_iname
for litem_map_domain, litem_split_iname in zip(
lin_knl_map_dom.linearization, lin_knl_split_iname.linearization):
assert litem_map_domain == litem_split_iname
# Can't easily compare instructions because equivalent subscript
# expressions may have different orders
lp.auto_test_vs_ref(proc_knl_split_iname, ctx_factory(), proc_knl_map_dom,
parameters={"nx": 32, "nt": 32, "m": 32})
# }}}
# }}}
# {{{ Make sure we error on a map that is not bijective
# Not bijective
transform_map = isl.BasicMap(
"[nx,nt] -> {[t, y, rogue] -> [t_new, y_new]: "
"y = y_new and t = t_new"
"}")
from loopy.diagnostic import LoopyError
knl = ref_knl
try:
knl = lp.map_domain(knl, transform_map)
raise AssertionError()
except LoopyError as err:
assert "map must be bijective" in str(err)
# }}}
# {{{ Make sure there's an error if transform map does not apply to
# exactly one domain.
test_maps = [
# Map where some inames match exactly one domain but there's also a
# rogue dim
isl.BasicMap(
"[nx,nt] -> {[t, y, rogue] -> [t_new, y_new, rogue_new]: "
"y = y_new and t = t_new and rogue = rogue_new"
"}"),
# Map where all inames match exactly one domain but there's also a
# rogue dim
isl.BasicMap(
"[nx,nt] -> {[t, y, x, z, rogue] -> "
"[t_new, y_new, x_new, z_new, rogue_new]: "
"y = y_new and t = t_new and x = x_new and z = z_new "
"and rogue = rogue_new"
"}"),
# Map where no inames match any domain
isl.BasicMap(
"[nx,nt] -> {[rogue] -> [rogue_new]: "
"rogue = rogue_new"
"}"),
]
for transform_map in test_maps:
try:
knl = lp.map_domain(knl, transform_map)
raise AssertionError()
except LoopyError as err:
assert (
"was not applicable to any domain. "
"Transform map must be applicable to exactly one domain."
in str(err))
# }}}
# {{{ Make sure there's an error if we try to map inames in priorities
knl = ref_knl
knl = lp.prioritize_loops(knl, "y, z")
knl = lp.prioritize_loops(knl, "x, z")
try:
transform_map = isl.BasicMap(
"[nx,nt] -> {[t, y] -> [t_new, y_new]: "
"y = y_new and t = t_new }")
knl = lp.map_domain(knl, transform_map)
raise AssertionError()
except ValueError as err:
assert (
"Loop priority ('y', 'z') contains iname(s) "
"transformed by map" in str(err))
# }}}
# {{{ Make sure we error when stmt.within_inames contains at least one but
# not all mapped inames
# {{{ Make potentially problematic kernel
knl = lp.make_kernel(
[
"[n, m] -> { [i, j]: 0 <= i < n and 0 <= j < m }",
"[ell] | |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
# Copyright (c) 2020 Nordic Semiconductor NA
#
# SPDX-License-Identifier: Apache-2.0
"""Translate generic handles into ones optimized for the application.
Immutable device data includes information about dependencies,
e.g. that a particular sensor is controlled through a specific I2C bus
and that it signals event on a pin on a specific GPIO controller.
This information is encoded in the first-pass binary using identifiers
derived from the devicetree. This script extracts those identifiers
and replaces them with ones optimized for use with the devices
actually present.
For example the sensor might have a first-pass handle defined by its
devicetree ordinal 52, with the I2C driver having ordinal 24 and the
GPIO controller ordinal 14. The runtime ordinal is the index of the
corresponding device in the static devicetree array, which might be 6,
5, and 3, respectively.
The output is a C source file that provides alternative definitions
for the array contents referenced from the immutable device objects.
In the final link these definitions supersede the ones in the
driver-specific object file.
"""
import sys
import argparse
import os
import struct
import pickle
from packaging import version
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import elftools.elf.enums
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
'dts', 'python-devicetree', 'src'))
from devicetree import edtlib # pylint: disable=unused-import
if version.parse(elftools.__version__) < version.parse('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
scr = os.path.basename(sys.argv[0])
def debug(text):
    """Write a script-name-prefixed diagnostic to stdout in verbose mode."""
    if args.verbose:
        sys.stdout.write(scr + ": " + text + "\n")
def parse_args():
    """Parse the command line into the module-global ``args``.

    Side effects: the VERBOSE environment variable forces verbose mode,
    the Zephyr base directory is validated (exits if unknown), and the
    Zephyr devicetree scripts directory is prepended to sys.path.
    """
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-k", "--kernel", required=True,
                        help="Input zephyr ELF binary")
    parser.add_argument("-d", "--num-dynamic-devices", required=False, default=0,
                        type=int, help="Input number of dynamic devices allowed")
    parser.add_argument("-o", "--output-source", required=True,
                        help="Output source file")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    parser.add_argument("-z", "--zephyr-base",
                        help="Path to current Zephyr base. If this argument \
is not provided the environment will be checked for \
the ZEPHYR_BASE environment variable.")
    parser.add_argument("-s", "--start-symbol", required=True,
                        help="Symbol name of the section which contains the \
devices. The symbol name must point to the first \
device in that section.")

    args = parser.parse_args()

    # Environment variable overrides the command-line flag.
    if "VERBOSE" in os.environ:
        args.verbose = 1

    # -z wins over the environment; one of the two must be set.
    ZEPHYR_BASE = args.zephyr_base or os.getenv("ZEPHYR_BASE")
    if ZEPHYR_BASE is None:
        sys.exit("-z / --zephyr-base not provided. Please provide "
                 "--zephyr-base or set ZEPHYR_BASE in environment")

    # Make the Zephyr devicetree helper modules importable.
    sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/dts"))
def symbol_data(elf, sym):
    """Return the bytes a symbol covers, or None if no section contains it.

    Scans the ELF's sections for one whose address range fully contains
    the symbol, then slices that section's data.
    """
    addr = sym.entry.st_value
    size = sym.entry.st_size  # was named 'len', shadowing the builtin
    for section in elf.iter_sections():
        start = section['sh_addr']
        end = start + section['sh_size']
        if (start <= addr) and (addr + size) <= end:
            offset = addr - start
            return bytes(section.data()[offset:offset + size])
    return None  # explicit: symbol not backed by any section
def symbol_handle_data(elf, sym):
    """Decode a symbol's payload as an array of 16-bit device handles.

    Returns a tuple of signed 16-bit values in the ELF's byte order, or
    None when the symbol has no backing data.
    """
    data = symbol_data(elf, sym)
    if data:
        fmt = "<" if elf.little_endian else ">"  # was 'format', shadowing the builtin
        # Integer division: '/' produced a float that %u only accepted by accident.
        fmt += "%uh" % (len(data) // 2)
        return struct.unpack(fmt, data)
    return None
# These match the corresponding constants in <device.h>
DEVICE_HANDLE_SEP = -32768   # separator between dependency sections
DEVICE_HANDLE_ENDS = 32767   # terminator of a handle array
DEVICE_HANDLE_NULL = 0       # "no device" placeholder


def handle_name(hdl):
    """Return the C spelling for a device handle value.

    Sentinel values map to their <device.h> macro names; anything else
    is emitted as a plain decimal integer.  (Previously the NULL case
    compared against a literal 0 instead of DEVICE_HANDLE_NULL.)
    """
    special = {
        DEVICE_HANDLE_SEP: "DEVICE_HANDLE_SEP",
        DEVICE_HANDLE_ENDS: "DEVICE_HANDLE_ENDS",
        DEVICE_HANDLE_NULL: "DEVICE_HANDLE_NULL",
    }
    try:
        return special[hdl]
    except KeyError:
        return str(int(hdl))
class Device:
    """
    Represents information about a device object and its references to other objects.
    """
    def __init__(self, elf, ld_constants, sym, addr):
        self.elf = elf
        self.ld_constants = ld_constants
        self.sym = sym
        self.addr = addr
        # Point to the handles instance associated with the device;
        # assigned by correlating the device struct handles pointer
        # value with the addr of a Handles instance.
        self.__handles = None
        self.__pm = None

    def _read_pointer(self, offset_constant):
        """Read one native-sized pointer field out of the device struct.

        offset_constant names the linker-provided field offset in
        self.ld_constants.  (Factored out of obj_handles/obj_pm, which
        previously duplicated this decode logic.)
        """
        data = symbol_data(self.elf, self.sym)
        fmt = "<" if self.elf.little_endian else ">"
        if self.elf.elfclass == 32:
            fmt += "I"
            size = 4
        else:
            fmt += "Q"
            size = 8
        offset = self.ld_constants[offset_constant]
        return struct.unpack(fmt, data[offset:offset + size])[0]

    @property
    def obj_handles(self):
        """
        Returns the value from the device struct handles field, pointing to the
        array of handles for devices this device depends on.
        """
        if self.__handles is None:
            self.__handles = self._read_pointer("_DEVICE_STRUCT_HANDLES_OFFSET")
        return self.__handles

    @property
    def obj_pm(self):
        """
        Returns the value from the device struct pm field, pointing to the
        pm struct for this device.
        """
        if self.__pm is None:
            self.__pm = self._read_pointer("_DEVICE_STRUCT_PM_OFFSET")
        return self.__pm
class PMDevice:
    """
    Represents information about a pm_device object and its references to other objects.
    """
    def __init__(self, elf, ld_constants, sym, addr):
        self.elf = elf
        self.ld_constants = ld_constants
        self.sym = sym
        self.addr = addr
        # Lazily-decoded pm_device flags word (filled on first is_domain()).
        self.__flags = None

    def is_domain(self):
        """
        Checks if the device that this pm struct belongs is a power domain.
        """
        if self.__flags is None:
            raw = symbol_data(self.elf, self.sym)
            is_32bit = self.elf.elfclass == 32
            fmt = ("<" if self.elf.little_endian else ">") + ("I" if is_32bit else "Q")
            width = 4 if is_32bit else 8
            start = self.ld_constants["_PM_DEVICE_STRUCT_FLAGS_OFFSET"]
            self.__flags = struct.unpack(fmt, raw[start:start + width])[0]
        return self.__flags & (1 << self.ld_constants["_PM_DEVICE_FLAG_PD"])
class Handles:
    """Associates a handle-array symbol with its decoded contents."""

    def __init__(self, sym, addr, handles, node):
        self.sym, self.addr = sym, addr          # ELF symbol and its address
        self.handles, self.node = handles, node  # decoded values, DT node
        # Filled in later while dependencies are post-processed.
        self.dep_ord = self.dev_deps = self.ext_deps = None
def main():
parse_args()
assert args.kernel, "--kernel ELF required to extract data"
elf = ELFFile(open(args.kernel, "rb"))
edtser = os.path.join(os.path.split(args.kernel)[0], "edt.pickle")
with open(edtser, 'rb') as f:
edt = pickle.load(f)
pm_devices = {}
devices = []
handles = []
# Leading _ are stripped from the stored constant key
want_constants = set([args.start_symbol,
"_DEVICE_STRUCT_SIZEOF",
"_DEVICE_STRUCT_HANDLES_OFFSET"])
if args.num_dynamic_devices != 0:
want_constants.update(["_PM_DEVICE_FLAG_PD",
"_DEVICE_STRUCT_PM_OFFSET",
"_PM_DEVICE_STRUCT_FLAGS_OFFSET"])
ld_constants = dict()
for section in elf.iter_sections():
if isinstance(section, SymbolTableSection):
for sym in section.iter_symbols():
if sym.name in want_constants:
ld_constants[sym.name] = sym.entry.st_value
continue
if sym.entry.st_info.type != 'STT_OBJECT':
continue
if sym.name.startswith("__device"):
addr = sym.entry.st_value
if sym.name.startswith("__device_"):
devices.append(Device(elf, ld_constants, sym, addr))
debug("device %s" % (sym.name,))
elif sym.name.startswith("__devicehdl_"):
hdls = symbol_handle_data(elf, sym)
# The first element of the hdls array is the dependency
# ordinal of the device, which identifies the devicetree
# node.
node = edt.dep_ord2node[hdls[0]] if (hdls and hdls[0] != 0) else None
handles.append(Handles(sym, addr, hdls, node))
debug("handles %s %d %s" % (sym.name, hdls[0] if hdls else -1, node))
if sym.name.startswith("__pm_device__") and not sym.name.endswith("_slot"):
addr = sym.entry.st_value
pm_devices[addr] = PMDevice(elf, ld_constants, sym, addr)
debug("pm device %s" % (sym.name,))
assert len(want_constants) == len(ld_constants), "linker map data incomplete"
devices = sorted(devices, key = lambda k: k.sym.entry.st_value)
device_start_addr = ld_constants[args.start_symbol]
device_size = 0
assert len(devices) == len(handles), 'mismatch devices and handles'
used_nodes = set()
for handle in handles:
handle.device = None
for device in devices:
if handle.addr == device.obj_handles:
handle.device = device
break
device = handle.device
assert device, 'no device for %s' % (handle.sym.name,)
device.handle = handle
if device_size == 0:
device_size = device.sym.entry.st_size
# The device handle is one plus the ordinal of this device in
# the device table.
device.dev_handle = 1 + int((device.sym.entry.st_value - device_start_addr) / device_size)
debug("%s dev ordinal %d" % (device.sym.name, device.dev_handle))
n = handle.node
if n is not None:
debug("%s dev ordinal %d\n\t%s" % (n.path, device.dev_handle, ' ; '.join(str(_) for _ in handle.handles)))
used_nodes.add(n)
n.__device = device
else:
debug("orphan %d" % (device.dev_handle,))
hv = handle.handles
hvi = 1
handle.dev_deps = []
handle.dev_injected = []
handle.dev_sups = []
hdls = handle.dev_deps
while hvi < len(hv):
h = hv[hvi]
if h == DEVICE_HANDLE_ENDS:
break
if h == DEVICE_HANDLE_SEP:
if hdls == handle.dev_deps:
hdls = handle.dev_injected
else:
hdls = handle.dev_sups
else:
hdls.append(h)
n = edt
hvi += 1
# Compute the dependency graph induced from the full graph restricted to the
# the nodes that exist in the application. Note that the edges in the
# induced graph correspond to paths in the full graph.
root = edt.dep_ord2node[0]
assert root not in used_nodes
for n in used_nodes:
# Where we're storing the final set of nodes: these are all used
n.__depends = set()
n.__supports = set()
deps = set(n.depends_on)
debug("\nNode: %s\nOrig deps:\n\t%s" % (n.path, "\n\t".join([dn.path for dn in deps])))
while len(deps) > 0:
dn = deps.pop()
if dn in used_nodes:
# this is used
n.__depends.add(dn)
elif dn != root:
# forward the dependency up one level
for ddn in dn.depends_on:
deps.add(ddn)
debug("Final deps:\n\t%s\n" % ("\n\t".join([ _dn.path for _dn in n.__depends])))
sups = set(n.required_by)
debug("\nOrig sups:\n\t%s" % ("\n\t".join([dn.path for dn in sups])))
while len(sups) > | |
mu: Population size of the GA
:param lambda_: Offpsring size of the GA
:param population: Initial population of candidates to be used by the MIES
:param parameters: Parameters object to be used by the GA
"""
def __init__(self, n, fitnessFunction, budget, mu, lambda_, population, parameters=None):
    """Assemble the GA's operator functions and hand off to the base class."""
    if parameters is None:
        parameters = Parameters(n=n, budget=budget, mu=mu, lambda_=lambda_)

    # Wrapper functions hide the extra, algorithm-specific arguments.
    def select(pop, new_pop, _, params):
        return Sel.bestGA(pop, new_pop, params)

    def mutateParameters(_):
        # The only actual parameter mutation is the self-adaptive
        # step-size of each individual.
        pass

    functions = {
        'recombine': Rec.random,
        'mutate': partial(Mut.mutateMixedInteger, options=options,
                          num_options_per_module=num_options_per_module),
        'select': select,
        'mutateParameters': mutateParameters,
    }
    super(GAOptimizer, self).__init__(population, fitnessFunction, budget, functions, parameters)
class MIESOptimizer(EvolutionaryOptimizer):
    """
    A Mixed-Integer Evolution Strategy (MIES) that evolves an Evolution
    Strategy (ES) for a given fitness function.

    :param n: Dimensionality of the search-space for the MIES
    :param fitnessFunction: Fitness function the MIES should use to evaluate candidate solutions
    :param budget: The budget for the MIES
    :param mu: Population size of the MIES
    :param lambda_: Offpsring size of the MIES
    :param population: Initial population of candidates to be used by the MIES
    :param parameters: Parameters object to be used by the MIES
    """
    def __init__(self, n, mu, lambda_, population, fitnessFunction, budget, parameters=None):
        if parameters is None:
            parameters = Parameters(n=n, budget=budget, mu=mu, lambda_=lambda_)

        # Wrapper functions hide the extra, algorithm-specific arguments.
        def select(pop, new_pop, _, params):
            return Sel.bestGA(pop, new_pop, params)

        def mutateParameters(_):
            # The only actual parameter mutation is the self-adaptive
            # step-size of each individual.
            pass

        functions = {
            'recombine': Rec.MIES_recombine,
            'mutate': partial(Mut.MIES_Mutate, options=options,
                              num_options=num_options_per_module),
            'select': select,
            'mutateParameters': mutateParameters,
        }
        super(MIESOptimizer, self).__init__(
            population, fitnessFunction, budget, functions, parameters)
class CustomizedES(EvolutionaryOptimizer):
    """
    This function accepts a dictionary of options 'opts' which selects from a large range of different
    functions and combinations of those. Instrumental in Evolving Evolution Strategies
    :param n: Dimensionality of the problem to be solved
    :param fitnessFunction: Function to determine the fitness of an individual
    :param budget: Number of function evaluations allowed for this algorithm
    :param mu: Number of individuals that form the parents of each generation
    :param lambda_: Number of individuals in the offspring of each generation
    :param opts: Dictionary containing the options (elitist, active, threshold, etc) to be used
    :param values: Dictionary containing initial values for initializing (some of) the parameters
    """
    # TODO: make dynamically dependent
    # Option keys defaulted to False (bool) / None (string) when absent.
    bool_default_opts = ['active', 'elitist', 'mirrored', 'orthogonal', 'sequential', 'threshold', 'tpa']
    string_default_opts = ['base-sampler', 'ipop', 'selection', 'weights_option']

    def __init__(self, n, fitnessFunction, budget, mu=None, lambda_=None, opts=None, values=None):
        if opts is None:
            opts = dict()
        self.addDefaults(opts)
        self.n = n
        # Fixed search-space box: [-5, 5]^n.
        l_bound = ones((n, 1)) * -5
        u_bound = ones((n, 1)) * 5
        # Derive lambda_/mu that respect the selected options (tpa, pairwise).
        lambda_, eff_lambda, mu = self.calculateDependencies(opts, lambda_, mu)

        selector = Sel.pairwise if opts['selection'] == 'pairwise' else Sel.best

        def select(pop, new_pop, _, param):
            return selector(pop, new_pop, param)

        # Pick the lowest-level sampler
        if opts['base-sampler'] == 'quasi-sobol':
            sampler = Sam.QuasiGaussianSobolSampling(n)
        elif opts['base-sampler'] == 'quasi-halton' and Sam.halton_available:
            sampler = Sam.QuasiGaussianHaltonSampling(n)
        else:
            sampler = Sam.GaussianSampling(n)

        # Create an orthogonal sampler using the determined base_sampler
        if opts['orthogonal']:
            orth_lambda = eff_lambda
            if opts['mirrored']:
                # Mirroring halves the number of independent samples needed.
                orth_lambda = max(orth_lambda // 2, 1)
            sampler = Sam.OrthogonalSampling(n, lambda_=orth_lambda, base_sampler=sampler)

        # Create a mirrored sampler using the sampler (structure) chosen so far
        if opts['mirrored']:
            sampler = Sam.MirroredSampling(n, base_sampler=sampler)

        parameter_opts = {'n': n, 'budget': budget, 'mu': mu, 'lambda_': lambda_, 'u_bound': u_bound,
                          'l_bound': l_bound,
                          'weights_option': opts['weights_option'], 'active': opts['active'],
                          'elitist': opts['elitist'],
                          'sequential': opts['sequential'], 'tpa': opts['tpa'], 'local_restart': opts['ipop'],
                          'values': values,
                          }

        # In case of pairwise selection, sequential evaluation may only stop after 2mu instead of mu individuals
        mu_int = int(1 + floor(mu * (eff_lambda - 1)))
        if opts['sequential'] and opts['selection'] == 'pairwise':
            parameter_opts['seq_cutoff'] = 2
        population = [FloatIndividual(n) for _ in range(mu_int)]

        # Init all individuals of the first population at the same random point in the search space
        # NOTE(review): uses randn (Gaussian) scaled by the box size, so the
        # point may fall outside [l_bound, u_bound] — confirm this is intended.
        wcm = (np.random.randn(n, 1) * (u_bound - l_bound)) + l_bound
        parameter_opts['wcm'] = wcm
        for individual in population:
            individual.genotype = copy(wcm)

        # We use functions/partials here to 'hide' the additional passing of parameters that are algorithm specific
        recombine = Rec.weighted
        mutate = partial(Mut.CMAMutation, sampler=sampler, threshold_convergence=opts['threshold'])

        functions = {
            'recombine': recombine,
            'mutate': mutate,
            'select': select,
            'mutateParameters': None
        }
        super(CustomizedES, self).__init__(population, fitnessFunction, budget, functions, parameter_opts)

    def addDefaults(self, opts):
        """Fill any missing option keys with their defaults, in place."""
        # Boolean defaults, if not given
        for op in self.bool_default_opts:
            if op not in opts:
                opts[op] = False

        # String defaults, if not given
        for op in self.string_default_opts:
            if op not in opts:
                opts[op] = None

    def calculateDependencies(self, opts, lambda_, mu):
        """Resolve lambda_/mu defaults and option-induced adjustments.

        Returns (lambda_, eff_lambda, mu) where eff_lambda is the number
        of offspring actually evaluated (tpa reserves two samples).
        """
        if lambda_ is None:
            # Standard CMA-ES population-size heuristic.
            lambda_ = int(4 + floor(3 * log(self.n)))
        eff_lambda = lambda_

        if mu is None:
            # mu is a fraction of lambda_ here, not an absolute count.
            mu = 0.5

        if opts['tpa']:
            # Two-point adaptation reserves two offspring slots.
            if lambda_ <= 4:
                lambda_ = 4
                eff_lambda = 2
            else:
                eff_lambda = lambda_ - 2

        if opts['selection'] == 'pairwise':
            # Explicitly force lambda_ to be even
            if lambda_ % 2 == 1:
                lambda_ -= 1
            if lambda_ == 0:  # If lambda_ is too low, make it be at least one pair
                lambda_ += 2

            if opts['tpa']:
                if lambda_ == 2:
                    lambda_ += 2
                eff_lambda = lambda_ - 2
            else:
                eff_lambda = lambda_

            if mu >= 0.5:  # We cannot select more than half of the population when only half is actually available
                mu /= 2

        return lambda_, eff_lambda, mu
def _baseAlgorithm(population, fitnessFunction, budget, functions, parameters, parallel=False):
    """
    Skeleton function for all ES algorithms
    Requires a population, fitness function handle, evaluation budget and the algorithm-specific functions
    The algorithm-specific functions should (roughly) behave as follows:
    * ``recombine`` The current population (mu individuals) is passed to this function, and should return a new population
      (lambda individuals), generated by some form of recombination
    * ``mutate`` An individual is passed to this function and should be mutated 'in-line', no return is expected
    * ``select`` The original parents, new offspring and used budget are passed to this function, and should return a new
      population (mu individuals) after (mu+lambda) or (mu,lambda) selection
    * ``mutateParameters`` Mutates and/or updates all parameters where required
    :param population: Initial set of individuals that form the starting population of the algorithm
    :param fitnessFunction: Function to determine the fitness of an individual
    :param budget: Number of function evaluations allowed for this algorithm
    :param functions: Dict with (lambda) functions 'recombine', 'mutate', 'select' and 'mutateParameters'
    :param parameters: Parameters object for storing relevant settings
    :param parallel: Can be set to True to enable parallel evaluation. This disables sequential evaluation
    :returns: The statistics generated by running the algorithm
    """
    optimizer = EvolutionaryOptimizer(
        population, fitnessFunction, budget, functions, parameters, parallel)
    optimizer.runOptimizer()
    stats = (optimizer.generation_size, optimizer.sigma_over_time,
             optimizer.fitness_over_time, optimizer.best_individual)
    return optimizer.used_budget, stats
def _localRestartAlgorithm(fitnessFunction, budget, functions, parameter_opts, parallel=False):
    """
    Run the base algorithm with a local-restart strategy.

    :param fitnessFunction: Function to determine the fitness of an individual
    :param budget:          Number of function evaluations allowed
    :param functions:       Dict with (lambda) functions 'recombine', 'mutate',
                            'select' and 'mutateParameters'
    :param parameter_opts:  Keyword options used to initialize the
                            :class:`~modea.Parameters.Parameters` object
    :param parallel:        If True, evaluate in parallel instead of sequentially
    :return:                The statistics generated by running the algorithm
    """
    # Placeholder to prevent a KeyError; the optimizer fills this in later.
    functions['mutateParameters'] = None
    optimizer = EvolutionaryOptimizer(None, fitnessFunction, budget,
                                      functions, parameter_opts, parallel)
    optimizer.runLocalRestartOptimizer()
    return (optimizer.generation_size, optimizer.sigma_over_time,
            optimizer.fitness_over_time, optimizer.best_individual)
def _onePlusOneES(n, fitnessFunction, budget):
    """
    Implementation of the default (1+1)-ES.

    :param n:               Dimensionality of the problem to be solved
    :param fitnessFunction: Function to determine the fitness of an individual
    :param budget:          Number of function evaluations allowed
    :returns:               The statistics generated by running the algorithm
    """
    optimizer = OnePlusOneOptimizer(n, fitnessFunction, budget)
    optimizer.runOptimizer()
    return (optimizer.generation_size, optimizer.sigma_over_time,
            optimizer.fitness_over_time, optimizer.best_individual)
def _CMA_ES(n, fitnessFunction, budget, mu=None, lambda_=None, elitist=False):
"""
Implementation | |
#! /usr/bin/env python3
###############################################################################
#
# Package: RoadNarrows Robotics Laelaps System V Init.d Console
#
# Link: https://github.com/roadnarrows-robotics/laelaps
#
# File: laelaps_init.d
#
## \file
##
## $LastChangedDate: 2016-03-18 09:57:27 -0600 (Fri, 18 Mar 2016) $
## $Rev: 4354 $
##
## \brief Graphical user interface console to control the Laelaps init.d
## daemons.
##
## \author <NAME> (<EMAIL>)
##
## \par Copyright
## \h_copy 2016-2017. RoadNarrows LLC.\n
## http://www.roadnarrows.com\n
## All Rights Reserved
##
# @EulaBegin@
# @EulaEnd@
#
###############################################################################
import sys
import os
import platform
import time
import math
import subprocess
import re
import threading
import getopt
from tkinter import *
from tkinter.constants import *
from tkinter.filedialog import *
import tkinter.font
# running as su does not necessarily have all paths setup - so fix up here
# NOTE(review): these hard-coded python2.7 site-packages paths predate the
# python3 shebang -- presumably legacy; verify they are still required.
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages")
sys.path.insert(0, "/prj/lib/python2.7/site-packages")

from Laelaps.Utils import *

## \brief Application version. Update as needed.
appVersion = '1.0.0'
## \brief Additional image search paths.
#
# Bug fix: a comma was missing after the "/usr/local/share/Laelaps/images/icons"
# entry, so Python's implicit string-literal concatenation silently fused it
# with the following entry into one bogus path, and the appkit images
# directory was never searched.
imagePath = [
    "/prj/share/Laelaps/images",
    "/prj/share/Laelaps/images/icons",
    "/prj/share/appkit/images",
    "/prj/share/appkit/images/icons",
    "/usr/local/share/Laelaps/images",
    "/usr/local/share/Laelaps/images/icons",
    "/usr/local/share/appkit/images",
    "/usr/local/share/appkit/images/icons",
]
## \brief Common foreground colors.
fgColors = {
    'normal': 'black',
    'ok': '#008800',
    'focus': '#0000aa',
    'warning': '#aa6600',
    'error': '#cc0000'
}

## \brief Status text foreground colors, keyed by service status string.
statusText = {
    'unknown': fgColors['normal'],
    'running': fgColors['ok'],
    'stopped': fgColors['error']
}

# pre-compiled regular expressions
# NOTE(review): presumably matched against the output of the service
# start/stop/status commands invoked elsewhere in this file -- verify there.
reDone = re.compile(r"done", re.IGNORECASE)
reFail = re.compile(r"fail", re.IGNORECASE)
# two-part results, e.g. a stop result followed by a start result
reDoneDone = re.compile(r"done.*done", re.DOTALL | re.IGNORECASE)
reFailDone = re.compile(r"fail.*done", re.DOTALL | re.IGNORECASE)
# SysV-style "is running" / "is not running" status responses
reRunning = re.compile(r"\s*is\s*running", re.IGNORECASE)
reNotRunning = re.compile(r"\s*is\s*not\s*running", re.IGNORECASE)
# systemd-style "Active: active"/"Active: inactive" status responses
reActive = re.compile(r"\s*active:\s*active.*", re.IGNORECASE)
reInactive = re.compile(r"\s*active:\s*inactive.*", re.IGNORECASE)
#
## \brief Determine the OS specifics.
##
## \return Return a 3-tuple (osname, version, id)
#
def os_distribution():
    # Return the OS distribution as a 3-tuple (osname, version, id).
    # platform.linux_distribution() was deprecated in Python 3.5 and removed
    # in 3.8, so fall back to placeholder values when it is unavailable.
    fallback = ('n/a', 'n/a', 'n/a')
    try:
        distro = platform.linux_distribution()
    except:  # deliberately broad: any failure yields the placeholder tuple
        return fallback
    return distro
#
## \brief Determine if OS version 1 is greater or equal to version 2.
##
## The version is expected to be in major[.minor] format.
##
## \param v1 Version 1 as a string, float, or int.
## \param v2 Version 2 as a string, float, or int.
##
## \return Returns True or False.
#
def os_ver_ge(v1, v2):
    # Determine if OS version v1 is greater or equal to version v2.
    # Versions are expected in "major[.minor]" format and may be given as
    # str, float, or int; unparsable strings fall back to 0.
    #
    # Bug fix: the original code tested ``type(v1) is 'str'`` -- a comparison
    # between a type object and a string literal that is always False -- so
    # string versions were never converted and ``v1 >= v2`` raised a
    # TypeError on Python 3 when mixing str and numeric arguments.
    def _as_number(v):
        # Coerce a version string to float, falling back to 0 (as before).
        if isinstance(v, str):
            try:
                return float(v)
            except ValueError:
                return 0
        return v

    return _as_number(v1) >= _as_number(v2)
# ------------------------------------------------------------------------------
# Class window
# ------------------------------------------------------------------------------
##
## \brief Window class supporting application.
##
class window(Frame):
#
## \brief Constructor.
##
## \param master Window parent master widget.
## \param cnf Configuration dictionary.
## \param kw Keyword options.
#
def __init__(self, master=None, cnf={}, **kw):
    # initialize window data; strips application-specific keywords (e.g.
    # 'debug') before kw is forwarded to the Tkinter Frame constructor
    kw = self.initData(kw)
    self.m_imageLoader = ImageLoader(py_pkg='Laelaps.images',
                                     image_paths=imagePath)
    Frame.__init__(self, master=master, cnf=cnf, **kw)
    self.master.title("Laelaps Init.d Console")
    # application icon (best effort - skipped if the image cannot be loaded)
    self.m_icons['app_icon'] = \
        self.m_imageLoader.loadImage("icons/LaelapsInitIcon.png")
    if self.m_icons['app_icon'] is not None:
        self.master.tk.call('wm', 'iconphoto', self.master._w,
                            self.m_icons['app_icon'])
    # create and show widgets
    self.createWidgets()
    self.grid(row=0, column=0, padx=5, pady=5)
    # schedule first refresh; autoRefresh is defined later in the class and
    # presumably re-schedules itself to poll service status -- verify there
    self.after(100, self.autoRefresh)
#
## \brief Initialize class state data.
##
## Any keywords for this application specific window that are not supported
## by the Frame Tkinter class must be removed.
##
## \param kw Keyword options.
##
## \return Modified keywords sans this specific class.
#
def initData(self, kw):
    # Initialize class state data and strip keywords not supported by the
    # Tkinter Frame base class; returns the pruned keyword dictionary.
    self.m_osdist = os_distribution()
    self.m_debug = False    # default debug level
    self.m_icons = {}       # loaded icons must stay referenced
    self.m_wBttn = {}       # button widgets
    self.m_svcKeys = [
        'laelaps_bsproxy', 'laelaps_roscore', 'laelaps_control',
        'laelaps_xbox', 'laelaps_teleop']
    self.m_svcDesc = {
        'laelaps_bsproxy': 'BotSense Proxy Server',
        'laelaps_roscore': 'ROS Master, Parameter Server, rosout logging node',
        'laelaps_control': 'Laelaps Control ROS node',
        'laelaps_xbox': 'HID Xbox360 daemon / ROS node',
        'laelaps_teleop': 'Laelaps Teleoperation ROS node'}
    self.m_lock = threading.Lock()
    # consume the application-specific 'debug' keyword
    if 'debug' in kw:
        self.m_debug = kw.pop('debug')
    # variables only used for debugging
    if self.m_debug:
        pass
    return kw
#
## \brief Create gui widgets with supporting data and show.
#
def createWidgets(self):
    # Build all gui panels, then the status bar once geometry has settled.
    for build in (self.createHeading, self.createLeftButtons,
                  self.createCenterPanel, self.createRightButtons):
        build()
    self.update_idletasks()
    self.createStatusBar()
#
## \brief Create top gui heading.
#
def createHeading(self):
    # Top heading row: RoadNarrows logo, console title, Laelaps logo.
    # RoadNarrows logo (falls back to text if the image is unavailable)
    w = Label(self)
    self.m_icons['rn_logo'] = self.m_imageLoader.loadImage("RNLogo48.png")
    if self.m_icons['rn_logo']:
        w.configure(image=self.m_icons['rn_logo'])
    else:
        w.configure(text='rn', anchor=W, width=5)
    w.grid(row=0, column=0, sticky=W)
    # centered console title
    w = Label(self, font=('Helvetica', 16), text='Laelaps Init.d Console',
              anchor=CENTER)
    w.grid(row=0, column=1, sticky=E+W)
    # Laelaps logo (falls back to text if the image is unavailable)
    w = Label(self)
    self.m_icons['laelaps_logo'] = \
        self.m_imageLoader.loadImage("icon_laelaps_logo.png")
    if self.m_icons['laelaps_logo']:
        w.configure(image=self.m_icons['laelaps_logo'], anchor=E)
    else:
        w.configure(text='laelaps', anchor=E, width=5)
    w.grid(row=0, column=2, sticky=E)
#
## \brief Create gui left hand side buttons.
#
def createLeftButtons(self):
    # Left-hand column of service action buttons. All three actions start
    # disabled until at least one service is selected (see cbSelect).
    wframe = Frame(self, borderwidth=2, relief='ridge')
    wframe.grid(row=1, column=0, padx=1, pady=3, sticky=N+W+E)
    actions = (
        ("Start", "icon_play.png", self.cbStartServices),
        ("Stop", "icon_stop.png", self.cbStopServices),
        ("Restart", "icon_resume.png", self.cbRestartServices),
    )
    for row, (label, icon, callback) in enumerate(actions):
        w = self.createButton(wframe, label, icon, callback)
        w['state'] = 'disabled'
        w.grid(row=row, column=0, sticky=N+E+W)
#
## \brief Create gui center panel.
#
def createCenterPanel(self):
    # Center panel: one row per service with a select checkbox, colorized
    # status, service name, and description.
    wframe = Frame(self)
    wframe['borderwidth'] = 2
    wframe['relief'] = 'ridge'
    wframe.grid(row=1, column=1, padx=1, pady=3, sticky=N+W+E)
    row = 0
    col = 0
    # column heading row
    for text in ['Sel', 'Status', 'Service', 'Description']:
        w = Label(wframe, text=text, foreground=fgColors['focus'])
        w['font'] = ('Helvetica', 10, "bold")
        # NOTE(review): 'stick' (not 'sticky') appears to rely on Tk
        # accepting unambiguous option-name prefixes; prefer the full name.
        w.grid(row=row, column=col, padx=3, pady=3, stick=W)
        col += 1
    row += 1
    colspan = len(self.m_svcKeys)  # NOTE(review): assigned but never used here
    # alternating row background colors for readability
    bg0 = '#ffffcc'
    bg1 = wframe['bg']
    bg = bg0
    self.m_service = { }
    for key in self.m_svcKeys:
        col = 0
        service = { }
        # NOTE(review): IntVar(0) passes 0 as the Tkinter 'master' argument,
        # not as an initial value; it behaves like IntVar() only because a
        # falsy master falls back to the default root -- confirm intent.
        service['sel'] = IntVar(0)
        w = Checkbutton(wframe, bg=bg, variable=service['sel'],
                        command=self.cbSelect)
        w.grid(row=row, column=col, padx=1, pady=3, sticky=W)
        col += 1
        # status label; foreground color tracks the status string
        service['status'] = StringVar()
        service['status'].set("unknown")
        w = Label(wframe, bg=bg, textvariable=service['status'])
        w['fg'] = statusText[service['status'].get()]
        w['width'] = 8
        w.grid(row=row, column=col, padx=1, pady=3, stick=W)
        service['wstatus'] = w
        col += 1
        # service name
        w = Label(wframe, bg=bg, anchor=W, justify=LEFT, text=key)
        w.grid(row=row, column=col, padx=1, pady=3, stick=W+E)
        col += 1
        # service description
        w = Label(wframe, bg=bg, anchor=W, justify=LEFT, text=self.m_svcDesc[key])
        w.grid(row=row, column=col, padx=1, pady=3, stick=W+E)
        col += 1
        self.m_service[key] = service
        # toggle the background color for the next row
        if bg == bg0:
            bg = bg1
        else:
            bg = bg0
        row += 1
#
## \brief Create gui right hand side buttons.
#
def createRightButtons(self):
    # Right-hand column of console control buttons.
    wframe = Frame(self, borderwidth=2, relief='ridge')
    wframe.grid(row=1, column=2, padx=1, pady=3, sticky=N+W+E)
    # refresh service status
    w = self.createButton(wframe, "Refresh", "icon_refresh.png",
                          self.cbRefreshStatus)
    w.grid(row=0, column=0, sticky=N+E+W)
    # save (disabled until there is something to save)
    w = self.createButton(wframe, "Save", "icon_floppy.png", self.cbSave)
    w['state'] = 'disabled'
    w.grid(row=1, column=0, sticky=N+E+W)
    # quit the console
    w = self.createButton(wframe, "Quit", "icon_exit.png", self.destroy)
    w.grid(row=2, column=0, sticky=N+E+W)
#
## \brief Create gui multi-line status bar at bottom of gui window.
#
def createStatusBar(self):
    # Scrollable, multi-line status bar spanning the bottom of the window.
    wframe = Frame(self, borderwidth=2, relief='ridge')
    wframe.grid(row=2, column=0, columnspan=3, padx=1, pady=3, sticky=N+E+W+S)
    self.m_wScrollBar = Scrollbar(wframe)
    self.m_wScrollBar.grid(row=0, column=1, sticky=N+S)
    # read-only text widget; writers must temporarily re-enable it
    self.m_wStatusBar = Text(wframe, width=105, height=10, wrap=WORD,
                             relief='flat', fg=fgColors['normal'],
                             state='disabled')
    self.m_wStatusBar.grid(row=0, column=0, padx=3, pady=3, sticky=N+E+W+S)
    # cross-attach the text widget and its scrollbar
    self.m_wStatusBar['yscrollcommand'] = self.m_wScrollBar.set
    self.m_wScrollBar['command'] = self.m_wStatusBar.yview
#
## \brief Create button.
##
## \param parent Parent widget.
## \param text Button text.
## \param imagefile Image file name. None for no image.
## \param command Callback for button push.
## \param fg Foreground text color.
##
## \return Button widget.
#
def createButton(self, parent, text, imagefile, command, fg='black'):
    # Create a console push button, register it in self.m_wBttn under a
    # lowercased key derived from its text, and return it.
    key = str.lower(text.replace("\n", "_"))
    self.m_icons[key] = self.m_imageLoader.loadImage(imagefile)
    w = Button(parent, text=text, fg=fg, command=command)
    if self.m_icons[key]:
        # icon available: image left of text, fixed pixel width
        w.configure(image=self.m_icons[key], compound=LEFT, padx=0, pady=0,
                    anchor=W, width=105)
    else:
        # text-only fallback: centered, width in characters
        w.configure(anchor=CENTER, width=10)
    self.m_wBttn[key] = w
    return w
#
## \brief Clear all select checkboxes.
#
def clearSelect(self):
    # Uncheck every service select checkbox, then sync the button states.
    for svc in (self.m_service[k] for k in self.m_svcKeys):
        svc['sel'].set(0)
    self.cbSelect()
#
## \brief Checkbox change state callback.
#
def cbSelect(self):
bttns = ['start', 'stop', 'restart']
nselected = 0
for key in self.m_svcKeys:
if self.m_service[key]['sel'].get():
nselected += 1
if nselected > 0:
state = 'normal'
else:
state = 'disabled'
for key in bttns:
self.m_wBttn[key]['state'] = state
#
## \brief Start selected services callback.
#
def cbStartServices(self):
    # Start every selected service, echoing progress to the status bar.
    self.showSbInfo("Starting selected services...\n")
    for key in self.m_svcKeys:
        if not self.m_service[key]['sel'].get():
            continue
        text = " Starting {0} service".format(key)
        self.showSbInfo(text)
        # keep the gui responsive while the service command runs
        self.update_idletasks()
        self.showSbResult(text, self.execStart(key))
    # update the displayed status of all services
    self.refresh()
#
## \brief Stop selected services callback.
#
def cbStopServices(self):
    # Stop every selected service, echoing progress to the status bar.
    self.showSbInfo("Stopping selected services...\n")
    for key in self.m_svcKeys:
        if not self.m_service[key]['sel'].get():
            continue
        text = " Stopping {0} service".format(key)
        self.showSbInfo(text)
        # keep the gui responsive while the service command runs
        self.update_idletasks()
        self.showSbResult(text, self.execStop(key))
    # update the displayed status of all services
    self.refresh()
#
## \brief Restart selected services callback.
#
def cbRestartServices(self):
    # Restart every selected service, echoing progress to the status bar.
    self.showSbInfo("Restarting selected services...\n")
    for key in self.m_svcKeys:
        if not self.m_service[key]['sel'].get():
            continue
        text = " Restarting {0} service".format(key)
        self.showSbInfo(text)
        # keep the gui responsive while the service command runs
        self.update_idletasks()
        self.showSbResult(text, self.execRestart(key))
    # update the displayed status of all services
    self.refresh()
#
## | |
defined in subset_lists_file (see above).
limit_to: Limit the dataset to the first #limit_to frames (after other
filters have been applied).
limit_sequences_to: Limit the dataset to the first
#limit_sequences_to sequences (after other sequence filters have been
applied but before frame-based filters).
pick_sequence: A list of sequence names to restrict the dataset to.
exclude_sequence: A list of the names of the sequences to exclude.
limit_category_to: Restrict the dataset to the given list of categories.
dataset_root: The root folder of the dataset; all the paths in jsons are
specified relative to this root (but not json paths themselves).
load_images: Enable loading the frame RGB data.
load_depths: Enable loading the frame depth maps.
load_depth_masks: Enable loading the frame depth map masks denoting the
depth values used for evaluation (the points consistent across views).
load_masks: Enable loading frame foreground masks.
load_point_clouds: Enable loading sequence-level point clouds.
max_points: Cap on the number of loaded points in the point cloud;
if reached, they are randomly sampled without replacement.
mask_images: Whether to mask the images with the loaded foreground masks;
0 value is used for background.
mask_depths: Whether to mask the depth maps with the loaded foreground
masks; 0 value is used for background.
image_height: The height of the returned images, masks, and depth maps;
aspect ratio is preserved during cropping/resizing.
image_width: The width of the returned images, masks, and depth maps;
aspect ratio is preserved during cropping/resizing.
box_crop: Enable cropping of the image around the bounding box inferred
from the foreground region of the loaded segmentation mask; masks
and depth maps are cropped accordingly; cameras are corrected.
box_crop_mask_thr: The threshold used to separate pixels into foreground
and background based on the foreground_probability mask; if no value
is greater than this threshold, the loader lowers it and repeats.
box_crop_context: The amount of additional padding added to each
dimension of the cropping bounding box, relative to box size.
remove_empty_masks: Removes the frames with no active foreground pixels
in the segmentation mask after thresholding (see box_crop_mask_thr).
n_frames_per_sequence: If > 0, randomly samples #n_frames_per_sequence
frames in each sequences uniformly without replacement if it has
more frames than that; applied before other frame-level filters.
seed: The seed of the random generator sampling #n_frames_per_sequence
random frames per sequence.
sort_frames: Enable frame annotations sorting to group frames from the
same sequences together and order them by timestamps
eval_batches: A list of batches that form the evaluation set;
list of batch-sized lists of indices corresponding to __getitem__
of this class, thus it can be used directly as a batch sampler.
"""
# Type of the per-frame annotation records parsed from the annotations file
# (a ClassVar, so it is configuration rather than a dataclass field).
frame_annotations_type: ClassVar[
    Type[types.FrameAnnotation]
] = types.FrameAnnotation

# --- file locations and sequence/frame filtering (see class docstring) ---
path_manager: Optional[PathManager] = None
frame_annotations_file: str = ""
sequence_annotations_file: str = ""
subset_lists_file: str = ""
subsets: Optional[List[str]] = None
limit_to: int = 0
limit_sequences_to: int = 0
pick_sequence: Sequence[str] = ()
exclude_sequence: Sequence[str] = ()
limit_category_to: Sequence[int] = ()
dataset_root: str = ""
# --- per-frame data loading switches ---
load_images: bool = True
load_depths: bool = True
load_depth_masks: bool = True
load_masks: bool = True
load_point_clouds: bool = False
max_points: int = 0
mask_images: bool = False
mask_depths: bool = False
# --- output sizing and bounding-box cropping ---
image_height: Optional[int] = 256
image_width: Optional[int] = 256
box_crop: bool = False
box_crop_mask_thr: float = 0.4
box_crop_context: float = 1.0
# --- frame sampling / ordering / evaluation ---
remove_empty_masks: bool = False
n_frames_per_sequence: int = -1
seed: int = 0
sort_frames: bool = False
eval_batches: Optional[List[List[int]]] = None

# Populated by __post_init__ (loaded from the json files), not by callers.
frame_annots: List[FrameAnnotsEntry] = field(init=False)
seq_annots: Dict[str, types.SequenceAnnotation] = field(init=False)
def __post_init__(self) -> None:
    """Load, sort, and filter the annotations right after construction."""
    # pyre-fixme[16]: `ImplicitronDataset` has no attribute `subset_to_image_path`.
    self.subset_to_image_path = None
    # order matters: frames and sequences must be loaded before the
    # subset lists are applied and the database is filtered
    self._load_frames()
    self._load_sequences()
    if self.sort_frames:
        self._sort_frames()
    self._load_subset_lists()
    self._filter_db()  # also computes sequence indices
    print(str(self))
def seq_frame_index_to_dataset_index(
    self,
    seq_frame_index: Union[
        List[List[Union[Tuple[str, int, str], Tuple[str, int]]]],
    ],
) -> List[List[int]]:
    """
    Obtain indices into the dataset object given a list of frames specified as
    `seq_frame_index = List[List[Tuple[sequence_name:str, frame_number:int]]]`.

    Each tuple may optionally carry a third element, the image path, which
    is then checked for consistency with the stored frame annotation.
    """
    # TODO: check the frame numbers are unique
    # Per-sequence lookup table: frame_number -> dataset index.
    _dataset_seq_frame_n_index = {
        seq: {
            self.frame_annots[idx]["frame_annotation"].frame_number: idx
            for idx in seq_idx
        }
        for seq, seq_idx in self._seq_to_idx.items()
    }

    def _get_batch_idx(seq_name, frame_no, path=None) -> int:
        # Resolve one (sequence, frame[, path]) tuple to a dataset index.
        idx = _dataset_seq_frame_n_index[seq_name][frame_no]
        if path is not None:
            # Check that the loaded frame path is consistent
            # with the one stored in self.frame_annots.
            assert os.path.normpath(
                self.frame_annots[idx]["frame_annotation"].image.path
            ) == os.path.normpath(
                path
            ), f"Inconsistent batch {seq_name, frame_no, path}."
        return idx

    batches_idx = [[_get_batch_idx(*b) for b in batch] for batch in seq_frame_index]
    return batches_idx
def __str__(self) -> str:
    # short human-readable summary (printed by __post_init__)
    return f"ImplicitronDataset #frames={len(self.frame_annots)}"
def __len__(self) -> int:
    # dataset length == number of (filtered) frame annotations
    return len(self.frame_annots)
def _get_frame_type(self, entry: FrameAnnotsEntry) -> Optional[str]:
    # the frame type is the name of the subset the frame belongs to
    return entry["subset"]
def __getitem__(self, index) -> FrameData:
    """Load and return the FrameData for the frame at `index`.

    Depending on the `load_*` flags, optionally loads (and crops/resizes)
    the foreground mask, RGB image, depth map, camera, and the
    sequence-level point cloud.
    """
    if index >= len(self.frame_annots):
        raise IndexError(f"index {index} out of range {len(self.frame_annots)}")

    entry = self.frame_annots[index]["frame_annotation"]
    point_cloud = self.seq_annots[entry.sequence_name].point_cloud
    frame_data = FrameData(
        frame_number=_safe_as_tensor(entry.frame_number, torch.long),
        frame_timestamp=_safe_as_tensor(entry.frame_timestamp, torch.float),
        sequence_name=entry.sequence_name,
        sequence_category=self.seq_annots[entry.sequence_name].category,
        camera_quality_score=_safe_as_tensor(
            self.seq_annots[entry.sequence_name].viewpoint_quality_score,
            torch.float,
        ),
        # only available when the sequence carries a point cloud
        point_cloud_quality_score=_safe_as_tensor(
            point_cloud.quality_score, torch.float
        )
        if point_cloud is not None
        else None,
    )

    # The rest of the fields are optional
    frame_data.frame_type = self._get_frame_type(self.frame_annots[index])

    # foreground mask; also yields the crop box when box_crop is enabled
    (
        frame_data.fg_probability,
        frame_data.mask_path,
        frame_data.bbox_xywh,
        clamp_bbox_xyxy,
    ) = self._load_crop_fg_probability(entry)

    scale = 1.0
    if self.load_images and entry.image is not None:
        # original image size
        frame_data.image_size_hw = _safe_as_tensor(entry.image.size, torch.long)

        (
            frame_data.image_rgb,
            frame_data.image_path,
            frame_data.mask_crop,
            scale,
        ) = self._load_crop_images(
            entry, frame_data.fg_probability, clamp_bbox_xyxy
        )

    if self.load_depths and entry.depth is not None:
        (
            frame_data.depth_map,
            frame_data.depth_path,
            frame_data.depth_mask,
        ) = self._load_mask_depth(entry, clamp_bbox_xyxy, frame_data.fg_probability)

    if entry.viewpoint is not None:
        # camera is corrected for any cropping/resizing applied above
        frame_data.camera = self._get_pytorch3d_camera(
            entry,
            scale,
            clamp_bbox_xyxy,
        )

    if self.load_point_clouds and point_cloud is not None:
        frame_data.sequence_point_cloud_path = pcl_path = os.path.join(
            self.dataset_root, point_cloud.path
        )
        frame_data.sequence_point_cloud = _load_pointcloud(
            self._local_path(pcl_path), max_points=self.max_points
        )

    return frame_data
def _load_crop_fg_probability(
    self, entry: types.FrameAnnotation
) -> Tuple[
    Optional[torch.Tensor],
    Optional[str],
    Optional[torch.Tensor],
    Optional[torch.Tensor],
]:
    """Load the foreground mask and, when `box_crop` is set, crop it.

    Returns (fg_probability, mask_path, bbox_xywh, clamp_bbox_xyxy); all
    entries are None when masks are neither loaded nor needed for cropping.
    """
    fg_probability, full_path, bbox_xywh, clamp_bbox_xyxy = (
        None,
        None,
        None,
        None,
    )
    if (self.load_masks or self.box_crop) and entry.mask is not None:
        full_path = os.path.join(self.dataset_root, entry.mask.path)
        mask = _load_mask(self._local_path(full_path))

        if mask.shape[-2:] != entry.image.size:
            raise ValueError(
                f"bad mask size: {mask.shape[-2:]} vs {entry.image.size}!"
            )

        # bounding box of the thresholded foreground region
        bbox_xywh = torch.tensor(_get_bbox_from_mask(mask, self.box_crop_mask_thr))

        if self.box_crop:
            clamp_bbox_xyxy = _get_clamp_bbox(bbox_xywh, self.box_crop_context)
            mask = _crop_around_box(mask, clamp_bbox_xyxy, full_path)

        # nearest-neighbor resize avoids interpolating mask probabilities
        fg_probability, _, _ = self._resize_image(mask, mode="nearest")

    return fg_probability, full_path, bbox_xywh, clamp_bbox_xyxy
def _load_crop_images(
    self,
    entry: types.FrameAnnotation,
    fg_probability: Optional[torch.Tensor],
    clamp_bbox_xyxy: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, str, torch.Tensor, float]:
    """Load the frame's RGB image; optionally crop, resize, and mask it.

    Returns (image_rgb, image_path, mask_crop, scale), where `scale` is the
    resizing factor applied (needed later for camera correction).
    """
    assert self.dataset_root is not None and entry.image is not None
    path = os.path.join(self.dataset_root, entry.image.path)
    image_rgb = _load_image(self._local_path(path))

    if image_rgb.shape[-2:] != entry.image.size:
        raise ValueError(
            f"bad image size: {image_rgb.shape[-2:]} vs {entry.image.size}!"
        )

    if self.box_crop:
        assert clamp_bbox_xyxy is not None
        image_rgb = _crop_around_box(image_rgb, clamp_bbox_xyxy, path)

    image_rgb, scale, mask_crop = self._resize_image(image_rgb)

    if self.mask_images:
        # zero out background pixels
        assert fg_probability is not None
        image_rgb *= fg_probability

    return image_rgb, path, mask_crop, scale
def _load_mask_depth(
    self,
    entry: types.FrameAnnotation,
    clamp_bbox_xyxy: Optional[torch.Tensor],
    fg_probability: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, str, torch.Tensor]:
    """Load the frame's depth map and its evaluation mask.

    Returns (depth_map, depth_path, depth_mask); when depth masks are not
    loaded, `depth_mask` is all-ones with the depth map's shape.
    """
    entry_depth = entry.depth
    assert entry_depth is not None
    path = os.path.join(self.dataset_root, entry_depth.path)
    depth_map = _load_depth(self._local_path(path), entry_depth.scale_adjustment)

    if self.box_crop:
        assert clamp_bbox_xyxy is not None
        # the crop box is defined in image pixels; rescale it to depth-map
        # resolution before cropping
        depth_bbox_xyxy = _rescale_bbox(
            clamp_bbox_xyxy, entry.image.size, depth_map.shape[-2:]
        )
        depth_map = _crop_around_box(depth_map, depth_bbox_xyxy, path)

    depth_map, _, _ = self._resize_image(depth_map, mode="nearest")

    if self.mask_depths:
        # zero out background depth values
        assert fg_probability is not None
        depth_map *= fg_probability

    if self.load_depth_masks:
        assert entry_depth.mask_path is not None
        mask_path = os.path.join(self.dataset_root, entry_depth.mask_path)
        depth_mask = _load_depth_mask(self._local_path(mask_path))

        if self.box_crop:
            assert clamp_bbox_xyxy is not None
            # same rescale-then-crop treatment as for the depth map
            depth_mask_bbox_xyxy = _rescale_bbox(
                clamp_bbox_xyxy, entry.image.size, depth_mask.shape[-2:]
            )
            depth_mask = _crop_around_box(
                depth_mask, depth_mask_bbox_xyxy, mask_path
            )

        depth_mask, _, _ = self._resize_image(depth_mask, mode="nearest")
    else:
        depth_mask = torch.ones_like(depth_map)

    return depth_map, path, depth_mask
def _get_pytorch3d_camera(
self,
entry: types.FrameAnnotation,
scale: float,
clamp_bbox_xyxy: Optional[torch.Tensor],
) -> PerspectiveCameras:
entry_viewpoint = entry.viewpoint
assert entry_viewpoint is not None
# principal point and focal length
principal_point = torch.tensor(
entry_viewpoint.principal_point, dtype=torch.float
)
focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)
half_image_size_wh_orig = (
torch.tensor(list(reversed(entry.image.size)), dtype=torch.float) / 2.0
)
# first, we convert from the dataset's NDC convention to pixels
format = entry_viewpoint.intrinsics_format
if format.lower() == "ndc_norm_image_bounds":
# this is e.g. currently used in CO3D for storing intrinsics
rescale = half_image_size_wh_orig
elif format.lower() == "ndc_isotropic":
rescale = half_image_size_wh_orig.min()
else:
raise ValueError(f"Unknown intrinsics format: {format}")
# principal point and focal length in pixels
principal_point_px = half_image_size_wh_orig - principal_point * rescale
focal_length_px = focal_length * rescale
if self.box_crop:
assert clamp_bbox_xyxy is not None
principal_point_px -= clamp_bbox_xyxy[:2]
# now, convert from pixels to PyTorch3D v0.5+ NDC convention
if self.image_height is None or self.image_width is None:
out_size = list(reversed(entry.image.size))
else:
out_size = [self.image_width, self.image_height]
half_image_size_output = torch.tensor(out_size, dtype=torch.float) / 2.0
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""The :math:`B_0` unwarping transform formalism."""
from pathlib import Path
import attr
import numpy as np
from scipy import ndimage as ndi
from scipy.sparse import vstack as sparse_vstack, csr_matrix, kron
import nibabel as nb
import nitransforms as nt
from nitransforms.base import _as_homogeneous
from bids.utils import listify
def _clear_shifts(instance, attribute, value):
instance.shifts = None
return value
@attr.s(slots=True)
class B0FieldTransform:
    """Represents and applies the transform to correct for susceptibility distortions."""

    # B-Spline coefficients image(s) encoding the field (one per level)
    coeffs = attr.ib(default=None)
    # rigid (affine) transform; reassigning it clears the cached VSM
    xfm = attr.ib(default=nt.linear.Affine(), on_setattr=_clear_shifts)
    # cached voxel-shift map (VSM) computed by fit(); None until fitted
    shifts = attr.ib(default=None, init=False)

    def fit(self, spatialimage):
        r"""
        Generate the interpolation matrix (and the VSM with it).

        Implements Eq. :math:`\eqref{eq:1}`, interpolating :math:`f(\mathbf{s})`
        for all voxels in the target-image's extent.

        Parameters
        ----------
        spatialimage : :obj:`os.pathlike` or spatial image object
            The target image (or a path to it) defining the grid on which
            the voxel-shift map is evaluated.

        Returns
        -------
        updated : :obj:`bool`
            ``True`` if the internal field representation was fit,
            ``False`` if cache was valid and will be reused.
        """
        # Calculate the physical coordinates of target grid
        if isinstance(spatialimage, (str, bytes, Path)):
            spatialimage = nb.load(spatialimage)

        # Cache hit: same grid (shape and affine) as the previous fit
        if self.shifts is not None:
            newaff = spatialimage.affine
            newshape = spatialimage.shape
            if np.all(newshape == self.shifts.shape) and np.allclose(
                newaff, self.shifts.affine
            ):
                return False

        weights = []
        coeffs = []

        # Generate tensor-product B-Spline weights
        for level in listify(self.coeffs):
            self.xfm.reference = spatialimage
            # move the coefficients grid along with the rigid transform
            moved_cs = level.__class__(
                level.dataobj, self.xfm.matrix @ level.affine, level.header
            )
            wmat = grid_bspline_weights(spatialimage, moved_cs)
            weights.append(wmat)
            coeffs.append(level.get_fdata(dtype="float32").reshape(-1))

        # Interpolate the VSM (voxel-shift map)
        vsm = np.zeros(spatialimage.shape[:3], dtype="float32")
        vsm = (np.squeeze(np.hstack(coeffs).T) @ sparse_vstack(weights)).reshape(
            vsm.shape
        )

        # Cache the VSM together with the grid it was evaluated on
        self.shifts = nb.Nifti1Image(vsm, spatialimage.affine, None)
        self.shifts.header.set_intent("estimate", name="Voxel shift")
        self.shifts.header.set_xyzt_units("mm")
        return True

    def apply(
        self,
        spatialimage,
        pe_dir,
        ro_time,
        order=3,
        mode="constant",
        cval=0.0,
        prefilter=True,
        output_dtype=None,
    ):
        """
        Apply a transformation to an image, resampling on the reference spatial object.

        Parameters
        ----------
        spatialimage : `spatialimage`
            The image object containing the data to be resampled in reference
            space
        pe_dir : :obj:`str`
            The ``PhaseEncodingDirection`` metadata value (e.g. ``"j-"``);
            a trailing ``-`` reverses the polarity of the shifts.
        ro_time : :obj:`float`
            The total readout time in seconds.
        order : int, optional
            The order of the spline interpolation, default is 3.
            The order has to be in the range 0-5.
        mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
            Determines how the input image is extended when the resamplings overflows
            a border. Default is 'constant'.
        cval : float, optional
            Constant value for ``mode='constant'``. Default is 0.0.
        prefilter: bool, optional
            Determines if the image's data array is prefiltered with
            a spline filter before interpolation. The default is ``True``,
            which will create a temporary *float64* array of filtered values
            if *order > 1*. If setting this to ``False``, the output will be
            slightly blurred if *order > 1*, unless the input is prefiltered,
            i.e. it is the result of calling the spline filter on the original
            input.
        output_dtype : dtype, optional
            Data type of the resampled output (defaults to the input's dtype).

        Returns
        -------
        resampled : `spatialimage` or ndarray
            The data imaged after resampling to reference space.
        """
        # Ensure the vsm has been computed
        if isinstance(spatialimage, (str, bytes, Path)):
            spatialimage = nb.load(spatialimage)

        self.fit(spatialimage)
        vsm = self.shifts.get_fdata().copy()

        # Reverse shifts if reversed blips
        if pe_dir.endswith("-"):
            vsm *= -1.0

        # Generate warp field
        pe_axis = "ijk".index(pe_dir[0])

        # Map voxel coordinates applying the VSM
        # NOTE(review): ``xfm`` defaults to an identity Affine, so this
        # branch only runs if a caller explicitly sets xfm=None -- confirm.
        if self.xfm is None:
            ijk_axis = tuple([np.arange(s) for s in vsm.shape])
            voxcoords = np.array(
                np.meshgrid(*ijk_axis, indexing="ij"), dtype="float32"
            ).reshape(3, -1)
        else:
            # Map coordinates from reference to time-step
            hmc_xyz = self.xfm.map(self.xfm.reference.ndcoords.T)
            # Convert from RAS to voxel coordinates
            voxcoords = (
                np.linalg.inv(self.xfm.reference.affine)
                @ _as_homogeneous(np.vstack(hmc_xyz), dim=self.xfm.reference.ndim).T
            )[:3, ...]

        # shift sampling coordinates along the phase-encoding axis
        voxcoords[pe_axis, ...] += vsm.reshape(-1) * ro_time

        # Prepare data
        data = np.squeeze(np.asanyarray(spatialimage.dataobj))
        output_dtype = output_dtype or data.dtype

        # Resample
        resampled = ndi.map_coordinates(
            data,
            voxcoords,
            output=output_dtype,
            order=order,
            mode=mode,
            cval=cval,
            prefilter=prefilter,
        ).reshape(spatialimage.shape)

        moved = spatialimage.__class__(
            resampled, spatialimage.affine, spatialimage.header
        )
        moved.header.set_data_dtype(output_dtype)
        return moved

    def to_displacements(self, ro_time, pe_dir, itk_format=True):
        """
        Generate a NIfTI file containing a displacements field transform compatible with ITK/ANTs.

        The displacements field can be calculated following
        `Eq. (2) in the fieldmap fitting section
        <sdcflows.workflows.fit.fieldmap.html#mjx-eqn-eq%3Afieldmap-2>`__.

        Parameters
        ----------
        ro_time : :obj:`float`
            The total readout time in seconds.
        pe_dir : :obj:`str`
            The ``PhaseEncodingDirection`` metadata value.
        itk_format : :obj:`bool`, optional
            Whether the displacement vectors are flipped into ITK's LPS
            orientation.

        Returns
        -------
        spatialimage : :obj:`nibabel.nifti.Nifti1Image`
            A NIfTI 1.0 object containing the distortion.
        """
        return fmap_to_disp(self.shifts, ro_time, pe_dir, itk_format=itk_format)
def fmap_to_disp(fmap_nii, ro_time, pe_dir, itk_format=True):
    """
    Convert a voxel-shift map into an ITK/ANTs-compatible displacements field.

    The displacements field can be calculated following
    `Eq. (2) in the fieldmap fitting section
    <sdcflows.workflows.fit.fieldmap.html#mjx-eqn-eq%3Afieldmap-2>`__.

    Parameters
    ----------
    fmap_nii : :obj:`nibabel.spatialimages.SpatialImage`
        A loaded voxel-shift-map (VSM) image (``get_fdata`` is called on it;
        NOTE(review): a previous docstring said os.pathlike — verify callers).
    ro_time : :obj:`float`
        The total readout time in seconds
    pe_dir : :obj:`str`
        The ``PhaseEncodingDirection`` metadata value

    Returns
    -------
    spatialimage : :obj:`nibabel.nifti.Nifti1Image`
        A NIfTI 1.0 object containing the distortion.

    """
    # The blip polarity fixes the sign; the readout time fixes the scale.
    scale = -ro_time if pe_dir.endswith("-") else ro_time
    vsm = fmap_nii.get_fdata().copy() * scale
    pe_axis = "ijk".index(pe_dir[0])
    # Per-voxel displacement vectors, nonzero only along the PE axis (ijk units).
    ijk_deltas = np.zeros((vsm.size, 3), dtype="float32")
    ijk_deltas[:, pe_axis] = vsm.reshape(-1)
    # Rotate/scale into RAS via the affine; translations MUST NOT be applied.
    rotation = fmap_nii.affine.copy()
    rotation[:3, 3] = 0
    xyz_deltas = nb.affines.apply_affine(rotation, ijk_deltas)
    if itk_format:
        # ITK displacement vectors are in LPS orientation
        xyz_deltas[..., (0, 1)] *= -1.0
    # ITK NIfTI displacement fields are 5D (with an empty 4th dimension).
    fieldshape = vsm.shape[:3] + (1, 3)
    xyz_nii = nb.Nifti1Image(xyz_deltas.reshape(fieldshape), fmap_nii.affine)
    xyz_nii.header.set_intent("vector", name="SDC")
    xyz_nii.header.set_xyzt_units("mm")
    return xyz_nii
def disp_to_fmap(xyz_nii, ro_time, pe_dir, itk_format=True):
    """
    Convert a displacements field into a fieldmap in Hz.

    This is the inverse operation of :func:`fmap_to_disp`.

    Parameters
    ----------
    xyz_nii : :obj:`nibabel.nifti.Nifti1Image`
        A displacements-field image (``get_fdata`` is called on it;
        NOTE(review): a previous docstring said os.pathlike — verify callers).
    ro_time : :obj:`float`
        The total readout time in seconds.
    pe_dir : :obj:`str`
        The ``PhaseEncodingDirection`` metadata value.

    Returns
    -------
    spatialimage : :obj:`nibabel.nifti.Nifti1Image`
        A NIfTI 1.0 object containing the field in Hz.

    """
    xyz_deltas = np.squeeze(xyz_nii.get_fdata(dtype="float32")).reshape((-1, 3))
    if itk_format:
        # ITK displacement vectors are in LPS orientation
        xyz_deltas[:, (0, 1)] *= -1
    # The inverse affine maps mm back to voxel units and accounts for axis
    # reordering; translations MUST NOT be applied.
    inv_aff = np.linalg.inv(xyz_nii.affine)
    inv_aff[:3, 3] = 0
    ijk_deltas = nb.affines.apply_affine(inv_aff, xyz_deltas).astype("float32")
    # Keep only the phase-encoding component and undo the readout scaling.
    pe_axis = "ijk".index(pe_dir[0])
    vsm = ijk_deltas[:, pe_axis].reshape(xyz_nii.shape[:3])
    scale_factor = -ro_time if pe_dir.endswith("-") else ro_time
    fmap_nii = nb.Nifti1Image(vsm / scale_factor, xyz_nii.affine)
    fmap_nii.header.set_intent("estimate", name="Delta_B0 [Hz]")
    fmap_nii.header.set_xyzt_units("mm")
    return fmap_nii
def _cubic_bspline(d):
"""Evaluate the cubic bspline at distance d from the center."""
return np.piecewise(
d,
[d < 1.0, d >= 1.0],
[
lambda d: (4.0 - 6.0 * d ** 2 + 3.0 * d ** 3) / 6.0,
lambda d: (2.0 - d) ** 3 / 6.0,
],
)
def grid_bspline_weights(target_nii, ctrl_nii):
r"""
Evaluate tensor-product B-Spline weights on a grid.
For each of the *N* input samples :math:`(s_1, s_2, s_3)` and *K* control
points or *knots* :math:`\mathbf{k} =(k_1, k_2, k_3)`, the tensor-product
cubic B-Spline kernel weights are calculated:
.. math::
\Psi^3(\mathbf{k}, \mathbf{s}) =
\beta^3(s_1 - k_1) \cdot \beta^3(s_2 - k_2) \cdot \beta^3(s_3 - k_3),
\label{eq:2}\tag{2}
where each :math:`\beta^3` represents the cubic B-Spline for one dimension.
The 1D B-Spline kernel implementation uses :obj:`numpy.piecewise`, and is based on the
closed-form given by Eq. (6) of [Unser1999]_.
By iterating over dimensions, the data samples that fall outside of the compact
support of the tensor-product kernel associated to each control point can be filtered
out and dismissed to lighten computation.
Finally, the resulting weights matrix :math:`\Psi^3(\mathbf{k}, \mathbf{s})`
can be easily identified in Eq. :math:`\eqref{eq:1}` and used as the design matrix
for approximation of data.
Parameters
----------
target_nii : :obj:`nibabel.spatialimages`
An spatial image object (typically, a :obj:`~nibabel.nifti1.Nifti1Image`)
embedding the target | |
otherwise.
def decode(self, frame):
    """Parse a Xing/Info VBR header from ``frame``.

    ``frame`` must be the first MPEG audio frame of the file.  Sets
    ``self.vbr`` (True only for a ``Xing`` header) and, depending on the
    header flags, ``numFrames``, ``numBytes``, ``toc`` and ``vbrScale``.

    :param frame: bytes of the first mp3 frame, including its 4-byte header.
    :return: True when a ``Xing`` or ``Info`` header was found, False otherwise.
    """
    # MPEG version bit and channel mode from the frame header.
    version = (frame[1] >> 3) & 0x1
    mode = (frame[3] >> 6) & 0x3
    # The Xing header offset depends on version and channel mode;
    # +4 in all of these to skip the initial mp3 frame header.
    if version:
        pos = (17 if mode == 3 else 32) + 4
    else:
        pos = (9 if mode == 3 else 17) + 4
    head = frame[pos:pos + 4]
    # 'Xing' marks VBR streams; 'Info' is the same layout written for CBR.
    self.vbr = head == b'Xing'
    if head not in [b'Xing', b'Info']:
        return False
    # Lazy %-style args avoid formatting when debug logging is disabled.
    log.debug("%s header detected @ %x", head, pos)
    pos += 4
    # Read Xing flags.
    headFlags = bin2dec(bytes2bin(frame[pos:pos + 4]))
    pos += 4
    log.debug("%s header flags: 0x%x", head, headFlags)
    # Read frames header flag and value if present
    if headFlags & FRAMES_FLAG:
        self.numFrames = bin2dec(bytes2bin(frame[pos:pos + 4]))
        pos += 4
        log.debug("%s numFrames: %d", head, self.numFrames)
    # Read bytes header flag and value if present
    if headFlags & BYTES_FLAG:
        self.numBytes = bin2dec(bytes2bin(frame[pos:pos + 4]))
        pos += 4
        log.debug("%s numBytes: %d", head, self.numBytes)
    # Read TOC (100-byte seek table) flag and value if present
    if headFlags & TOC_FLAG:
        self.toc = frame[pos:pos + 100]
        pos += 100
        log.debug("%s TOC (100 bytes): PRESENT", head)
    else:
        log.debug("%s TOC (100 bytes): NOT PRESENT", head)
    # Read vbr scale header flag and value if present (Xing only)
    if headFlags & VBR_SCALE_FLAG and head == b'Xing':
        self.vbrScale = bin2dec(bytes2bin(frame[pos:pos + 4]))
        pos += 4
        log.debug("%s vbrScale: %d", head, self.vbrScale)
    return True
class LameHeader(dict):
r""" Mp3 Info tag (AKA LAME Tag)
Lame (and some other encoders) write a tag containing various bits of info
about the options used at encode time. If available, the following are
parsed and stored in the LameHeader dict:
encoder_version: short encoder version [str]
tag_revision: revision number of the tag [int]
vbr_method: VBR method used for encoding [str]
lowpass_filter: lowpass filter frequency in Hz [int]
replaygain: if available, radio and audiofile gain (see below) [dict]
encoding_flags: encoding flags used [list]
nogap: location of gaps when --nogap was used [list]
ath_type: ATH type [int]
bitrate: bitrate and type (Constant, Target, Minimum) [tuple]
encoder_delay: samples added at the start of the mp3 [int]
encoder_padding: samples added at the end of the mp3 [int]
noise_shaping: noise shaping method [int]
stereo_mode: stereo mode used [str]
unwise_settings: whether unwise settings were used [boolean]
sample_freq: source sample frequency [str]
mp3_gain: mp3 gain adjustment (rarely used) [float]
preset: preset used [str]
surround_info: surround information [str]
music_length: length in bytes of original mp3 [int]
music_crc: CRC-16 of the mp3 music data [int]
infotag_crc: CRC-16 of the info tag [int]
Prior to ~3.90, Lame simply stored the encoder version in the first frame.
If the infotag_crc is invalid, then we try to read this version string. A
simple way to tell if the LAME Tag is complete is to check for the
infotag_crc key.
Replay Gain data is only available since Lame version 3.94b. If set, the
replaygain dict has the following structure:
\code
peak_amplitude: peak signal amplitude [float]
radio:
name: name of the gain adjustment [str]
adjustment: gain adjustment [float]
originator: originator of the gain adjustment [str]
audiofile: [same as radio]
\endcode
Note that as of 3.95.1, Lame uses 89dB as a reference level instead of the
83dB that is specified in the Replay Gain spec. This is not automatically
compensated for. You can do something like this if you want:
\code
import eyeD3
af = eyeD3.mp3.Mp3AudioFile('/path/to/some.mp3')
lamever = af.lameTag['encoder_version']
name, ver = lamever[:4], lamever[4:]
gain = af.lameTag['replaygain']['radio']['adjustment']
if name == 'LAME' and eyeD3.mp3.lamevercmp(ver, '3.95') > 0:
gain -= 6
\endcode
Radio and Audiofile Replay Gain are often referrered to as Track and Album
gain, respectively. See http://replaygain.hydrogenaudio.org/ for futher
details on Replay Gain.
See http://gabriel.mp3-tech.org/mp3infotag.html for the gory details of the
LAME Tag.
"""
# from the LAME source:
# http://lame.cvs.sourceforge.net/*checkout*/lame/lame/libmp3lame/VbrTag.c
_crc16_table = [
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040]
ENCODER_FLAGS = {
'NSPSYTUNE': 0x0001,
'NSSAFEJOINT': 0x0002,
'NOGAP_NEXT': 0x0004,
'NOGAP_PREV': 0x0008}
PRESETS = {
0: 'Unknown',
# 8 to 320 are reserved for ABR bitrates
410: 'V9',
420: 'V8',
430: 'V7',
440: 'V6',
450: 'V5',
460: 'V4',
470: 'V3',
480: 'V2',
490: 'V1',
500: 'V0',
1000: 'r3mix',
1001: 'standard',
1002: 'extreme',
1003: 'insane',
1004: 'standard/fast',
1005: 'extreme/fast',
1006: 'medium',
1007: 'medium/fast'}
REPLAYGAIN_NAME = {
0: 'Not set',
1: 'Radio',
2: 'Audiofile'}
REPLAYGAIN_ORIGINATOR = {
0: 'Not set',
1: 'Set by artist',
2: 'Set by user',
3: 'Set automatically',
100: 'Set by simple RMS average'}
SAMPLE_FREQUENCIES = {
0: '<= 32 kHz',
1: '44.1 kHz',
2: '48 kHz',
3: '> 48 kHz'}
STEREO_MODES = {
0: 'Mono',
1: 'Stereo',
2: 'Dual',
3: 'Joint',
4: 'Force',
5: 'Auto',
6: 'Intensity',
7: 'Undefined'}
SURROUND_INFO = {
0: 'None',
1: 'DPL encoding',
2: 'DPL2 encoding',
3: 'Ambisonic encoding',
8: 'Reserved'}
VBR_METHODS = {
0: 'Unknown',
1: 'Constant Bitrate',
2: 'Average Bitrate',
3: 'Variable Bitrate method1 (old/rh)',
4: 'Variable Bitrate method2 (mtrh)',
5: 'Variable Bitrate method3 (mt)',
6: 'Variable Bitrate method4',
8: 'Constant Bitrate (2 pass)',
9: 'Average Bitrate (2 pass)',
15: 'Reserved'}
def __init__(self, frame):
    """Read the LAME info tag.

    :param frame: the first frame of an mp3; when a LAME tag is present,
        its fields are parsed by :meth:`decode` and stored in this dict.
    """
    super().__init__()
    self.decode(frame)
def _crc16(self, data, val=0):
    """Compute a CRC-16 checksum on a data stream.

    :param data: a bytes-like object to checksum.
    :param val: initial CRC value (allows chaining partial checksums).
    :return: the CRC-16 as an :obj:`int`.
    """
    # Iterating bytes yields ints directly; the original wrapped each byte
    # in a 1-byte ``bytes`` object only to convert it back with ``ord``.
    for byte in data:
        val = self._crc16_table[byte ^ (val & 0xff)] ^ (val >> 8)
    return val
def decode(self, frame):
"""Decode the LAME info tag."""
try:
pos = frame.index(b"LAME")
except: # noqa: B901
return
log.debug('Lame info tag found at position %d' % pos)
# check the info tag crc.Iif it's not valid, no point parsing much more.
lamecrc = bin2dec(bytes2bin(frame[190:192]))
if self._crc16(frame[:190]) != lamecrc:
log.warning('Lame tag CRC check failed')
try:
# Encoder short VersionString, 9 bytes
self['encoder_version'] = str(frame[pos:pos + 9].rstrip(), "latin1")
log.debug('Lame Encoder Version: %s' % self['encoder_version'])
pos += 9
# Info Tag revision + VBR method, 1 byte
self['tag_revision'] = bin2dec(bytes2bin(frame[pos:pos + 1])[:5])
vbr_method = bin2dec(bytes2bin(frame[pos:pos + 1])[5:])
self['vbr_method'] = self.VBR_METHODS.get(vbr_method, 'Unknown')
log.debug('Lame info tag version: %s' % self['tag_revision'])
log.debug('Lame VBR method: %s' % self['vbr_method'])
pos += 1
# Lowpass filter value, 1 byte
self['lowpass_filter'] = bin2dec(
bytes2bin(frame[pos:pos + 1])) * 100
log.debug('Lame Lowpass filter value: %s Hz' %
self['lowpass_filter'])
pos += 1
# Replay Gain, | |
str,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> bytes:
"""
Downloads a file from Google Cloud Storage.
When no filename is supplied, the operator loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
"""
# We do not pass filename, so will never receive string as response
return self.download(
bucket_name=bucket_name,
object_name=object_name,
chunk_size=chunk_size,
timeout=timeout,
num_max_attempts=num_max_attempts,
)
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
    self,
    bucket_name: str = PROVIDE_BUCKET,
    object_name: Optional[str] = None,
    object_url: Optional[str] = None,
):
    """Download the object to a temporary file and yield a handle to it.

    Use this method by passing the ``bucket_name`` and ``object_name``
    parameters, or just the ``object_url`` parameter.

    :param bucket_name: The bucket to fetch from.
    :param object_name: The object to fetch.
    :param object_url: File reference url. Must start with "gs://"
    :return: File handler
    """
    if object_name is None:
        raise ValueError("Object name can not be empty")
    # Keep the object's base name as the temp-file suffix for readability.
    base_name = object_name.rpartition("/")[-1]
    with NamedTemporaryFile(suffix=base_name) as tmp_file:
        self.download(
            bucket_name=bucket_name,
            object_name=object_name,
            filename=tmp_file.name,
        )
        tmp_file.flush()
        yield tmp_file
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
    self,
    bucket_name: str = PROVIDE_BUCKET,
    object_name: Optional[str] = None,
    object_url: Optional[str] = None,
):
    """Yield a temporary file handle and upload its contents on close.

    Use this method by passing the ``bucket_name`` and ``object_name``
    parameters, or just the ``object_url`` parameter.

    :param bucket_name: The bucket to fetch from.
    :param object_name: The object to fetch.
    :param object_url: File reference url. Must start with "gs://"
    :return: File handler
    """
    if object_name is None:
        raise ValueError("Object name can not be empty")
    # Keep the object's base name as the temp-file suffix for readability.
    base_name = object_name.rpartition("/")[-1]
    with NamedTemporaryFile(suffix=base_name) as tmp_file:
        # Hand the file to the caller first; upload whatever they wrote.
        yield tmp_file
        tmp_file.flush()
        self.upload(
            bucket_name=bucket_name,
            object_name=object_name,
            filename=tmp_file.name,
        )
def upload(
    self,
    bucket_name: str,
    object_name: str,
    filename: Optional[str] = None,
    data: Optional[Union[str, bytes]] = None,
    mime_type: Optional[str] = None,
    gzip: bool = False,
    encoding: str = 'utf-8',
    chunk_size: Optional[int] = None,
    timeout: Optional[int] = DEFAULT_TIMEOUT,
    num_max_attempts: int = 1,
) -> None:
    """
    Uploads a local file or file data as string or bytes to Google Cloud Storage.

    Exactly one of ``filename`` and ``data`` must be provided.

    :param bucket_name: The bucket to upload to.
    :param object_name: The object name to set when uploading the file.
    :param filename: The local file path to the file to be uploaded.
    :param data: The file's data as a string or bytes to be uploaded.
    :param mime_type: The file's mime type set when uploading the file.
    :param gzip: Option to compress local file or file data for upload
    :param encoding: bytes encoding for file data if provided as string
    :param chunk_size: Blob chunk size.
    :param timeout: Request timeout in seconds.
    :param num_max_attempts: Number of attempts to try to upload the file.
    :raises ValueError: if both or neither of ``filename`` and ``data`` are given.
    """

    def _call_with_retry(f: Callable[[], None]) -> None:
        """Run ``f`` with up to ``num_max_attempts`` tries and exponential back-off.

        :param f: Callable that should be retried.
        """
        num_file_attempts = 0
        while num_file_attempts < num_max_attempts:
            try:
                num_file_attempts += 1
                f()
                # BUGFIX: stop after a successful call. Previously the loop
                # kept iterating, re-uploading the object num_max_attempts times.
                return
            except GoogleCloudError as e:
                if num_file_attempts == num_max_attempts:
                    self.log.error(
                        'Upload attempt of object: %s from %s has failed. Attempt: %s, max %s.',
                        object_name,
                        bucket_name,  # BUGFIX: was object_name passed twice
                        num_file_attempts,
                        num_max_attempts,
                    )
                    raise e
                # Wait with exponential backoff scheme before retrying.
                timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
                time.sleep(timeout_seconds)

    client = self.get_conn()
    bucket = client.bucket(bucket_name)
    blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
    if filename and data:
        raise ValueError(
            "'filename' and 'data' parameter provided. Please "
            "specify a single parameter, either 'filename' for "
            "local file uploads or 'data' for file content uploads."
        )
    elif filename:
        if not mime_type:
            mime_type = 'application/octet-stream'
        if gzip:
            # Compress to a sibling .gz file and upload that instead.
            filename_gz = filename + '.gz'
            with open(filename, 'rb') as f_in:
                with gz.open(filename_gz, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
            filename = filename_gz
        _call_with_retry(
            partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
        )
        if gzip:
            # Remove the temporary compressed copy once uploaded.
            os.remove(filename)
        self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
    elif data:
        if not mime_type:
            mime_type = 'text/plain'
        if gzip:
            # Gzip the payload in memory before uploading.
            if isinstance(data, str):
                data = bytes(data, encoding)
            out = BytesIO()
            with gz.GzipFile(fileobj=out, mode="w") as f:
                f.write(data)
            data = out.getvalue()
        _call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))
        self.log.info('Data stream uploaded to %s in %s bucket', object_name, bucket_name)
    else:
        raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
def exists(self, bucket_name: str, object_name: str) -> bool:
    """Checks for the existence of a file in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the blob to check in the Google cloud
        storage bucket.
    """
    bucket = self.get_conn().bucket(bucket_name)
    return bucket.blob(blob_name=object_name).exists()
def get_blob_update_time(self, bucket_name: str, object_name: str):
    """Get the update time of a file in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the blob to get updated time from the
        Google cloud storage bucket.
    :raises ValueError: when the object does not exist in the bucket.
    """
    bucket = self.get_conn().bucket(bucket_name)
    blob = bucket.get_blob(blob_name=object_name)
    if blob is None:
        raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
    return blob.updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
    """Checks if a blob is updated in Google Cloud Storage after ``ts``.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param ts: The timestamp to check against.
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    if not ts.tzinfo:
        # Naive timestamps are interpreted as UTC.
        ts = ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s > %s", blob_update_time, ts)
    return blob_update_time > ts
def is_updated_between(
    self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
    """Checks if a blob was updated within ``[min_ts, max_ts)``.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param min_ts: The minimum timestamp to check against.
    :param max_ts: The maximum timestamp to check against.
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    # Naive timestamps are interpreted as UTC.
    if not min_ts.tzinfo:
        min_ts = min_ts.replace(tzinfo=timezone.utc)
    if not max_ts.tzinfo:
        max_ts = max_ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
    return min_ts <= blob_update_time < max_ts
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
    """Checks if a blob was updated before ``ts`` in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param ts: The timestamp to check against.
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    if not ts.tzinfo:
        # Naive timestamps are interpreted as UTC.
        ts = ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s < %s", blob_update_time, ts)
    return blob_update_time < ts
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
    """Check if the object's last update is older than ``seconds`` ago.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param seconds: The time in seconds to check against
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    from datetime import timedelta

    # The cutoff is "now minus the requested age".
    given_time = timezone.utcnow() - timedelta(seconds=seconds)
    self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
    return blob_update_time < given_time
def delete(self, bucket_name: str, object_name: str) -> None:
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:param object_name: name of the object to delete
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob | |
+ [self.no_im]
if hint is None:
hint = ['']
else:
hint = hint + ['']
for k_env in range(0, j + 1 - i):
if indic_dr_env[k_env] or self.read_indication_deathroom[i + k_env]:
self.read_indication_deathroom[i + k_env] = True
if indication_deathroom is None:
indication_deathroom = [self.indication_deathroom]
else:
indication_deathroom = indication_deathroom + [self.indication_deathroom]
elif indic_dr_env[k_env] == False and self.read_indication_deathroom[i + k_env] == False:
if indication_deathroom is None:
indication_deathroom = ['']
else:
indication_deathroom = indication_deathroom + ['']
if b == 2:
info_des = self.infos['description'][i:j + 1]
rewards_hint = self.rewards_hint[i:j + 1]
rewards_board = self.rewards_board[i:j + 1]
obs_env, rewards_env, rewards_hint, rewards_board, dones_env, infos_feedback, take_hint_env, im_env, \
hint_env, indic_dr_env = self.vect_step_mod(b, info_des, sample, rewards_hint, rewards_board)
self.rewards_hint[i:j + 1] = rewards_hint
self.rewards_board[i:j + 1] = rewards_board
len_sample = len(sample)
for k in iter(self.infos):
for l in range(len_sample):
if k == 'feedback':
infos[k].append(infos_feedback[l])
elif k == 'last_command':
infos[k].append(step[i + l])
elif k == 'moves':
infos[k].append(self.infos[k][i + l] + 1)
elif k == 'intermediate_reward':
infos[k].append(rewards_env[l])
elif k == 'score':
infos[k].append(self.infos[k][i + l])
else:
infos[k].append(self.infos[k][i + l])
obs = np.concatenate((obs, obs_env), axis=0)
rewards = np.concatenate((rewards, rewards_env), axis=0)
dones = np.concatenate((dones, dones_env), axis=0)
for j_env in range(0, j + 1 - i):
if im_env[j_env] or self.read_hint[i + j_env]:
self.read_hint[j_env] = True
if im is None:
im = [self.im]
else:
im = im + [self.im]
if hint is None:
hint = [self.hint]
else:
hint = hint + [self.hint]
elif im_env[j_env] == False and self.read_hint[i + j_env] == False:
if im is None:
im = [self.no_im]
else:
im = im + [self.no_im]
if hint is None:
hint = ['']
else:
hint = hint + ['']
for k_env in range(0, j + 1 - i):
if indic_dr_env[k_env] or self.read_indication_deathroom[i + k_env]:
self.read_indication_deathroom[i + k_env] = True
if indication_deathroom is None:
indication_deathroom = [self.indication_deathroom]
else:
indication_deathroom = indication_deathroom + [self.indication_deathroom]
elif indic_dr_env[k_env] == False and self.read_indication_deathroom[i + k_env] == False:
if indication_deathroom is None:
indication_deathroom = ['']
else:
indication_deathroom = indication_deathroom + ['']
if b == 3:
info_des = self.infos['description'][i:j + 1]
rewards_hint = self.rewards_hint[i:j + 1]
rewards_board = self.rewards_board[i:j + 1]
obs_env, rewards_env, rewards_hint, rewards_board, dones_env, infos_feedback, take_hint_env, im_env, \
hint_env, indic_dr_env = self.vect_step_mod(b, info_des, sample, rewards_hint, rewards_board)
self.rewards_hint[i:j + 1] = rewards_hint
self.take_hint[i:j + 1] = take_hint_env
len_sample = len(sample)
for k in iter(self.infos):
for l in range(len_sample):
if k == 'feedback':
infos[k].append(infos_feedback[l])
elif k == 'last_command':
infos[k].append(step[i + l])
elif k == 'moves':
infos[k].append(self.infos[k][i + l] + 1)
elif k == 'intermediate_reward':
infos[k].append(rewards_env[l])
elif k == 'score':
infos[k].append(self.infos[k][i + l])
else:
infos[k].append(self.infos[k][i + l])
obs = np.concatenate((obs, obs_env), axis=0)
rewards = np.concatenate((rewards, rewards_env), axis=0)
dones = np.concatenate((dones, dones_env), axis=0)
for j_env in range(0, j + 1 - i):
if im_env[j_env] or self.read_hint[i + j_env]:
self.read_hint[j_env] = True
if im is None:
im = [self.im]
else:
im = im + [self.im]
if hint is None:
hint = [self.hint]
else:
hint = hint + [self.hint]
elif im_env[j_env] == False and self.read_hint[i + j_env] == False:
if im is None:
im = [self.no_im]
else:
im = im + [self.no_im]
if hint is None:
hint = ['']
else:
hint = hint + ['']
for k_env in range(0, j + 1 - i):
if indic_dr_env[k_env] or self.read_indication_deathroom[i + k_env]:
self.read_indication_deathroom[i + k_env] = True
if indication_deathroom is None:
indication_deathroom = [self.indication_deathroom]
else:
indication_deathroom = indication_deathroom + [self.indication_deathroom]
elif indic_dr_env[k_env] == False and self.read_indication_deathroom[i + k_env] == False:
if indication_deathroom is None:
indication_deathroom = ['']
else:
indication_deathroom = indication_deathroom + ['']
elif b == 0:
completed_sample = [' '] * i + sample + [' '] * (self.batch_size - (j + 1))
obs_env, rewards_env, dones_env, infos_env = self.env.step(completed_sample)
obs_env = obs_env[i:j + 1]
rewards_env = rewards_env[i:j + 1]
dones_env = dones_env[i:j + 1]
obs_env, infos_description = self.vect_infos_obs_change(obs_env, infos_env["description"][i:j + 1],
room_of_the_hint=self.room_of_the_hint,
beginning=self.beginning_room,
take_hint=self.take_hint[i:j + 1])
infos_env['description'][i:j + 1] = infos_description
obs = np.concatenate((obs, obs_env), axis=0)
rewards = np.concatenate((rewards, rewards_env), axis=0)
dones = np.concatenate((dones, dones_env), axis=0)
len_sample = len(sample)
for k in iter(self.infos):
for l in range(len_sample):
if k == 'intermediate_reward':
infos[k].append(0)
elif k == 'score':
infos[k].append(rewards_env[l])
else:
infos[k].append(infos_env[k][l])
for j_env in range(i, j + 1):
if self.read_hint[j_env]:
if im is None:
im = [self.im]
else:
im = im + [self.im]
if hint is None:
hint = [self.hint]
else:
hint = hint + [self.hint]
elif self.read_hint[j_env] == False:
if im is None:
im = [self.no_im]
else:
im = im + [self.no_im]
if hint is None:
hint = ['']
else:
hint = hint + ['']
for k_env in range(i, j + 1):
if self.read_indication_deathroom[k_env]:
if indication_deathroom is None:
indication_deathroom = [self.indication_deathroom]
else:
indication_deathroom = indication_deathroom + [self.indication_deathroom]
elif self.read_indication_deathroom[k_env] == False:
if indication_deathroom is None:
indication_deathroom = ['']
else:
indication_deathroom = indication_deathroom + ['']
elif b == 4:
len_sample = len(sample)
for l in range(len_sample):
obs = np.concatenate((obs, ['you die! :\r\n']), axis=0)
r = self.rewards_death[i + l]
self.rewards_death[i + l] = np.minimum(0, self.rewards_death[i + l] + 1)
rewards = np.concatenate((rewards, [r]), axis=0)
dones = np.concatenate((dones, [True]), axis=0)
for j_env in range(i, j + 1):
if self.read_hint[j_env]:
if im is None:
im = [self.im]
else:
im = im + [self.im]
if hint is None:
hint = [self.hint]
else:
hint = hint + [self.hint]
elif self.read_hint[j_env] == False:
if im is None:
im = [self.no_im]
else:
im = im + [self.no_im]
if hint is None:
hint = ['']
else:
hint = hint + ['']
for k_env in range(i, j + 1):
if self.read_indication_deathroom[k_env]:
if indication_deathroom is None:
indication_deathroom = [self.indication_deathroom]
else:
indication_deathroom = indication_deathroom + [self.indication_deathroom]
elif self.read_indication_deathroom[k_env] == False:
if indication_deathroom is None:
indication_deathroom = ['']
else:
indication_deathroom = indication_deathroom + ['']
for k in iter(self.infos):
for l in range(len_sample):
if k == 'intermediate_reward':
infos[k].append(int(rewards[i + l]))
elif k == 'description':
infos[k].append('-= ' + self.death_room + ' =-*** The End ***')
elif k == 'feedback':
infos[k].append('you die! :\r\n')
elif k == 'last_command':
infos[k].append(None)
elif k == 'moves':
infos[k].append(self.infos[k][i + l] + 1)
elif k == 'score':
infos[k].append(self.infos[k][i + l])
else:
infos[k].append(self.infos[k][i + l])
i = j + 1
info_des = infos['description']
if self.upgradable_color_way:
partial_view_batch = []
for i_batch in range(self.batch_size):
self.current_room[i_batch] = info_des[i_batch].split('-= ')[1].split(' =-')[0].lower()
if self.rewards_hint[i_batch] <= 0:
partial_view_pic = pic_player_to_kitchen(self.rooms_dict, self.dict_game_goals,
self.center_visited_rooms, self.pic_size,
self.current_room[i_batch], room_name=self.room_name,
dict_rooms_numbers=self.dict_rooms_nbr,
name_type=self.name_type,
draw_passages=self.draw_passages,
draw_player=self.draw_player)
partial_view_pic_size = partial_view_pic.size
desired_size = 500
delta_w = desired_size - partial_view_pic_size[0]
delta_h = desired_size - partial_view_pic_size[1]
padding = (delta_w, delta_h, 0, 0)
new_im = ImageOps.expand(partial_view_pic, padding)
partial_view_batch.append(transform(new_im).float())
else:
partial_view_batch.append(self.no_im)
partial_view_batch = torch.stack(partial_view_batch)
self.infos = infos
im = torch.stack(im)
return obs, rewards, dones, infos, im, partial_view_batch, hint, indication_deathroom
self.infos = infos
im = torch.stack(im)
return obs, rewards, dones, infos, im, torch.stack([self.no_im] * self.batch_size), hint, indication_deathroom
def step_mod(self, need_step_mod, infos_description, step, rewards_hint, rewards_board):
"""
:param step: the step that the agents want to do
:return: special observations, rewards for having read the
hint, reward for having read the board, done, infos_description, if there is an image displayed (Bool),
a hint read(Bool), an indication to avoid the death room(Bool)
"""
room = infos_description.split('-= ')[1].split(' =-')[0]
if need_step_mod == 1:
obs = 'You have picked up thye hint. Reading it reveals the visual hint!\r\n'
infos_description = ' -= ' + room + ' =- ' + obs
dones = False
return obs, 0, rewards_hint, rewards_board, dones, infos_description, True, False, False, False
elif need_step_mod == 2 and (
'read the hint' == step or 'read hint' == step
or 'look the hint' == step or 'look at the hint' == step or 'look hint' == step):
obs = 'You have accessed the visual hint! Follow the textual advice:\r\n' + self.hint
infos_description = ' -= ' + room + ' =- ' + obs
rewards = rewards_hint
rewards_hint = np.maximum(0, rewards_hint - 1) # rewarded only for the first reading
dones = False
return obs, rewards, rewards_hint, rewards_board, dones, infos_description, False, True, True, False
elif need_step_mod == 3:
obs = 'You have accessed the visual hint! Follow the | |
<reponame>wx-b/dm_robotics
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IK solver for initialization of robot arms."""
import copy
from typing import List, NamedTuple, Optional, Sequence, Union
from absl import logging
from dm_control import mjcf
from dm_control.mujoco.wrapper import mjbindings
from dm_control.mujoco.wrapper.mjbindings.enums import mjtObj
from dm_robotics.controllers import cartesian_6d_to_joint_velocity_mapper
from dm_robotics.geometry import geometry
from dm_robotics.geometry import mujoco_physics
from dm_robotics.transformations import transformations as tr
import numpy as np
# Default value for the nullspace gain parameter.
_NULLSPACE_GAIN = 0.4
# Gain for the linear and angular twist computation, these values should always
# be between 0 and 1. 0 corresponds to not move and 1 corresponds to move to the
# target in a single integration timestep.
_LINEAR_VELOCITY_GAIN = 0.95
_ANGULAR_VELOCITY_GAIN = 0.95
# Integration timestep used when solving the IK.
_INTEGRATION_TIMESTEP_SEC = 1.0
# At each step of the solve, we measure how much the tracked element
# translated (linear progress) and rotated (angular progress). We compare this
# progress to the total linear and angular error and if not enough progress is
# made stop the solve before the maximum number of steps is reached.
_ERR_TO_PROGRESS_THRESHOLD = 20.0
### ---------------PARAMETERS USED FOR THE QP MAPPER: START----------------- ###
# Regularisation parameter used by the qp to compute joint velocities.
_REGULARIZATION_WEIGHT = 0.01
# Ensure that the joint limits are respected.
_ENABLE_JOINT_POSITION_LIMITS = True
# Gain that scales the joint velocities down when close to the joint limits.
_JOINT_POSITION_LIMIT_VELOCITY_SCALE = 1.0
# The minimal distance to joint limits the IK solution can have.
_MINIMUM_DISTANCE_FROM_JOINT_POSITION_LIMIT = 0.0
# Maximum number of iteration to find a joint velocity command that applies the
# desired twist to the element.
_MAX_CARTESIAN_VELOCITY_CONTROL_ITERATIONS = 300
# Number of iterations for the nullspace control problem.
_MAX_NULLSPACE_CONTROL_ITERATIONS = 300
# Maximum error allowed for the nullspace problem.
_NULLSPACE_PROJECTION_SLACK = 1e-5
# Maximum error allowed between the requested twist command and the actual one.
_SOLUTION_TOLERANCE = 1e-4
# Remove the logging when the nullspace cannot find a solution as this
# clutters the logging.
_LOG_NULLSPACE_FAILURE_WARNINGS = False
### -----------------PARAMETERS USED FOR THE QP MAPPER: END----------------- ###
# Private aliases over dm_control internals, used only in type annotations.
_Binding = Union[mjcf.physics.Binding, mjcf.physics._EmptyBinding]  # pylint: disable=protected-access
_MjcfElement = mjcf.element._ElementImpl  # pylint: disable=protected-access
class _Solution(NamedTuple):
  """Return value of an ik solution.

  Attributes:
    qpos: The joint configuration that realizes (or best approximates) the
      target pose.
    linear_err: The linear error between the target pose and the pose reached
      with `qpos`, in meters (compared against `linear_tol` in `solve`).
    angular_err: The angular error between the target pose and the pose
      reached with `qpos`, in radians (compared against `angular_tol`).
  """
  qpos: np.ndarray
  linear_err: float
  angular_err: float
class IkSolver():
"""Inverse kinematics solver.
This class computes a joint configuration that brings an element to a certain
pose.
"""
# The cartesian velocity controller used to solve the IK.
_qp_mapper: cartesian_6d_to_joint_velocity_mapper.Mapper
# Array of indices that sorts the joints in ascending order. The qp_mapper
# returns values in joint-ID ascending order which could be different than
# the order of the joints provided by the user.
_joints_argsort: List[int]
# The desired joint configuration that is set as the nullspace goal. This
# corresponds to the mid-range of each joint. The user can override this
# reference configuration in the `solve` method.
_nullspace_joint_position_reference: List[float]
def __init__(
self,
model: mjcf.RootElement,
controllable_joints: List[_MjcfElement],
element: _MjcfElement,
nullspace_gain: float = _NULLSPACE_GAIN,
):
"""Constructor.
Args:
model: The MJCF model root.
controllable_joints: The joints that can be controlled to achieve
the desired target pose. Only 1 DoF joints are supported.
element: The MJCF element that is being placed by the inverse kinematics
solver. Only body, geoms, and sites are supported
nullspace_gain: Scales the nullspace velocity bias. If the gain is set to
0, there will be no nullspace optimization during the solve process.
"""
self._physics = mjcf.Physics.from_mjcf_model(model)
self._geometry_physics = mujoco_physics.wrap(self._physics)
self._joints_binding = _binding(self._physics, controllable_joints)
self._num_joints = len(controllable_joints)
self._element = element
self._nullspace_gain = nullspace_gain
self._create_qp_mapper()
def solve(self,
ref_pose: geometry.Pose,
linear_tol: float = 1e-3,
angular_tol: float = 1e-3,
max_steps: int = 100,
early_stop: bool = False,
num_attempts: int = 30,
stop_on_first_successful_attempt: bool = False,
inital_joint_configuration: Optional[np.ndarray] = None,
nullspace_reference: Optional[np.ndarray] = None
) -> Optional[np.ndarray]:
"""Attempts to solve the inverse kinematics.
This method computes joint configuration that solves the inverse kinematics
problem. Returns None if no solution is found. If multiple solutions are
found, the solver will return the one where the joints are closer to the
`nullspace_reference`. If none is provided uses the center of the joint
ranges
Args:
ref_pose: Target pose of the controlled element, it must be
in the world frame.
linear_tol: The linear tolerance, in meters, that determines if the
solution found is valid.
angular_tol: The angular tolerance, in radians, to determine if the
solution found is valid.
max_steps: Maximum number of integration steps that can be used. The
larger the number of steps the more likely it is a solution will be
found but a larger number of steps increases computation time.
early_stop: If true, stops the attempt as soon as the configuration is
within the linear and angular tolerances. If false, it will always run
`max_steps` iterations per attempt and return the last configuration.
num_attempts: The number of different attempts the solver should do.
For a given target pose, there exists an infinite number of possible
solutions, having more attempts allows to compare different joint
configurations. The solver will return the solution where the joints are
closer to the `nullspace_reference`. Note that not all attempts
are successful, and thus, having more attempts gives better chances of
finding a correct solution.
stop_on_first_successful_attempt: If true, the method will return the
first solution that meets the tolerance criteria. If false, returns the
solution where the joints are closer the center of their respective
range.
inital_joint_configuration: A joint configuration that will be used for
the first attempt. This can be useful in the case of a complex pose,
a user could provide the initial guess that is close to the desired
solution. If None, all the joints will be set to 0 for the first
attempt.
nullspace_reference: The desired joint configuration. When the controlled
element is in the desired pose, the solver will try and bring the joint
configuration closer to the nullspace reference without moving the
element. If no nullspace reference is provided, the center of the joint
ranges is used as reference.
Returns:
If a solution is found, returns the corresponding joint configuration.
If the inverse kinematics failed, returns None.
Raises:
ValueError: If the `nullspace_reference` does not have the correct length.
ValueError: If the `inital_joint_configuration` does not have the correct
length.
"""
nullspace_reference = (
nullspace_reference or self._nullspace_joint_position_reference)
if len(nullspace_reference) != self._num_joints:
raise ValueError(
'The provided nullspace reference does not have the right number of '
f'elements expected length of {self._num_joints}.'
f' Got {nullspace_reference}')
if inital_joint_configuration is not None:
if len(inital_joint_configuration) != self._num_joints:
raise ValueError(
'The provided inital joint configuration does not have the right '
f'number of elements expected length of {self._num_joints}.'
f' Got {inital_joint_configuration}')
inital_joint_configuration = inital_joint_configuration or np.zeros(
self._num_joints)
nullspace_jnt_qpos_min_err = np.inf
sol_qpos = None
success = False
# Each iteration of this loop attempts to solve the inverse kinematics.
# If a solution is found, it is compared to previous solutions.
for attempt in range(num_attempts):
# Use the user provided joint configuration for the first attempt.
if attempt == 0:
self._joints_binding.qpos[:] = inital_joint_configuration
else:
# Randomize the initial joint configuration so that the IK can find
# different solutions.
qpos_new = np.random.uniform(
self._joints_binding.range[:, 0], self._joints_binding.range[:, 1])
self._joints_binding.qpos[:] = qpos_new
# Solve the IK.
joint_qpos, linear_err, angular_err = self._solve_ik(
ref_pose, linear_tol, angular_tol, max_steps,
early_stop, nullspace_reference)
# Check if the attempt was successful. The solution is saved if the joints
# are closer to the nullspace reference.
if (linear_err <= linear_tol and angular_err <= angular_tol):
success = True
nullspace_jnt_qpos_err = np.linalg.norm(
joint_qpos - nullspace_reference)
if nullspace_jnt_qpos_err < nullspace_jnt_qpos_min_err:
nullspace_jnt_qpos_min_err = nullspace_jnt_qpos_err
| |
from starling_sim.basemodel.trace.trace import Traced
from starling_sim.basemodel.trace.events import InputEvent
from starling_sim.basemodel.agent.operators.operator import Operator
from starling_sim.utils.utils import json_load, validate_against_schema, \
add_defaults_and_validate
from starling_sim.utils.constants import STOP_POINT_POPULATION
from starling_sim.utils.paths import common_inputs_folder
from jsonschema import ValidationError
from json import JSONDecodeError
import traceback
import random
import os
from copy import deepcopy
class DynamicInput(Traced):
"""
This class manages the agent entering the simulation.
It generates base agents at the beginning of the simulation, and
dynamically adds agents in the environment during the simulation.
"""
def __init__(self, agent_type_dict):
super().__init__("INPUT")
#: correspondence dict between agent_type and class
self.agent_type_class = agent_type_dict
self.agent_type_schemas = None
self.dynamic_feature_list = None
def __str__(self):
"""
Gives a string display to the dynamic input
:return:
"""
return "[dynamicInput: model={}, randomSeed={}]" \
.format(self.sim.name, self.sim.randomSeed)
    def setup(self, simulation_model):
        """
        Set the simulation model attribute and generate the base agents.

        Reads the dynamic input file (agents appearing during the run, sorted
        by 'origin_time') and the init input file(s) (agents present at the
        simulation start). Operator agents are created first — presumably so
        that other agents referencing them via 'operator_id' can be linked —
        then the remaining init agents. Positions are pre-processed for the
        non-operator init features and for the dynamic features.

        :param simulation_model: SimulationModel
        """
        # set the simulation model attribute
        self.sim = simulation_model
        self.agent_type_schemas = self.sim.get_agent_type_schemas()
        # set the attribute of dynamic features
        self.dynamic_feature_list = self.feature_list_from_file(
            self.sim.parameters["dynamic_input_file"])
        # sort list according to origin times
        self.dynamic_feature_list = sorted(self.dynamic_feature_list, key=lambda x: x["properties"]["origin_time"])
        # get the list of static features (present at the start of the simulation)
        init_files = self.sim.parameters["init_input_file"]
        # if there are several files, concatenate their feature lists
        if isinstance(init_files, list):
            init_feature_list = []
            for filename in init_files:
                init_feature_list += self.feature_list_from_file(filename)
        else:
            init_feature_list = self.feature_list_from_file(init_files)
        # resolve the modes of the agent types from all known features
        self.resolve_type_modes_from_inputs(init_feature_list + self.dynamic_feature_list)
        init_without_operators = []
        # create the operator agents first
        for feature in init_feature_list:
            agent_type = feature["properties"]["agent_type"]
            agent_class = self.agent_type_class[agent_type]
            if issubclass(agent_class, Operator):
                self.new_agent_input(feature)
            else:
                init_without_operators.append(feature)
        # pre process the positions of the rest of the init input
        self.pre_process_position_coordinates(init_without_operators)
        # create the rest of the init input
        for feature in init_without_operators:
            # generate a new agent based on the feature properties
            self.new_agent_input(feature)
        # pre process the positions of the dynamic input
        self.pre_process_position_coordinates(self.dynamic_feature_list)
def feature_schema_validation(self, feature):
# validate against Feature schema
validate_against_schema(feature, "geojson/Feature.json")
# test if the feature has an 'agent_type' property
if "agent_type" not in feature["properties"]:
raise KeyError("Error in {} : input features must contain an 'agent_type' property".format(feature))
# validate and set defaults using the schema corresponding to the agent type
agent_schema = self.agent_type_schemas[feature["properties"]["agent_type"]]
props = feature["properties"]
final_props = add_defaults_and_validate(props, agent_schema)
feature["properties"] = final_props
return feature
def play_dynamic_input_(self):
"""
Add agents to the simulation over time.
Agents are created based on the dynamic input file,
where the generation time is specified as 'origin_time'.
"""
for feature in self.dynamic_feature_list:
# TODO : check the feature schema ? duplicate with FeatureCollection check
# see if an offset should be applied to the input origin time
if "early_dynamic_input" in self.sim.parameters and self.sim.parameters["early_dynamic_input"]:
early_input_time_offset = self.sim.parameters["early_dynamic_input"]
else:
early_input_time_offset = 0
# compute the effective generation time
generation_time = int(feature["properties"]["origin_time"]) - early_input_time_offset
# check that the generation time is positive
if generation_time < 0:
self.log_message("Feature {} cannot be generated at {}, "
"generation at time 0 instead.".format(feature, generation_time), 30)
generation_time = 0
# wait for the next generation
waiting_time = generation_time - self.sim.scheduler.now()
yield self.sim.scheduler.timeout(waiting_time)
# generate new agent
self.new_agent_input(feature)
    def new_agent_input(self, feature):
        """
        Create and initialise a new agent, and add it to the simulation environment.

        The feature is first validated (with defaults added); on validation
        failure the input is skipped with a warning. The agent is then
        instantiated from the class registered for its 'agent_type' and added
        to the population of its type plus any populations listed in the
        'population' property.

        :param feature: geojson Feature dict describing the agent
        :return: the new agent object, or None if the input was skipped
        """
        # validate the feature and add default values
        try:
            feature = self.feature_schema_validation(feature)
        except Exception as e:
            self.log_message("Agent input was not completed due to the following error : {}".format(str(e)), 30)
            return
        # get the agent input dict
        input_dict = feature["properties"]
        if "operator_id" in input_dict:
            # link with operator
            self.add_key_operator(input_dict)
        # pre-process the agent input dict
        self.pre_process_input_dict(input_dict)
        # the agent is associated to the population of its type and other provided populations
        if "population" in input_dict:
            populations = input_dict["population"]
            if isinstance(populations, list):
                # NOTE(review): this mutates the 'population' list held in the
                # feature properties — confirm callers do not reuse it.
                populations.append(input_dict["agent_type"])
            else:
                populations = [populations, input_dict["agent_type"]]
            # only keep distinct populations
            populations = set(populations)
            populations = list(populations)
        else:
            populations = input_dict["agent_type"]
        # get the agent type
        agent_type = input_dict["agent_type"]
        if agent_type in self.agent_type_class:
            # get the class to generate
            agent_class = self.agent_type_class[agent_type]
            # generate the new agent
            new_agent = agent_class.__new__(agent_class)
            # initialise the new agent
            try:
                new_agent.__init__(self.sim, **input_dict)
            except (TypeError, KeyError, ValidationError):
                # NOTE(review): a failed instantiation logs then terminates the
                # whole process with exit(1) — confirm this is intended rather
                # than skipping the agent like a validation failure does.
                self.log_message("Instantiation of {} failed with message :\n {}"
                                 .format(self.agent_type_class[agent_type], traceback.format_exc()), 30)
                exit(1)
            # add the agent to the simulation environment
            self.add_agent_to_simulation(new_agent, populations)
            return new_agent
        else:
            self.log_message("Unknown agent_type {}. Model agent types are {}."
                             .format(agent_type, list(self.agent_type_class.keys())), 30)
            return
def add_agent_to_simulation(self, agent, populations):
"""
Add the agent to the simulation environment.
Add the agent to its population, then trace an input event
and start its simpy loop.
:param agent: Agent object
:param populations: population(s) where the agent belongs
"""
# add agent to relevant population
self.sim.agentPopulation.new_agent_in(agent, populations)
# trace and log input event
self.trace_event(InputEvent(self.sim.scheduler.now(), agent))
# add the agent loop to the event manager
agent.main_process = self.sim.scheduler.new_process(agent.simpy_loop_())
# get and manage input dicts from the input files
def feature_list_from_file(self, filename):
"""
Get the list of input features from the given filename.
The file must be a geojson, following the FeatureCollection schema,
and be stored in the input folder.
:param filename: name of the input file, stored in the input folder
:return: list of geojson Feature dicts
"""
if filename is None:
return []
# complete the file path with the input folder path
filepath = self.sim.parameters["input_folder"] + filename
# if the file does not exist, look in the common inputs folder
if not os.path.exists(filepath):
filepath = common_inputs_folder() + filename
if not os.path.exists(filepath):
raise FileNotFoundError("Input file {} not found in scenario inputs folder "
"or common inputs folder".format(filename))
# read the dict contained in input file
try:
geojson_input = json_load(filepath)
# TODO : validate against FeatureCollection
except JSONDecodeError as e:
self.log_message("Error while decoding input file {} : {}\n "
"Are you sure the file is a JSON ?".format(filename, e), 40)
raise e
except ValidationError as e:
self.log_message("Error while validating the input data : {}\n Are you sure "
"the json follows the FeatureCollection schema ?".format(e), 40)
raise e
# return the feature list
return geojson_input["features"]
    def make_demand_static(self):
        """Generate part of the dynamic demand at simulation start.

        Depending on the 'make_static' parameter, some dynamic features are
        input immediately instead of at their 'origin_time':

        - "all" and "ghosts": every dynamic feature is input right away;
        - "prebooked": prebooked features are input, others stay dynamic;
        - "prebooked_only": only prebooked features are input, the rest are
          discarded.
        """
        if "make_static" in self.sim.parameters \
                and self.sim.parameters["make_static"] in ["all", "prebooked", "prebooked_only", "ghosts"]:
            # features generated now are removed from the dynamic list
            dynamic_features = []
            make_static = self.sim.parameters["make_static"]
            for feature in self.dynamic_feature_list:
                properties = feature["properties"]
                if make_static == "all":
                    self.new_agent_input(feature)
                elif make_static == "prebooked" and properties["prebooked"]:
                    self.new_agent_input(feature)
                elif make_static == "prebooked_only":
                    # NOTE(review): non-prebooked features are neither generated
                    # nor kept dynamic here, i.e. they are dropped entirely —
                    # confirm this is the intended "only" semantics.
                    if properties["prebooked"]:
                        self.new_agent_input(feature)
                elif make_static == "ghosts":
                    # NOTE(review): "ghosts" follows the same path as "all" in
                    # this method; presumably the difference is handled
                    # elsewhere — verify.
                    self.new_agent_input(feature)
                else:
                    # e.g. non-prebooked features in "prebooked" mode
                    dynamic_features.append(feature)
            # store the dynamic ones back
            self.dynamic_feature_list = dynamic_features
def pre_process_position_coordinates(self, features):
"""
Add a position to the features with coordinates inputs.
Group the features by modes and call localisations_nearest_nodes environment method,
then update the features with the resulting positions.
:param features: features to pre process
"""
# create a global dict that associates modes to a list of features and their information
pre_process_dict = dict()
# get the model modes dict
model_modes = self.sim.modes
# base structure of the content associated to modes
base_nearest_nodes_dict = {
"inputs": [],
"keys": [],
"lon": [],
"lat": [],
"nearest_nodes": None
}
# browse the features
for feature in features:
input_dict = feature["properties"]
# prepare structures for storing feature information
inputs = []
keys = []
lon = []
lat = []
# look for coordinates inputs
origin_coordinates = self.get_position_coordinates_from_feature(feature, "origin")
if origin_coordinates is not None:
# deprecated properties 'origin_lon' and 'origin_lat'
if origin_coordinates == [0, 0] and "origin_lon" in input_dict and "origin_lat" in input_dict:
self.log_message("Use of 'origin_lon' and 'origin_lat' is deprecated, "
"using the feature geometry is preferred", 30)
origin_coordinates = [input_dict["origin_lon"], input_dict["origin_lat"]]
inputs.append(input_dict)
keys.append("origin")
lon.append(origin_coordinates[0])
lat.append(origin_coordinates[1])
destination_coordinates = self.get_position_coordinates_from_feature(feature, "destination")
if destination_coordinates is not None:
# deprecated properties 'destination_lon' and 'destination_lat'
if destination_coordinates == [0, 0] \
and "destination_lon" in input_dict and "destination_lat" in input_dict:
self.log_message("Use of 'destination_lon' and 'destination_lat' is deprecated, "
"using the feature geometry is preferred", 30)
destination_coordinates = [input_dict["destination_lon"], input_dict["destination_lat"]]
inputs.append(input_dict)
keys.append("destination")
lon.append(destination_coordinates[0])
lat.append(destination_coordinates[1])
# if there are coordinates inputs, add them to the modes dict
if len(inputs) != 0:
# get the modes of the input
modes = model_modes[input_dict["agent_type"]]
# if the dict | |
in self.loaded_years[:-2]:
button.state = "normal"
# Check if we have selected at least one Year and one Region.
def check_if_ry(self, *args):
c = 0
for keylist in self.loaded_regions.values():
for item in keylist:
if item.state == "down":
c = 1
break
if c == 1:
break
for j in self.loaded_years:
if j.state == "down":
c += 1
break
if c == 2:
return True
else:
return False
# Prepare a combined and filtered dict, with user's region/year selection.
def init_iry_iteration(self):
indicator = [self.id_conn[i] for i in self.sorted_indicators]
regions = sorted(
[i.text for j in self.loaded_regions.values() for i in j if i.state == "down"])
years = sorted([i.text for i in self.loaded_years if i.state == "down"])
# Set the default values.
regions.insert(0, "Region")
years.insert(0, "Year")
self.iry_iteration["i"] = indicator
self.iry_iteration["r"] = regions
self.iry_iteration["y"] = years
    def init_indicator_var_iry(self):
        """Build the three IRY drop-down lists (indicator, region, year).

        For each of the lists in ``self.iry_iteration``, a DropDown is filled
        with option buttons and attached to a main button added to
        ``self.iry_table``. Each option updates the IRY preview and selects
        the value; the main button swaps its background texture while its
        drop-down is open.
        """
        # Create indicator ID drop list.
        self.dropdown_i = DropDown(auto_width=False, width=90)
        for i in self.iry_iteration["i"]:
            btn = Factory.IRY_OptionBtn(text=i, on_press=self.dropdown_i.dismiss)
            # two on_release handlers: update the preview, then select the value
            btn.bind(on_release=partial(self.update_iry_preview, "indicator", btn.text))
            btn.bind(on_release=lambda btn: self.dropdown_i.select(btn.text))
            self.dropdown_i.add_widget(btn)
        mainbutton_i = Factory.IRY_MainbuttonBtn(text="IA")
        mainbutton_i.bind(on_release=self.dropdown_i.open)
        mainbutton_i.bind(on_release=lambda x: setattr(
            x, "background_normal", './Sources/selected_iry_down.png'))
        self.iry_table.add_widget(mainbutton_i)
        self.dropdown_i.bind(on_select=lambda instance, x: setattr(mainbutton_i, 'text', x))
        self.dropdown_i.bind(on_dismiss=lambda instance: setattr(
            mainbutton_i, "background_normal", './Sources/selected_iry_normal.png'))
        # Create region drop list (selection shown in pink markup).
        self.dropdown_r = DropDown(auto_width=False, width=90)
        for r in self.iry_iteration["r"]:
            btn = Factory.IRY_OptionBtn(text=r, on_press=self.dropdown_r.dismiss)
            btn.bind(on_release=partial(self.update_iry_preview, "region", btn.text))
            btn.bind(on_release=lambda btn: self.dropdown_r.select(btn.text))
            self.dropdown_r.add_widget(btn)
        mainbutton_r = Factory.IRY_MainbuttonBtn(text="[color=ff0080][Region][/color]")
        mainbutton_r.bind(on_release=self.dropdown_r.open)
        mainbutton_r.bind(on_release=lambda x: setattr(
            x, "background_normal", './Sources/selected_iry_down.png'))
        self.iry_table.add_widget(mainbutton_r)
        self.dropdown_r.bind(on_select=lambda instance, x: setattr(
            mainbutton_r, 'text', "[color=ff0080]["+x+"][/color]"))
        self.dropdown_r.bind(on_dismiss=lambda instance: setattr(
            mainbutton_r, "background_normal", './Sources/selected_iry_normal.png'))
        # Create year drop list (selection shown in blue markup).
        self.dropdown_y = DropDown(auto_width=False, width=90)
        for y in self.iry_iteration["y"]:
            btn = Factory.IRY_OptionBtn(text=y, on_press=self.dropdown_y.dismiss)
            btn.bind(on_release=partial(self.update_iry_preview, "year", btn.text))
            btn.bind(on_release=lambda btn: self.dropdown_y.select(btn.text))
            self.dropdown_y.add_widget(btn)
        mainbutton_y = Factory.IRY_MainbuttonBtn(text="[color=0d88d2][Year][/color]")
        mainbutton_y.bind(on_release=self.dropdown_y.open)
        mainbutton_y.bind(on_release=lambda x: setattr(
            x, "background_normal", './Sources/selected_iry_down.png'))
        self.iry_table.add_widget(mainbutton_y)
        self.dropdown_y.bind(on_select=lambda instance, x: setattr(
            mainbutton_y, 'text', "[color=0d88d2]["+x+"][/color]"))
        self.dropdown_y.bind(on_dismiss=lambda instance: setattr(
            mainbutton_y, "background_normal", './Sources/selected_iry_normal.png'))
def update_iry_preview(self, *args):
if args[0] == "indicator":
self.iry_preview.indicator = args[1]
elif args[0] == "region":
if args[1] != "Region":
self.iry_preview.region = "[b]["+args[1]+"][/b]"
else:
self.iry_preview.region = "["+args[1]+"]"
else:
if args[1] != "Year":
self.iry_preview.year = "[b]["+args[1]+"][/b]"
else:
self.iry_preview.year = "["+args[1]+"]"
# Check if string is number.
@staticmethod
def is_number(s):
try:
if s[-1] in "0123456789.":
return True
except IndexError:
return False
# Every time an item is selected.
    def formula_selected_item(self, item):
        """Make *item* the currently selected formula widget.

        The previously selected widget (if any) gets its normal background
        back, *item* is highlighted (empty spacer items use a dedicated
        texture), and parenthesis matching is refreshed for the selection.
        """
        # un-highlight the previous selection, if there was one
        if self.formula_items["last_item"]:
            self.formula_items["last_item"].background_normal = './Sources/formula_item_normal.png'
        # empty text marks a spacer item, which has its own "down" texture
        if item.text:
            item.background_normal = './Sources/formula_item_down.png'
        else:
            item.background_normal = './Sources/formula_empty_item_down.png'
        self.formula_items["last_item"] = item
        # recompute matching-parenthesis highlighting around the selection
        self.parenthesis_handler(item)
# Calculator's button manager.
    def calc_btn_pressed(self, t):
        """Insert the pressed calculator token *t* into the formula.

        *t* may be a digit/dot, an operator, an indicator variable (markup
        starting with "[color") or a function name (prefixed "<function>").
        Digits are merged into an adjacent number item when possible;
        otherwise a new formula item is inserted at the selection point,
        followed by a fresh empty spacer that becomes the new selection.

        Note: ``my_formula.children`` is a Kivy children list, ordered in
        reverse of the display order, hence the inverted index arithmetic.
        """
        # Ref my_formula children list.
        fc = self.my_formula.children
        # Ref Last Item.
        li = self.formula_items["last_item"]
        # Ref Last Item index.
        ili = fc.index(li)
        # If a number was pressed.
        if t in "0123456789.":
            # And this number is not the first item of the formula.
            if len(fc)-1 != ili:
                # If current selection is a number.
                if self.is_number(li.text):
                    # If item above contains no "." or pressed button is not ".".
                    if not ("." in li.text) or ("." != t):
                        # Move "selection" of last_item right to the next item (an empty item).
                        self.formula_selected_item(fc[ili-1])
                        # Ref last item index again because we changed that above.
                        ili = fc.index(self.formula_items["last_item"])
                    # Item above contains "." and button pressed is "." too.
                    else:
                        # Do not add it.
                        return None
                # If previous item from current selection is a number.
                if self.is_number(fc[ili+1].text):
                    # If item above contains no "." or pressed button is not "." .
                    if not ("." in fc[ili+1].text) or ("." != t):
                        # Update previous number item.
                        fc[ili+1].text += t
                        # After we either updated previous item or decided not to, do nothing else.
                        # NOTE(review): this return only fires in the "updated"
                        # branch despite the comment above — when the previous
                        # number already has "." and t is ".", execution falls
                        # through and inserts a new "0." item. Confirm intended.
                        return None
            # If this number is "." or "0".
            if t == "." or t == "0":
                # Change number into "0." so a fresh number never starts bare.
                t = "0."
        # If last item is a blank space.
        if li.text == "":
            # Place item 1 spot right.
            index = fc.index(li)
        else:
            # Place item 2 spots right.
            index = fc.index(li)-1
        # Check if this is an index variable and create the new calc item.
        if t[0:6] == "[color":
            new_calc_item = Factory.Calc_Formula_Item(text=t,
                                                      markup=True,
                                                      on_press=self.formula_selected_item)
        elif t[0:10] == "<function>":
            # wrap function names in red italic markup
            t = "[color=f44336][i][font=timesi]"+t[10:]+"[/font][/i][/color]"
            new_calc_item = Factory.Calc_Formula_Item(text=t,
                                                      markup=True,
                                                      on_press=self.formula_selected_item)
        else:
            new_calc_item = Factory.Calc_Formula_Item(text=t,
                                                      color=(0, 0, 0, 1),
                                                      bold=True,
                                                      on_press=self.formula_selected_item)
        # Insert formula item.
        self.my_formula.add_widget(new_calc_item, index)
        # Creation of new space item (becomes the new selection).
        self.formula_spacer(index)
        # Locate possible parentheses errors.
        self.validate_parentheses()
# Creation of empty space.
    def formula_spacer(self, index):
        """Insert an empty spacer item at *index* and make it the selection."""
        # Creation of new space item (empty text marks it as a spacer).
        new_space_item = Factory.Calc_Formula_Item(text="",
                                                   on_press=self.formula_selected_item)
        # Insert formula space.
        self.my_formula.add_widget(new_space_item, index)
        # Set this to be the active item.
        self.formula_selected_item(new_space_item)
# Find parenthesis closure point.
    def parenthesis_handler(self, selection):
        """Highlight the parenthesis matching *selection*, if any.

        If *selection* is a parenthesis, scan towards its closure (forwards
        for ")", backwards for "(" — remember ``children`` is in reverse
        display order) and paint the matched pair red. Otherwise, look for an
        unbalanced "(" to the left of the selection and recurse on it.
        """
        # Change states of previously selected parentheses, back to normal.
        for p in self.formula_items["p_group"]:
            p.color = (0, 0, 0, 1)
        self.formula_items["p_group"] = []
        f = self.my_formula.children
        i = f.index(selection)
        if selection.text == "(" or selection.text == ")":
            # By default selected parenthesis is not a reversed one.
            reverse = False
            # Ref p_text to current parenthesis.
            p_text = selection.text
            # Ref rev_p_text to the reverse parenthesis.
            if p_text == "(":
                rev_p_text = ")"
            else:
                rev_p_text = "("
                # Selected parenthesis is a reversed one.
                reverse = True
            p, rev_p = 0, 0
            # Iter upwards or downwards depending the parenthesis direction.
            for item in f[i:] if reverse else f[i::-1]:
                if item.text == p_text:
                    p += 1
                elif item.text == rev_p_text:
                    rev_p += +1
                if p > 0 and p == rev_p:
                    # matched: remember the pair and paint it red
                    # (the loop variable below intentionally reuses/shadows
                    # the counter name `p`; the outer loop breaks right after)
                    self.formula_items["p_group"] = [selection, item]
                    for p in self.formula_items["p_group"]:
                        p.color = (0.96, 0.26, 0.21, 1)
                    break
        else:
            # selection is not a parenthesis: find the first "(" left of the
            # selection that is still unbalanced and highlight its pair
            p, rev_p = 0, 0
            for item in f[i:]:
                if item.text == "(":
                    p += 1
                elif item.text == ")":
                    rev_p += 1
                if p > rev_p:
                    self.parenthesis_handler(item)
                    break
def validate_parentheses(self):
# Define parentheses only list.
pl = []
# Define valid parentheses list.
vl = []
for i in self.my_formula.children[::-1]:
if i.text == "(" or i.text == ")":
pl.append(i)
for o in pl:
p, rev_p = 0, 0
if o.text == "(":
for c in pl[pl.index(o):]:
if c.text == ")":
rev_p += +1
else:
p += +1
if p == rev_p:
vl.append(o)
vl.append(c)
break
for par in pl:
if par in vl:
par.bold = True
par.italic = False
par.color = (0, 0, 0, 1)
else:
par.bold = False
par.italic = True
par.color = (0.5, 0.5, 0.5, 1)
# Index Algebra calculations.
    def calc_backspace_pressed(self):
        """Delete the formula item to the left of the current selection.

        The selection is first moved left, then the removed item and its
        trailing spacer are taken out of the formula, and parenthesis
        validation is refreshed. Remember ``children`` is in reverse display
        order, so "left on screen" means a higher index.
        """
        # Ref my_formula children list.
        fc = self.my_formula.children
        # Ref Last Item.
        li = self.formula_items["last_item"]
        # Ref Last Item index.
        ili = fc.index(li)
        # If selected item is not the first item of the formula.
        if len(fc)-1 != ili:
            try:
                # Try to select the item 2 slots to the left.
                self.formula_selected_item(fc[ili+2])
            except IndexError:
                # Selected item is the 2nd in the list. Select first item.
                self.formula_selected_item(fc[ili+1])
            # Redefine ili because last_item was changed.
            ili = fc.index(self.formula_items["last_item"])
            # remove the deleted item
            self.my_formula.remove_widget(fc[ili-1])
            # If not first item in the formula.
            if fc.index(self.formula_items["last_item"]) != 0:
                # also remove its spacer
                self.my_formula.remove_widget(fc[ili-2])
            # Locate possible parentheses errors.
            self.validate_parentheses()
def clear_formula(self):
# Clear formula.
self.my_formula.clear_widgets()
# Creation of new space item.
self.formula_spacer(0)
def exec_formula(self, filename):
# Prepare composite index model dict.
self.cim = {self.rev_country_dict[r]: [] for r in self.iry_iteration["r"][1:]}
# Replacement set of dictionaries.
rep1 = {"[color=000000][b]": "self.indicator_var_eval('",
"[/b][/color][sub][color=ff0080][": "','",
"][/color][color=0d88d2][": "',",
"][/color][/sub]": ")"}
rep2 = {"b][": "", "][/b": ""}
rep3 = {"[color=f44336][i][font=timesi]": "math.", "[/font][/i][/color]": ""}
formula = []
for item in reversed(self.my_formula.children):
if item.text != "":
iv = item.text
# Cleanup markup code Indicator Variables.
if iv[:17] == "[color=000000][b]":
# LvL 1 replacement.
for key, var in rep1.iteritems():
iv = iv.replace(key, var)
# LvL 2 replacement.
for key, var in rep2.iteritems():
iv = iv.replace(key, var)
# Cleanup markup code from function Names.
elif iv[:17] == "[color=f44336][i]":
for key, var in rep3.iteritems():
iv = iv.replace(key, var)
else:
# Try to convert all numbers to floats.
try:
iv = str(float(iv))
except ValueError:
pass
# Something really unexpected just happened.
except Exception as nfe:
print "def exec_formula(self):", type(nfe), nfe.__doc__, nfe.message
formula.append(iv)
# Join formula into a single string.
string_formula = "".join(formula)
# Store possible errors in this dictionary.
e = {}
try:
# Start building the log file.
| |
<reponame>bkolosk1/bkolosk1-CrossLingualKeywords
import re
def parse_to_latex(filename='class_results-FINAL.txt'):
    """Parse a classification-results dump and print a LaTeX results table.

    The file is scanned line by line for three kinds of records:
    ``Classification...`` headers (the third-from-last ``_``-token names the
    configuration), ``Dataset: <name>`` headers, and ``Precision ...`` score
    lines.  Scores are grouped per (configuration, dataset), printed as a
    LaTeX tabular body, and per-configuration averages of the @5 / @10
    measures are appended.

    Parameters
    ----------
    filename : str
        Path of the results file.  Defaults to the historical hard-coded
        name, so existing callers are unaffected.
    """
    configs = ['nolm', 'lm', 'maskedlm', 'lm+bp', 'lm+pos', 'lm+rnn', 'lm+bpe+rnn', 'lm+bpe+crf']
    datasets = ['kp20k', 'inspec', 'krapivin', 'nus', 'semeval', 'kptimes', 'jptimes', 'duc']
    config_dict = {}
    # Most recent header lines seen; initialized so that a malformed file
    # (scores before any header) cannot raise NameError.
    config = dataset = None
    with open(filename, 'r', encoding='utf8') as file:
        for line in file:
            if line.startswith('Classification'):
                config = line.split('_')[-3]
                print(config)
            if line.startswith('Dataset:'):
                dataset = line.split()[-1]
                print(dataset)
            # Plain "Precision ..." rows only; skip the @M / @k summary rows.
            if (line.startswith('Precision')
                    and not line.startswith('Precision@M:')
                    and not line.startswith('Precision@k')):
                measure = line.split()[-2][:-1]  # strip the trailing ':'
                score = line.split()[-1]
                print(measure, score)
                config_dict.setdefault(config, {}).setdefault(dataset, []).append((measure, score))
    lines = []
    average5 = []
    average10 = []
    for config in configs:
        sum5 = 0
        sum10 = 0
        column = []
        for dataset in datasets:
            column.append(dataset)
            for measure, score in config_dict[config][dataset]:
                column.append((measure, score))
                # Accumulate the @10 / @5 measures for the per-config average.
                if measure.endswith('10'):
                    sum10 += float(score)
                if measure.endswith('5'):
                    sum5 += float(score)
        average5.append(sum5 / len(datasets))
        average10.append(sum10 / len(datasets))
        lines.append(column)
    print(lines)
    print("& " + " & ".join(configs) + '\\\\\\hline')
    # Each dataset contributes its name plus its score rows; the "% 3" walk
    # assumes exactly two measures per dataset (F@5 and F@10).
    for i in range(len(lines[0])):
        if i % 3 == 0:
            dataset = lines[0][i]
            print('& \\multicolumn{8}{c}{\\textbf{' + dataset + '}}\\\\\\hline')
        else:
            line = lines[0][i][0] + " & " + " & ".join([x[i][1] for x in lines]) + '\\\\'
            print(line)
    print('& \\multicolumn{7}{c}{\\textbf{Average}}\\\\\\hline')
    print("F@5 & " + " & ".join(["{:.4f}".format(x) for x in average5]) + '\\\\')
    print("F@10 & " + " & ".join(["{:.4f}".format(x) for x in average10]) + '\\\\')
#parse_to_latex()
def get_averages():
    """Average the per-column F1@5 / F1@10 scores of the hard-coded LaTeX
    results table below and print the two averaged rows.

    NOTE(review): the triple-quoted block is a pasted LaTeX fragment, not a
    raw string, so e.g. ``\t`` becomes a tab character — harmless here
    because only the digit runs of each '&'-separated cell are parsed out
    with a regex.
    """
    results ='''
    & \multicolumn{9}{c}{\textbf{KP20k}} \\\hline
    F1@5 & 0.072 & 0.181 & 0.141* & 0.177* & 0.046 & 0.005 & 0.317 & \textbf{0.348} & 0.252* & 0.339* & 0.342*\\
    F1@10 & 0.094 & 0.151 & 0.146* & 0.160* & 0.044 & 0.005 & 0.273 & 0.298 & 0.256* & 0.342* & \textbf{0.346*}\\
    \hline
    & \multicolumn{9}{c}{\textbf{Inspec}} \\\hline
    F1@5 & 0.160 & 0.286 & 0.204* & 0.101* & 0.022 & 0.035 & 0.244 & 0.276 & 0.293* & \textbf{0.467*} & 0.447*\\
    F1@10 & 0.244 & 0.339 & 0.223* & 0.108* & 0.022 & 0.046 & 0.289 & 0.333 & 0.335* & \textbf{0.525*} & \textbf{0.525*}\\
    \hline
    & \multicolumn{9}{c}{\textbf{Krapivin}} \\\hline
    F1@5 & 0.067 & 0.185 & 0.215* & 0.127* & 0.018 & 0.005 & 0.305 & \textbf{0.325} & 0.210* & 0.280* & 0.301*\\
    F1@10 & 0.093 & 0.160 & 0.196* & 0.106* & 0.017 & 0.007 & 0.266 & 0.285 & 0.214* & 0.283* & \textbf{0.307*}\\
    \hline
    & \multicolumn{9}{c}{\textbf{NUS}} \\\hline
    F1@5 & 0.112 & 0.230 & 0.159* & 0.224* & 0.073 & 0.004 & 0.376 & \textbf{0.374} & 0.274* & 0.311* & 0.350*\\
    F1@10 & 0.140 & 0.216 & 0.196* & 0.193* & 0.071 & 0.006 & 0.352 & 0.366 & 0.305* & 0.332* & \textbf{0.369*}\\
    \hline
    & \multicolumn{9}{c}{\textbf{SemEval}} \\\hline
    F1@5 & 0.088 & 0.217 & 0.151* & 0.167* & 0.068 & 0.011 & 0.318 & \textbf{0.327} & 0.261* & 0.214 & 0.291*\\
    F1@10 & 0.147 & 0.226 & 0.212* & 0.159* & 0.065 & 0.014 & 0.318 & 0.352 & 0.295* & 0.232 & \textbf{0.355*}\\
    \hline\hline
    & \multicolumn{9}{c}{\textbf{KPTimes}} \\\hline
    F1@5 & 0.179* & 0.022* & 0.105* & 0.168* & * & * & 0.406* & 0.424* & 0.353* & 0.439* & \textbf{0.469*}\\
    F1@10 & 0.151* & 0.030* & 0.118* & 0.139* & * & * & 0.393 & 0.424* & 0.354* & 0.440* & \textbf{0.469*}\\\hline
    & \multicolumn{9}{c}{\textbf{JPTimes}} \\\hline
    F1@5 & 0.266* & 0.012* & 0.109* & 0.225* & * & * & 0.256* & 0.238* & 0.258* & \textbf{0.344*} & 0.337*\\
    F1@10 & 0.229* & 0.026* & 0.135* & 0.185* & * & * & 0.246 & 0.238* & 0.267* & 0.346* & \textbf{0.360*}\\\hline
    & \multicolumn{9}{c}{\textbf{DUC}} \\\hline
    F1@5 & 0.098* & 0.120* & 0.106* & 0.189* & * & * & 0.083 & 0.063* & 0.247* & 0.281* & \textbf{0.312*}\\
    F1@10 & 0.120* & 0.181* & 0.132* & 0.172* & * & * & 0.105 & 0.063* & 0.277* & 0.321* & \textbf{0.355*}\\\hline
    '''
    # One accumulator list per score column (11 columns in the table above).
    f5s = [[], [], [], [], [], [], [], [], [], [], []]
    f10s = [[], [], [], [], [], [], [], [], [], [], []]
    for line in results.split('\n'):
        line = line.strip()
        print(line)
        if line.startswith('F1@5'):
            line = line.split('&')
            line = line[1:]
            for idx, score in enumerate(line):
                score = score.strip()
                # Digit runs rejoined with '.' rebuild the decimal, e.g.
                # "\textbf{0.348}" -> ['0', '348'] -> 0.348; '*'-only cells
                # contribute 0.
                score = re.findall(r'\d+', score)
                if len(score) > 0:
                    f5s[idx].append((float(".".join(score))))
                else:
                    f5s[idx].append(0)
        elif line.startswith('F1@10'):
            line = line.split('&')
            line = line[1:]
            for idx, score in enumerate(line):
                score = score.strip()
                #print(score)
                score = re.findall(r'\d+', score)
                #print(score)
                if len(score) > 0:
                    f10s[idx].append((float(".".join(score))))
                else:
                    f10s[idx].append(0)
    print(f5s)
    print(f10s)
    # Column-wise means, formatted as ready-to-paste LaTeX rows.
    f5s = " & ".join(['F1@5'] + ["{:.4f}".format(sum(x)/len(x)) for x in f5s])
    f10s = " & ".join(['F1@10'] + ["{:.4f}".format(sum(x)/len(x)) for x in f10s])
    print(f5s)
    print(f10s)
#get_averages()
def revert():
results = '''& TfIdf & TextRank & YAKE & RaKUn & Key2Vec & EmbedRank & KEA & Maui & SemiSupervised & CopyRNN & CatSeqD & CorrRNN & GPT-2 & \begin{tabular}[x]{@{}c@{}}GPT-2 + \\ BiLSTM-CRF\end{tabular} & TNT-KID \\\hline
& \multicolumn{9}{c}{\textbf{KP20k}} \\\hline
F1@5 & 0.072 & 0.181 & 0.141* & 0.177* & 0.080* & 0.135* & 0.046 & 0.005 & 0.308 & 0.317 & \textbf{0.348} & / & 0.252* & 0.343* & 0.338*\\
F1@10 & 0.094 & 0.151 & 0.146* & 0.160* & 0.090* & 0.134* & 0.044 & 0.005 & 0.245 & 0.273 & 0.298 & / & 0.256* & \textbf{0.347*} & 0.342*\\
\hline
& \multicolumn{9}{c}{\textbf{Inspec}} \\\hline
F1@5 & 0.160 & 0.286 & 0.204* & 0.101* & 0.121* & 0.345* & 0.022 & 0.035 & 0.326 & 0.244 & 0.276 & / & 0.293* & \textbf{0.468*} & 0.456*\\
F1@10 & 0.244 & 0.339 & 0.223* & 0.108* & 0.181* & 0.394* & 0.022 & 0.046 & 0.334 & 0.289 & 0.333 & / & 0.335* & \textbf{0.535*} & 0.534*\\
\hline
& \multicolumn{9}{c}{\textbf{Krapivin}} \\\hline
F1@5 & 0.067 & 0.185 & 0.215* & 0.127* & 0.068* & 0.149* & 0.018 & 0.005 & 0.296 & 0.305 & \textbf{0.325} & 0.318 & 0.210* & 0.302* & 0.313*\\
F1@10 & 0.093 & 0.160 & 0.196* & 0.106* & 0.082* & 0.158* & 0.017 & 0.007 & 0.240 & 0.266 & 0.285 & 0.278 & 0.214* & 0.302* & \textbf{0.318*}\\
\hline
& \multicolumn{9}{c}{\textbf{NUS}} \\\hline
F1@5 & 0.112 & 0.230 & 0.159* & 0.224* & 0.109* & 0.173* & 0.073 & 0.004 & 0.356 & 0.376 & \textbf{0.374} & 0.361 & 0.274* & 0.315* & 0.345*\\
F1@10 & 0.140 & 0.216 & 0.196* & 0.193* & 0.121* & 0.190* & 0.071 & 0.006 & 0.320 & 0.352 & 0.366 & 0.335 & 0.305* & 0.333* & \textbf{0.357*}\\
\hline
& \multicolumn{9}{c}{\textbf{SemEval}} \\\hline
F1@5 & 0.088 & 0.217 & 0.151* & 0.167* & 0.081* & 0.189* & 0.068 & 0.011 & 0.322 & 0.318 & \textbf{0.327} & 0.320 & 0.261* & 0.262 & 0.294*\\
F1@10 & 0.147 & 0.226 & 0.212* & 0.159* & 0.126* & 0.217* & 0.065 & 0.014 & 0.294 & 0.318 & \textbf{0.352} & 0.320 & 0.295* & 0.273 & 0.334*\\
\hline\hline
& \multicolumn{9}{c}{\textbf{KPTimes}} \\\hline
F1@5 & 0.179* & 0.022* & 0.105* & 0.168* & 0.126* & 0.063* & * & * & / & 0.406* & 0.424* & / & 0.353* & \textbf{0.497*} & \textbf0.488*\\
F1@10 & 0.151* & 0.030* & 0.118* & 0.139* & 0.116* & 0.057* & * & * & / & 0.393 & 0.424* & / & 0.354* & \textbf{0.497*} & 0.486*\\\hline
& \multicolumn{9}{c}{\textbf{JPTimes}} \\\hline
F1@5 & 0.266* & 0.012* & 0.109* & 0.225* & 0.158* & 0.081* & * & * & / & 0.256* & 0.238* & / & 0.258* & 0.375* & \textbf{0.385*}\\
F1@10 & 0.229* & 0.026* & 0.135* & 0.185* & 0.145* & 0.074* & * & * & / & 0.246 & 0.238* & / & 0.267* & 0.380* & \textbf{0.385*}\\\hline
& \multicolumn{9}{c}{\textbf{DUC}} \\\hline
F1@5 & 0.098* & 0.120* & 0.106* & 0.189* & 0.062* & 0.219* & * & * & / & 0.083 & 0.063* & / & 0.247* & \textbf{0.334*} & 0.310*\\
F1@10 & 0.120* & 0.181* & 0.132* & 0.172* & 0.078* & 0.246* & * | |
"""
Created by <NAME> on Apr 24 2021
purpose: easily compare values between helita and aux vars from a simulation.
Highest-level use-case: compare all the aux vars with their helita counterparts!
#<< input:
from helita.sim import aux_compare as axc
from helita.sim import ebysus as eb
dd = eb.EbysusData(...) # you must fill in the ... as appropriate.
c = axc.compare_all(dd)
#>> output:
>->->->->->->-> initiate comparison for auxvar = etg <-<-<-<-<-<-<-<
auxvar etg min= 4.000e+03, mean= 4.000e+03, max= 4.000e+03
helvar tg -1 min= 4.000e+03, mean= 4.000e+03, max= 4.000e+03; mean ratio (aux / helita): 1.000e+00
----------------------------------------------------------------------------------------------------------------------
comparison_result(N_differ=0, N_total=1, runtime=0.0020618438720703125)
>->->->->->->-> initiate comparison for auxvar = mm_cnu <-<-<-<-<-<-<-<
auxvar mm_cnu ( 1, 1) ( 1, 2) min= 8.280e+05, mean= 8.280e+05, max= 8.280e+05
helvar nu_ij ( 1, 1) ( 1, 2) min= 8.280e+05, mean= 8.280e+05, max= 8.280e+05; mean ratio (aux / helita): 1.000e+00
---------------------------------------------------------------------------------------------------------------------------------
... << (more lines of output, which we are not showing you in this file, to save space.)
#<< more input:
print(c)
#>> more output:
{'N_compare': 30, 'N_var': 8, 'N_differ': 4, 'N_diffvar': 1, 'N_error': 1,
'errors': [FileNotFoundError(2, 'No such file or directory')], 'runtime': 1.581925868988037}
High-level use-case: compare a single aux var with its helita counterpart!
#<< input:
from helita.sim import aux_compare as axc
from helita.sim import ebysus as eb
dd = eb.EbysusData(...) # you must fill in the ... as appropriate.
axc.compare(dd, 'mfr_nu_es')
#>> output:
auxvar mfr_nu_es ( 1, 1) min= 3.393e+04, mean= 3.393e+04, max= 3.393e+04
helvar nu_ij -1 ( 1, 1) min= 1.715e+04, mean= 1.715e+04, max= 1.715e+04; mean ratio (aux / helita): 1.978e+00
>>> WARNING: RATIO DIFFERS FROM 1.000 <<<<
------------------------------------------------------------------------------------------------------------------------------------
auxvar mfr_nu_es ( 1, 2) min= 1.621e+05, mean= 1.621e+05, max= 1.621e+05
helvar nu_ij -1 ( 1, 2) min= 1.622e+05, mean= 1.622e+05, max= 1.622e+05; mean ratio (aux / helita): 9.993e-01
------------------------------------------------------------------------------------------------------------------------------------
#<< more input:
axc.compare(dd, 'mm_cnu')
#>> more output:
auxvar mm_cnu ( 1, 1) ( 1, 2) min= 8.280e+05, mean= 8.280e+05, max= 8.280e+05
helvar nu_ij ( 1, 1) ( 1, 2) min= 8.280e+05, mean= 8.280e+05, max= 8.280e+05; mean ratio (aux / helita): 1.000e+00
---------------------------------------------------------------------------------------------------------------------------------
auxvar mm_cnu ( 1, 2) ( 1, 1) min= 8.280e+06, mean= 8.280e+06, max= 8.280e+06
helvar nu_ij ( 1, 2) ( 1, 1) min= 8.280e+06, mean= 8.280e+06, max= 8.280e+06; mean ratio (aux / helita): 1.000e+00
---------------------------------------------------------------------------------------------------------------------------------
# output format notes:
# vartype varname (ispecie, ilevel) (jspecie, jlevel) min mean max
# when ispecies < 0 or jspecie < 0 (i.e. for electrons), they may be shown as "specie" instead of "(ispecie, ilevel)".
TODO (maybe):
- allow to put kwargs in auxvar lookup.
- for example, ebysus defines mm_cross = 0 when ispecies is ion, to save space.
      meanwhile get_var('cross') in helita will return the same values even if the fluids are swapped.
e.g. get_var('mm_cross', ifluid=(1,2), jfluid=(1,1)) == 0
get_var('cross', ifluid=(1,2), jfluid=(1,1)) == get_var('cross', ifluid=(1,1), jfluid=(1,2))
"""
# import built-in
from collections import namedtuple
import time
import warnings  # was missing: the ImportError fallback below raised NameError

# import internal modules
from . import fluid_tools

# import external private modules
try:
    from at_tools import fluids as fl
except ImportError:
    fl = None
    warnings.warn('failed to import at_tools.fluids; some functions in helita.sim.aux_compare may crash')
# set defaults
DEFAULT_TOLERANCE = 0.05  # max allowed |1 - aux/helita| before values count as "different"

''' ----------------------------- lookup helita counterpart to aux var ----------------------------- '''

# Map each aux var to its helita counterpart ("helvar").
# A tuple value means v[1] fixes ifluid (or mf_ispecies) and, when present,
# v[2] fixes jfluid (or mf_jspecies).
AUXVARS = {
    'etg':       ('tg', -1),     # electron temperature
    'mfe_tg':    'tg',           # fluid temperature
    'mfr_nu_es': ('nu_ij', -1),  # electron-fluid collision frequency
    'mm_cnu':    'nu_ij',        # fluid - fluid collision frequency
    'mm_cross':  'cross',        # cross section
    'mfr_p':     'p',            # pressure
}

# Axial var families: each entry spawns an x/y/z variant,
# e.g. {'e': 'ef'} --> {'ex': 'efx', 'ey': 'efy', 'ez': 'efz'}.
AUX_AXIAL_VARS = {
    'e':  'ef',
    'eu': 'ue',
    'i':  'j',
}
AXES = ['x', 'y', 'z']
for aux_base, hel_base in AUX_AXIAL_VARS.items():
    AUXVARS.update({aux_base + axis: hel_base + axis for axis in AXES})


def get_helita_var(auxvar):
    '''Return the helita counterpart (name, or tuple with fluid info) of auxvar.'''
    return AUXVARS[auxvar]
''' ----------------------------- get_var for helita & aux ----------------------------- '''
def _callsig(helvar):
'''returns dict with keys for getvar for helvar'''
if isinstance(helvar, str):
return dict(var=helvar)
#else: helvar has len 2 or longer
result = dict(var=helvar[0])
try:
next(iter(helvar[1]))
except TypeError: # helvar[1] is not a list
result.update(dict(mf_ispecies=helvar[1]))
else: # helvar[1] is a list
result.update(dict(ifluid=helvar[1]))
if len(helvar)>2: # we have info for jfluid as well.
try:
next(iter(helvar[2]))
except TypeError:
result.update(dict(mf_jspecies=helvar[2]))
else:
result.update(dict(jfluid=helvar[2]))
return result
def _loop_fluids(obj, callsig):
'''return the fluid kws which need to be looped through.
obj should be EbysusData object.
callsig should be _callsig(helvar).
returns a tuple telling whether to loop through (ifluid, jfluid) for helvar.
'''
var = callsig['var']
search = obj.search_vardict(var)
nfluid = search.result['nfluid']
if nfluid is None: # we do not need to loop through any fluids.
return (False, False)
elif nfluid == 0: # we do not need to loop through any fluids.
assert list(callsig.keys()) == ['var'], "invalid var tuple in AUXVARS for nfluid=0 var '{}'".format(var)
return (False, False)
elif nfluid == 1: # we might need to loop through ifluid.
result = [True, False]
for kw in ['mf_ispecies', 'ifluid']:
if kw in callsig.keys():
result[0] = False # we do not need to loop through ifluid.
break
return tuple(result)
elif nfluid == 2: # we might need to loop through ifluid and/or jfluid.
result = [True, True]
for kw in ['mf_jspecies', 'jfluid']:
if kw in callsig.keys():
result[1] = False # we do not need to loop through jfluid.
break
for kw in ['mf_ispecies', 'ifluid']:
if kw in callsig.keys():
result[0] = False # we do not need to loop through ifluid.
break
return tuple(result)
else:
raise NotImplementedError # we don't know what to do when nfluid is not 0, 1, 2, or None.
def _iter_fluids(fluids, loopfluids, **kw__fluid_pairs):
'''returns an iterator which yields pairs of dicts: (daux, dhel)
daux are the fluid kws to call with aux var
dhel are the fluid kws to call with helita var.
loopfluids ==
(False, False) -> yields (dict(), dict()) then stops iteration.
(True, False) -> yields (dict(ifluid=fluid), dict(ifluid=fluid)) for fluid in fluids.
(False, True ) -> yields (dict(ifluid=fluid), dict(jfluid=fluid)) for fluid in fluids.
(True, True) -> yields (x, x) where x is a dict with keys ifluid, jfluid,
and we iterate over pairs of ifluid, jfluid.
**kw__fluid_pairs
only matters if loopfluids == (True, True);
these kwargs go to fluid_tools.fluid_pairs.
'''
loopi, loopj = loopfluids
if not loopi and not loopj:
x = dict()
yield (x, x)
elif loopi and not loopj:
for fluid in fluids:
x = dict(ifluid=fluid)
yield (x, x)
elif not loopi and loopj:
for fluid in fluids:
yield (dict(ifluid=fluid), dict(jfluid=fluid))
elif loopi and loopj:
for ifluid, jfluid in fluid_tools.fluid_pairs(fluids, **kw__fluid_pairs):
x = dict(ifluid=ifluid, jfluid=jfluid)
yield (x, x)
def _SL_fluids(fluids_dict, f = lambda fluid: fluid):
'''update values in fluids_dict by applying f'''
return {key: f(val) for key, val in fluids_dict.items()}
def _setup_fluid_kw(auxvar, callsig, auxfluids, helfluids, f=lambda fluid: fluid):
'''returns ((args, kwargs) to use with auxvar, (args, kwargs) to use with helitavar)
args with be the list [var]
kwargs will be the dict of auxfluids (or helfluids). (species, levels) only.
f is applied to all values in auxfluids and helfluids.
use f = (lambda fluid: fluid.SL) when fluids are at_tools.fluids.Fluids,
to convert them to (species, level) tuples.
'''
# convert fluids to SLs via f
auxfluids = _SL_fluids(auxfluids, f=f)
helfluids = _SL_fluids(helfluids, f=f)
# pop var from callsig (we pass var as arg rather than kwarg).
callsigcopy = callsig.copy() # copy to ensure callsig is not altered
helvar = callsigcopy.pop('var')
helfluids.update(callsigcopy)
# make & return output
callaux = ([auxvar], auxfluids)
callhel = ([helvar], helfluids)
return (callaux, callhel)
def _get_fluids_and_f(obj, fluids=None, f=lambda fluid: fluid):
'''returns fluids, f.
if fluids is None:
fluids = fl.Fluids(dd=obj)
f = lambda fluid: fluid.SL
if we failed to import at_tools.fluids, try fluids=obj.fluids, before giving up.
'''
if fluids is None:
f = lambda fluid: fluid.SL
if fl is None:
if not obj.hasattr('fluids'):
errmsg | |
Death Mountain Ledge (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Turtle Rock Ledge Exit (East)', player)
def unbias_some_entrances(world, Dungeon_Exits, Cave_Exits, Old_Man_House, Cave_Three_Exits):
    """Randomize the order inside every grouped exit list (in place), apply
    the Paradox Cave and Turtle Rock fix-ups, then freeze the groups into
    tuples."""
    def randomize_nested(groups):
        # Replace each inner list with a randomly re-ordered copy.
        for idx, group in enumerate(groups):
            if isinstance(group, list):
                groups[idx] = world.random.sample(group, len(group))

    def freeze_nested(groups):
        # Inner lists become tuples once their order is final.
        for idx, group in enumerate(groups):
            if isinstance(group, list):
                groups[idx] = tuple(group)

    randomize_nested(Dungeon_Exits)
    randomize_nested(Cave_Exits)
    randomize_nested(Old_Man_House)
    randomize_nested(Cave_Three_Exits)

    # paradox fixup: the bottom exit must not end up first in its group.
    if Cave_Three_Exits[1][0] == "Paradox Cave Exit (Bottom)":
        swap = world.random.randint(1, 2)
        Cave_Three_Exits[1][0] = Cave_Three_Exits[1][swap]
        Cave_Three_Exits[1][swap] = "Paradox Cave Exit (Bottom)"

    # TR fixup: force the east ledge exit to the front of the last group.
    for idx, name in enumerate(Dungeon_Exits[-1]):
        if name == 'Turtle Rock Ledge Exit (East)':
            if idx != 0:
                Dungeon_Exits[-1][idx] = Dungeon_Exits[-1][0]
                Dungeon_Exits[-1][0] = 'Turtle Rock Ledge Exit (East)'
            break
    else:
        raise RuntimeError("TR entrance shuffle fixup didn't happen")

    freeze_nested(Dungeon_Exits)
    freeze_nested(Cave_Exits)
    freeze_nested(Old_Man_House)
    freeze_nested(Cave_Three_Exits)
# Dispatch table mapping a plando connection direction to the wiring function.
# NOTE(review): connect_two_way / connect_entrance / connect_exit are defined
# elsewhere in this module.
lookup = {
    "both": connect_two_way,
    "entrance": connect_entrance,
    # connect_exit expects (world, exit, entrance, player): swap the middle args.
    "exit": lambda x, y, z, w: connect_exit(x, z, y, w)
}
def plando_connect(world, player: int):
    """Wire up every plando-specified connection for this player, using the
    direction dispatch table ``lookup``; wrap any failure with context."""
    connections = world.plando_connections[player]
    if not connections:
        return
    for connection in connections:
        connect = lookup[connection.direction]
        try:
            connect(world, connection.entrance, connection.exit, player)
        except Exception as e:
            raise Exception(f"Could not connect using {connection}") from e
# Light-world dungeon entrances that may be freely shuffled.
LW_Dungeon_Entrances = ['Desert Palace Entrance (South)',
                        'Desert Palace Entrance (West)',
                        'Desert Palace Entrance (North)',
                        'Eastern Palace',
                        'Tower of Hera',
                        'Hyrule Castle Entrance (West)',
                        'Hyrule Castle Entrance (East)',
                        'Agahnims Tower']
# Light-world dungeon entrances that must also be usable as exits.
LW_Dungeon_Entrances_Must_Exit = ['Desert Palace Entrance (East)']
# Dark-world dungeon entrances that may be freely shuffled.
DW_Dungeon_Entrances = ['Thieves Town',
                        'Skull Woods Final Section',
                        'Ice Palace',
                        'Misery Mire',
                        'Palace of Darkness',
                        'Swamp Palace',
                        'Turtle Rock',
                        'Dark Death Mountain Ledge (West)']
# Dark-world dungeon entrances that must also be usable as exits.
DW_Dungeon_Entrances_Must_Exit = ['Dark Death Mountain Ledge (East)',
                                  'Turtle Rock Isolated Ledge Entrance']
# Dungeon exit pool; inner lists are groups whose order gets shuffled.
Dungeon_Exits_Base = [['Desert Palace Exit (South)', 'Desert Palace Exit (West)', 'Desert Palace Exit (East)'],
                      'Desert Palace Exit (North)',
                      'Eastern Palace Exit',
                      'Tower of Hera Exit',
                      'Thieves Town Exit',
                      'Skull Woods Final Section Exit',
                      'Ice Palace Exit',
                      'Misery Mire Exit',
                      'Palace of Darkness Exit',
                      'Swamp Palace Exit',
                      'Agahnims Tower Exit',
                      ['Turtle Rock Ledge Exit (East)',
                       'Turtle Rock Exit (Front)', 'Turtle Rock Ledge Exit (West)', 'Turtle Rock Isolated Ledge Exit']]
# Dark-world cave entrances that must also be usable as exits.
DW_Entrances_Must_Exit = ['Bumper Cave (Top)', 'Hookshot Cave Back Entrance']
# Two-door caves whose doors are NOT interchangeable (direction matters).
Two_Door_Caves_Directional = [('Bumper Cave (Bottom)', 'Bumper Cave (Top)'),
                              ('Hookshot Cave', 'Hookshot Cave Back Entrance')]
# Two-door caves whose doors are interchangeable.
Two_Door_Caves = [('Elder House (East)', 'Elder House (West)'),
                  ('Two Brothers House (East)', 'Two Brothers House (West)'),
                  ('Superbunny Cave (Bottom)', 'Superbunny Cave (Top)')]
# Entrances from which the Old Man can be escorted home.
Old_Man_Entrances = ['Old Man Cave (East)',
                     'Old Man House (Top)',
                     'Death Mountain Return Cave (East)',
                     'Spectacle Rock Cave',
                     'Spectacle Rock Cave Peak',
                     'Spectacle Rock Cave (Bottom)']
Old_Man_House_Base = [['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)']]
# Two-exit cave groups; inner lists may be re-ordered by the shuffler.
Cave_Exits_Base = [['Elder House Exit (East)', 'Elder House Exit (West)'],
                   ['Two Brothers House Exit (East)', 'Two Brothers House Exit (West)'],
                   ['Death Mountain Return Cave Exit (West)', 'Death Mountain Return Cave Exit (East)'],
                   ['Fairy Ascension Cave Exit (Bottom)', 'Fairy Ascension Cave Exit (Top)'],
                   ['Bumper Cave Exit (Top)', 'Bumper Cave Exit (Bottom)'],
                   ['Hookshot Cave Exit (South)', 'Hookshot Cave Exit (North)']]
# These groups are tuples (fixed order), so they are never re-shuffled.
Cave_Exits_Base += [('Superbunny Cave Exit (Bottom)', 'Superbunny Cave Exit (Top)'),
                    ('Spiral Cave Exit (Top)', 'Spiral Cave Exit')]
# Three-exit cave groups.
Cave_Three_Exits_Base = [
    ('Spectacle Rock Cave Exit (Peak)', 'Spectacle Rock Cave Exit (Top)', 'Spectacle Rock Cave Exit'),
    ('Paradox Cave Exit (Top)', 'Paradox Cave Exit (Middle)', 'Paradox Cave Exit (Bottom)')
]
# Light-world multi-door cave entrances.
LW_Entrances = ['Elder House (East)',
                'Elder House (West)',
                'Two Brothers House (East)',
                'Two Brothers House (West)',
                'Old Man Cave (West)',
                'Old Man House (Bottom)',
                'Death Mountain Return Cave (West)',
                'Paradox Cave (Bottom)',
                'Paradox Cave (Middle)',
                'Paradox Cave (Top)',
                'Fairy Ascension Cave (Bottom)',
                'Fairy Ascension Cave (Top)',
                'Spiral Cave',
                'Spiral Cave (Bottom)']
# Dark-world multi-door cave entrances.
DW_Entrances = ['Bumper Cave (Bottom)',
                'Superbunny Cave (Top)',
                'Superbunny Cave (Bottom)',
                'Hookshot Cave']
# Multi-entrance cave doors eligible to hold the bomb shop.
Bomb_Shop_Multi_Cave_Doors = ['Hyrule Castle Entrance (South)',
                              'Misery Mire',
                              'Thieves Town',
                              'Bumper Cave (Bottom)',
                              'Swamp Palace',
                              'Hyrule Castle Secret Entrance Stairs',
                              'Skull Woods First Section Door',
                              'Skull Woods Second Section Door (East)',
                              'Skull Woods Second Section Door (West)',
                              'Skull Woods Final Section',
                              'Ice Palace',
                              'Turtle Rock',
                              'Dark Death Mountain Ledge (West)',
                              'Dark Death Mountain Ledge (East)',
                              'Superbunny Cave (Top)',
                              'Superbunny Cave (Bottom)',
                              'Hookshot Cave',
                              'Ganons Tower',
                              'Desert Palace Entrance (South)',
                              'Tower of Hera',
                              'Two Brothers House (West)',
                              'Old Man Cave (East)',
                              'Old Man House (Bottom)',
                              'Old Man House (Top)',
                              'Death Mountain Return Cave (East)',
                              'Death Mountain Return Cave (West)',
                              'Spectacle Rock Cave Peak',
                              'Spectacle Rock Cave',
                              'Spectacle Rock Cave (Bottom)',
                              'Paradox Cave (Bottom)',
                              'Paradox Cave (Middle)',
                              'Paradox Cave (Top)',
                              'Fairy Ascension Cave (Bottom)',
                              'Fairy Ascension Cave (Top)',
                              'Spiral Cave',
                              'Spiral Cave (Bottom)',
                              'Palace of Darkness',
                              'Hyrule Castle Entrance (West)',
                              'Hyrule Castle Entrance (East)',
                              'Agahnims Tower',
                              'Desert Palace Entrance (West)',
                              'Desert Palace Entrance (North)'
                              # all entrances below this line would be possible for blacksmith_hut
                              # if it were not for dwarf checking multi-entrance caves
                              ]
# Multi-entrance cave doors eligible to hold the blacksmiths' hut.
Blacksmith_Multi_Cave_Doors = ['Eastern Palace',
                               'Elder House (East)',
                               'Elder House (West)',
                               'Two Brothers House (East)',
                               'Old Man Cave (West)',
                               'Sanctuary',
                               'Lumberjack Tree Cave',
                               'Lost Woods Hideout Stump',
                               'North Fairy Cave',
                               'Bat Cave Cave',
                               'Kakariko Well Cave']
# Light-world single-entrance cave doors.
LW_Single_Cave_Doors = ['Blinds Hideout',
                        'Lake Hylia Fairy',
                        'Light Hype Fairy',
                        'Desert Fairy',
                        'Chicken House',
                        'Aginahs Cave',
                        'Sahasrahlas Hut',
                        'Cave Shop (Lake Hylia)',
                        'Blacksmiths Hut',
                        'Sick Kids House',
                        'Lost Woods Gamble',
                        'Fortune Teller (Light)',
                        'Snitch Lady (East)',
                        'Snitch Lady (West)',
                        'Bush Covered House',
                        'Tavern (Front)',
                        'Light World Bomb Hut',
                        'Kakariko Shop',
                        'Mini Moldorm Cave',
                        'Long Fairy Cave',
                        'Good Bee Cave',
                        '20 Rupee Cave',
                        '50 Rupee Cave',
                        'Ice Rod Cave',
                        'Library',
                        'Potion Shop',
                        'Dam',
                        'Lumberjack House',
                        'Lake Hylia Fortune Teller',
                        'Kakariko Gamble Game',
                        'Waterfall of Wishing',
                        'Capacity Upgrade',
                        'Bonk Rock Cave',
                        'Graveyard Cave',
                        'Checkerboard Cave',
                        'Cave 45',
                        'Kings Grave',
                        'Bonk Fairy (Light)',
                        'Hookshot Fairy',
                        'Mimic Cave']
# Dark-world single-entrance cave doors.
DW_Single_Cave_Doors = ['Bonk Fairy (Dark)',
                        'Dark Sanctuary Hint',
                        'Dark Lake Hylia Fairy',
                        'C-Shaped House',
                        'Big Bomb Shop',
                        'Dark Death Mountain Fairy',
                        'Dark Lake Hylia Shop',
                        'Dark World Shop',
                        'Red Shield Shop',
                        'Mire Shed',
                        'East Dark World Hint',
                        'Dark Desert Hint',
                        'Spike Cave',
                        'Palace of Darkness Hint',
                        'Dark Lake Hylia Ledge Spike Cave',
                        'Cave Shop (Dark Death Mountain)',
                        'Dark World Potion Shop',
                        'Pyramid Fairy',
                        'Archery Game',
                        'Dark World Lumberjack Shop',
                        'Hype Cave',
                        'Brewery',
                        'Dark Lake Hylia Ledge Hint',
                        'Chest Game',
                        'Dark Desert Fairy',
                        'Dark Lake Hylia Ledge Fairy',
                        'Fortune Teller (Dark)',
                        'Dark World Hammer Peg Cave']
# Single-entrance doors eligible to hold the blacksmiths' hut.
Blacksmith_Single_Cave_Doors = ['Blinds Hideout',
                                'Lake Hylia Fairy',
                                'Light Hype Fairy',
                                'Desert Fairy',
                                'Chicken House',
                                'Aginahs Cave',
                                'Sahasrahlas Hut',
                                'Cave Shop (Lake Hylia)',
                                'Blacksmiths Hut',
                                'Sick Kids House',
                                'Lost Woods Gamble',
                                'Fortune Teller (Light)',
                                'Snitch Lady (East)',
                                'Snitch Lady (West)',
                                'Bush Covered House',
                                'Tavern (Front)',
                                'Light World Bomb Hut',
                                'Kakariko Shop',
                                'Mini Moldorm Cave',
                                'Long Fairy Cave',
                                'Good Bee Cave',
                                '20 Rupee Cave',
                                '50 Rupee Cave',
                                'Ice Rod Cave',
                                'Library',
                                'Potion Shop',
                                'Dam',
                                'Lumberjack House',
                                'Lake Hylia Fortune Teller',
                                'Kakariko Gamble Game']
# Single-entrance doors eligible to hold the bomb shop.
Bomb_Shop_Single_Cave_Doors = ['Waterfall of Wishing',
                               'Capacity Upgrade',
                               'Bonk Rock Cave',
                               'Graveyard Cave',
                               'Checkerboard Cave',
                               'Cave 45',
                               'Kings Grave',
                               'Bonk Fairy (Light)',
                               'Hookshot Fairy',
                               'East Dark World Hint',
                               'Palace of Darkness Hint',
                               'Dark Lake Hylia Fairy',
                               'Dark Lake Hylia Ledge Fairy',
                               'Dark Lake Hylia Ledge Spike Cave',
                               'Dark Lake Hylia Ledge Hint',
                               'Hype Cave',
                               'Bonk Fairy (Dark)',
                               'Brewery',
                               'C-Shaped House',
                               'Chest Game',
                               'Dark World Hammer Peg Cave',
                               'Red Shield Shop',
                               'Dark Sanctuary Hint',
                               'Fortune Teller (Dark)',
                               'Dark World Shop',
                               'Dark World Lumberjack Shop',
                               'Dark World Potion Shop',
                               'Archery Game',
                               'Mire Shed',
                               'Dark Desert Hint',
                               'Dark Desert Fairy',
                               'Spike Cave',
                               'Cave Shop (Dark Death Mountain)',
                               'Dark Death Mountain Fairy',
                               'Mimic Cave',
                               'Big Bomb Shop',
                               'Dark Lake Hylia Shop']
# Remaining single-entrance doors.
Single_Cave_Doors = ['Pyramid Fairy']
Single_Cave_Targets = ['Blinds Hideout',
'Bonk Fairy (Light)',
'Lake Hylia Healer Fairy',
'Swamp Healer Fairy',
'Desert Healer Fairy',
'Kings Grave',
'Chicken House',
'Aginahs Cave',
'Sahasrahlas Hut',
'Cave Shop (Lake Hylia)',
'Sick Kids House',
'Lost Woods Gamble',
'Fortune Teller (Light)',
'Snitch Lady (East)',
'Snitch Lady (West)',
'Bush Covered House',
'Tavern (Front)',
'Light World Bomb Hut',
'Kakariko Shop',
'Cave 45',
'Graveyard Cave',
'Checkerboard Cave',
'Mini Moldorm Cave',
'Long Fairy Cave',
'Good Bee Cave',
'20 Rupee Cave',
'50 Rupee Cave',
'Ice Rod Cave',
'Bonk Rock Cave',
'Library',
'Potion Shop',
'Hookshot Fairy',
'Waterfall of Wishing',
'Capacity Upgrade',
'Pyramid Fairy',
'East Dark World Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Healer Fairy',
'Dark Lake Hylia Ledge Healer Fairy',
'Dark Lake Hylia Ledge Spike Cave',
'Dark Lake Hylia Ledge Hint',
'Hype Cave',
'Bonk | |
<filename>python-package/xgboost/sklearn.py
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, R0912, C0302
"""Scikit-Learn Wrapper interface for XGBoost."""
import copy
import warnings
import json
import os
from typing import Union, Optional, List, Dict, Callable, Tuple, Any, TypeVar, Type
import numpy as np
from .core import Booster, DMatrix, XGBoostError
from .core import _deprecate_positional_args, _convert_ntree_limit
from .core import Metric
from .training import train
from .callback import TrainingCallback
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_array
# Do not use class names on scikit-learn directly. Re-define the classes on
# .compat to guarantee the behavior without scikit-learn
from .compat import (
SKLEARN_INSTALLED,
XGBModelBase,
XGBClassifierBase,
XGBRegressorBase,
XGBoostLabelEncoder,
DataFrame,
scipy_csr,
)
# Actually XGBoost supports a lot more data types including `scipy.sparse.csr_matrix` and
# many others. See `data.py` for a complete list. The `array_like` TypeVar below is only a
# lightweight bound used for easier static type checks, not an exhaustive contract.
array_like = TypeVar("array_like", bound=Union[np.ndarray, DataFrame, scipy_csr])
class XGBRankerMixIn:  # pylint: disable=too-few-public-methods
    """Mix-in supplying the ``_estimator_type`` tag for ranking estimators,
    which scikit-learn's own base classes would normally define."""

    _estimator_type = "ranker"
# Type of the ``objective`` parameter accepted by the sklearn wrappers:
# a built-in objective name, a custom ``f(y_true, y_pred) -> (grad, hess)``
# callable, or None.
_SklObjective = Optional[
    Union[
        str, Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
    ]
]
def _objective_decorator(
    func: Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
) -> Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]:
    """Adapt a sklearn-style objective to the ``xgboost.training.train`` signature.

    Parameters
    ----------
    func:
        Callable ``func(y_true, y_pred)`` where both arguments are
        array_like of shape [n_samples] (targets and predictions); it
        returns the ``(grad, hess)`` pair.

    Returns
    -------
    new_func:
        Callable ``new_func(preds, dmatrix)`` as expected by
        ``xgboost.training.train``; the labels are extracted from the
        training set via ``dmatrix.get_label()``.
    """
    def inner(preds: np.ndarray, dmatrix: DMatrix) -> Tuple[np.ndarray, np.ndarray]:
        """Swap the argument order and pull the labels off the DMatrix."""
        return func(dmatrix.get_label(), preds)

    return inner
__estimator_doc = '''
n_estimators : int
Number of gradient boosted trees. Equivalent to number of boosting
rounds.
'''
__model_doc = f'''
max_depth : Optional[int]
Maximum tree depth for base learners.
learning_rate : Optional[float]
Boosting learning rate (xgb's "eta")
verbosity : Optional[int]
The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
objective : {_SklObjective}
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
booster: Optional[str]
Specify which booster to use: gbtree, gblinear or dart.
tree_method: Optional[str]
Specify which tree method to use. Default to auto. If this parameter
is set to default, XGBoost will choose the most conservative option
available. It's recommended to study this option from the parameters
document: https://xgboost.readthedocs.io/en/latest/treemethod.html.
n_jobs : Optional[int]
Number of parallel threads used to run xgboost. When used with other Scikit-Learn
algorithms like grid search, you may choose which algorithm to parallelize and
balance the threads. Creating thread contention will significantly slow down both
algorithms.
gamma : Optional[float]
Minimum loss reduction required to make a further partition on a leaf
node of the tree.
min_child_weight : Optional[float]
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : Optional[float]
Maximum delta step we allow each tree's weight estimation to be.
subsample : Optional[float]
Subsample ratio of the training instance.
colsample_bytree : Optional[float]
Subsample ratio of columns when constructing each tree.
colsample_bylevel : Optional[float]
Subsample ratio of columns for each level.
colsample_bynode : Optional[float]
Subsample ratio of columns for each split.
reg_alpha : Optional[float]
L1 regularization term on weights (xgb's alpha).
reg_lambda : Optional[float]
L2 regularization term on weights (xgb's lambda).
scale_pos_weight : Optional[float]
Balancing of positive and negative weights.
base_score : Optional[float]
The initial prediction score of all instances, global bias.
random_state : Optional[Union[numpy.random.RandomState, int]]
Random number seed.
.. note::
Using gblinear booster with shotgun updater is nondeterministic as
it uses Hogwild algorithm.
missing : float, default np.nan
Value in the data which needs to be present as a missing value.
num_parallel_tree: Optional[int]
Used for boosting random forest.
monotone_constraints : Optional[Union[Dict[str, int], str]]
Constraint of variable monotonicity. See tutorial for more
information.
interaction_constraints : Optional[Union[str, List[Tuple[str]]]]
Constraints for interaction representing permitted interactions. The
constraints must be specified in the form of a nest list, e.g. [[0, 1],
[2, 3, 4]], where each inner list is a group of indices of features
that are allowed to interact with each other. See tutorial for more
information
importance_type: string, default "gain"
The feature importance type for the feature_importances\\_ property:
either "gain", "weight", "cover", "total_gain" or "total_cover".
gpu_id : Optional[int]
Device ordinal.
validate_parameters : Optional[bool]
Give warnings for unknown parameter.
enable_categorical : bool
.. versionadded:: 1.5.0
Experimental support for categorical data. Do not set to true unless you are
interested in development. Only valid when `gpu_hist` and pandas dataframe are
used.
kwargs : dict, optional
Keyword arguments for XGBoost Booster object. Full documentation of
parameters can be found here:
https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst.
Attempting to set a parameter via the constructor args and \\*\\*kwargs
dict simultaneously will result in a TypeError.
.. note:: \\*\\*kwargs unsupported by scikit-learn
\\*\\*kwargs is unsupported by scikit-learn. We do not guarantee
that parameters passed via this argument will interact properly
with scikit-learn.
'''
__custom_obj_note = '''
.. note:: Custom objective function
A custom objective function can be provided for the ``objective``
parameter. In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
grad: array_like of shape [n_samples]
The value of the gradient for each sample point.
hess: array_like of shape [n_samples]
The value of the second derivative for each sample point
'''
def xgboost_model_doc(
    header: str,
    items: List[str],
    extra_parameters: Optional[str] = None,
    end_note: Optional[str] = None,
) -> Callable[[Type], Type]:
    '''Build a class decorator that assembles a Scikit-Learn wrapper docstring.

    Parameters
    ----------
    header : str
        An introduction to the class.
    items : list
        A list of common doc items.  Available items are:
          - estimators: the meaning of n_estimators
          - model: All the other parameters
          - objective: note for customized objective
    extra_parameters : str
        Document for class specific parameters, placed at the head.
    end_note : str
        Extra notes put to the end.
    '''
    # Map the item keys onto the shared docstring fragments defined above.
    fragments = {
        'estimators': __estimator_doc,
        'model': __model_doc,
        'objective': __custom_obj_note,
    }

    def adddoc(cls: Type) -> Type:
        # Assemble: header, "Parameters" section head, per-class extras,
        # the selected shared fragments, then any trailing note.
        pieces = [header + '\n\n', '''
Parameters
----------
''']
        if extra_parameters:
            pieces.append(extra_parameters)
        pieces.extend(fragments[item] for item in items)
        if end_note:
            pieces.append(end_note)
        cls.__doc__ = ''.join(pieces)
        return cls

    return adddoc
def _wrap_evaluation_matrices(
missing: float,
X: Any,
y: Any,
group: Optional[Any],
qid: Optional[Any],
sample_weight: Optional[Any],
base_margin: Optional[Any],
feature_weights: Optional[Any],
eval_set: Optional[List[Tuple[Any, Any]]],
sample_weight_eval_set: Optional[List[Any]],
base_margin_eval_set: Optional[List[Any]],
eval_group: Optional[List[Any]],
eval_qid: Optional[List[Any]],
create_dmatrix: Callable,
enable_categorical: bool,
label_transform: Callable = lambda x: x,
) -> Tuple[Any, Optional[List[Tuple[Any, str]]]]:
"""Convert array_like evaluation matrices into DMatrix. Perform validation on the way.
"""
train_dmatrix = create_dmatrix(
data=X,
label=label_transform(y),
group=group,
qid=qid,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=missing,
enable_categorical=enable_categorical,
)
n_validation = 0 if eval_set is None else len(eval_set)
def validate_or_none(meta: Optional[List], name: str) -> List:
if meta is None:
return [None] * n_validation
if len(meta) != n_validation:
raise ValueError(
f"{name}'s length does not eqaul to `eval_set`, " +
f"expecting {n_validation}, got {len(meta)}"
)
return meta
if eval_set is not None:
sample_weight_eval_set = validate_or_none(
sample_weight_eval_set, "sample_weight_eval_set"
)
base_margin_eval_set = validate_or_none(
base_margin_eval_set, "base_margin_eval_set"
)
eval_group = validate_or_none(eval_group, "eval_group")
eval_qid = validate_or_none(eval_qid, "eval_qid")
evals = []
for i, (valid_X, valid_y) in enumerate(eval_set):
# Skip the duplicated entry.
if all(
(
valid_X is X, valid_y is y,
sample_weight_eval_set[i] is sample_weight,
base_margin_eval_set[i] is base_margin,
eval_group[i] is group,
eval_qid[i] is qid
)
):
evals.append(train_dmatrix)
else:
m = create_dmatrix(
data=valid_X,
label=label_transform(valid_y),
weight=sample_weight_eval_set[i],
group=eval_group[i],
qid=eval_qid[i],
base_margin=base_margin_eval_set[i],
missing=missing,
enable_categorical=enable_categorical,
)
evals.append(m)
nevals = len(evals)
eval_names = ["validation_{}".format(i) for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
if any(
meta is not None
for meta in [
sample_weight_eval_set,
base_margin_eval_set,
eval_group,
eval_qid,
]
):
raise ValueError(
"`eval_set` is not set but one of the other evaluation meta info is "
"not None."
)
evals = []
return train_dmatrix, evals
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
['estimators', 'model', 'objective'])
class XGBModel(XGBModelBase):
# pylint: disable=too-many-arguments, too-many-instance-attributes, missing-docstring
def __init__(
self,
max_depth: Optional[int] = None,
learning_rate: Optional[float] = None,
n_estimators: int = 100,
verbosity: Optional[int] = None,
objective: _SklObjective = None,
booster: Optional[str] = None,
tree_method: Optional[str] | |
import os
import pandas as pd
from glob import glob
import numpy as np
from joblib import Parallel, delayed
from joblib import Memory
import logging
from pymeg import atlas_glasser
# Disk-backed memoization for the expensive loaders below; requires the
# PYMEG_CACHE_DIR environment variable to be set.
# NOTE(review): `cachedir=` is the legacy joblib Memory argument (newer joblib
# releases renamed it to `location=`) — confirm the pinned joblib version.
memory = Memory(cachedir=os.environ['PYMEG_CACHE_DIR'], verbose=0)
# Backend used for every joblib.Parallel call in this module.
backend = 'loky'
class Cache(object):
    """A cache that can prevent reloading from disk.

    Maps glob strings to the TFR DataFrame loaded from the matching files.
    Can be used as a context manager; cached data is dropped on exit.
    """

    def __init__(self, cache=True):
        # store: globstring -> loaded DataFrame.
        # cache: when False, every `get` reloads from disk and nothing is kept.
        self.store = {}
        self.cache = cache

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.clear()

    def get(self, globstring):
        """Return the data for `globstring`, loading from disk on first use."""
        if not self.cache:
            return self._load_tfr_data(globstring)
        if globstring not in self.store:
            self.store[globstring] = self._load_tfr_data(globstring)
        else:
            logging.info('Returning cached object: %s' % globstring)
        return self.store[globstring]

    def clear(self):
        """Drop all cached entries while keeping the caching flag intact.

        Bug fix: this previously assigned ``self.cache = {}``, which clobbered
        the boolean caching flag (an empty dict is falsy, silently disabling
        caching) and left every loaded DataFrame alive in ``self.store``.
        """
        self.store = {}

    def _load_tfr_data(self, globstring):
        """Load and concatenate all TFR files identified by `globstring`."""
        logging.info('Loading data for: %s' % globstring)
        tfr_data_filenames = glob(globstring)
        logging.info('This is these filenames: %s' % str(tfr_data_filenames))
        tfrs = []
        for f in tfr_data_filenames:
            logging.info('Now working on: %s' % str(f))
            tfr = pd.read_hdf(f)
            logging.info('Done loading, now pivoting.')
            # Long -> wide: one column per time point, one row per
            # (trial, freq, area) combination.
            tfr = pd.pivot_table(tfr.reset_index(), values=tfr.columns, index=[
                'trial', 'est_val'], columns='time').stack(-2)
            tfr.index.names = ['trial', 'freq', 'area']
            tfrs.append(tfr)
        logging.info('Concate now.')
        tfr = pd.concat(tfrs)
        logging.info('Done _load_tfr_data.')
        return tfr
def baseline_per_sensor_get(tfr, baseline_time=(-0.25, -0)):
    '''Return the average baseline value per (freq, area).

    Selects the time columns falling inside `baseline_time` (inclusive on
    both ends), averages over trials within each (freq, area) group, then
    averages over the selected time points.
    '''
    times = tfr.columns.get_level_values('time').values.astype(float)
    in_window = (times >= baseline_time[0]) & (times <= baseline_time[1])
    # Result length is nr_freqs * nr_channels: one scalar per (freq, area).
    per_group = tfr.loc[:, in_window].groupby(['freq', 'area']).mean()
    return per_group.mean(axis=1)
def baseline_per_sensor_apply(tfr, baseline):
    '''Express `tfr` as percent signal change relative to `baseline`.

    For each (freq, area) group the matching scalar is looked up in
    `baseline` (a Series indexed by freq/area levels) and the group is
    transformed to (x - base) / base * 100.
    '''
    def to_percent(group):
        freq = group.index.get_level_values('freq').values[0]
        area = group.index.get_level_values('area').values[0]
        match = (baseline.index.isin([freq], level='freq')
                 & baseline.index.isin([area], level='area'))
        base_value = float(baseline.loc[match])
        return (group - base_value) / base_value * 100

    return tfr.groupby(['freq', 'area']).apply(to_percent)
@memory.cache(ignore=['cache'])
def load_tfr_contrast(data_globstring, base_globstring, meta_data, conditions,
                      baseline_time, n_jobs=1, baseline_per_condition=True,
                      cache=Cache(cache=False)):
    """Load a set of data files and turn them into contrasts.

    Parameters
    ----------
    data_globstring, base_globstring : str
        Glob patterns selecting the TFR data files and the files used for
        baselining (may be identical).
    meta_data : DataFrame
        Trial meta data; must contain a 'hash' column matching TFR trial ids.
    conditions : list of str
        Meta-data column names; a trial belongs to a condition when the
        column equals 1.
    baseline_time : tuple
        (start, end) window forwarded to the baseline computation.
    n_jobs : int
        Parallelism for the per-condition contrast computation.
    baseline_per_condition : bool
        If True, baselines are computed per condition.
    cache : Cache
        In-memory cache preventing repeated disk loads; excluded from the
        joblib cache key via ``ignore``.
        NOTE(review): mutable default argument shared across calls — harmless
        only because Cache(cache=False) keeps no state; verify.

    Returns
    -------
    (DataFrame, dict)
        Concatenated per-condition TFRs and a mapping
        condition -> number of trials that entered it.
    """
    tfrs = []
    # load data:
    tfr_data = cache.get(data_globstring)
    # Make sure that meta_data and tfr_data overlap in trials
    tfr_trials = np.unique(tfr_data.index.get_level_values('trial').values)
    meta_trials = np.unique(meta_data.reset_index().loc[:, 'hash'].values)
    # NOTE(review): `any` only requires a single overlapping trial — confirm
    # whether full overlap (`all`) was intended.
    assert(any([t in meta_trials for t in tfr_trials]))
    # data to baseline:
    if not (data_globstring == base_globstring):
        tfr_data_to_baseline = cache.get(base_globstring)
    else:
        tfr_data_to_baseline = tfr_data
    if baseline_per_condition:
        # apply condition ind, collapse across trials, and get baseline::
        # NOTE(review): this collapses the 'trial' level, yet
        # make_tfr_contrasts later filters the baseline data by trial when
        # baseline_per_condition is True — this branch condition may be
        # inverted; verify against intended behavior.
        tfr_data_to_baseline = tfr_data_to_baseline.groupby(
            ['freq', 'area']).mean()
    # compute contrasts
    tasks = []
    for condition in conditions:
        tasks.append((tfr_data, tfr_data_to_baseline, meta_data,
                      condition, baseline_time, baseline_per_condition))
    tfr_conditions = Parallel(n_jobs=n_jobs, verbose=1, backend=backend)(
        delayed(make_tfr_contrasts)(*task) for task in tasks)
    # Merge the per-condition {condition: n_trials} dicts into one.
    weight_dicts = [t[1] for t in tfr_conditions]
    weights = weight_dicts.pop()
    [weights.update(w) for w in weight_dicts]
    # weights = {(k, v) for k, v in [t[1] for t in tfr_conditions]}
    # Conditions with zero trials return None and are dropped here.
    tfrs.append(pd.concat([t[0] for t in tfr_conditions if t[0] is not None]))
    tfrs = pd.concat(tfrs)
    return tfrs, weights
def make_tfr_contrasts(tfr_data, tfr_data_to_baseline, meta_data,
                       condition, baseline_time, baseline_per_condition=True):
    """Compute the baseline-corrected average TFR for one condition.

    Returns a ``(DataFrame, {condition: n_trials})`` pair; the DataFrame is
    None when no trial belongs to the condition.
    """
    # Trials belonging to this condition (meta-data column == 1).
    condition_ind = meta_data.loc[meta_data[condition] == 1, "hash"]

    if baseline_per_condition:
        # Restrict the baseline data to this condition's trials before
        # collapsing across trials.
        baseline_mask = tfr_data_to_baseline.index.isin(
            condition_ind, level='trial')
        tfr_data_to_baseline = (tfr_data_to_baseline
                                .loc[baseline_mask, :]
                                .groupby(['freq', 'area']).mean())
    baseline = baseline_per_sensor_get(
        tfr_data_to_baseline, baseline_time=baseline_time)

    # Restrict the data itself to this condition's trials.
    data_mask = tfr_data.index.isin(condition_ind, level='trial')
    tfr_data_condition = tfr_data.loc[data_mask, :]
    num_trials_in_condition = len(np.unique(
        tfr_data_condition.index.get_level_values('trial')))
    if num_trials_in_condition == 0:
        return None, {condition: num_trials_in_condition}

    tfr_data_condition = tfr_data_condition.groupby(['freq', 'area']).mean()
    # Baseline-correct, then average once more per (freq, area).
    tfr_data_condition = baseline_per_sensor_apply(
        tfr_data_condition, baseline=baseline).groupby(['freq', 'area']).mean()

    # Tag with the condition name and order levels as (area, condition, freq).
    tfr_data_condition['condition'] = condition
    tfr_data_condition = tfr_data_condition.set_index(
        ['condition', ], append=True, inplace=False)
    tfr_data_condition = tfr_data_condition.reorder_levels(
        ['area', 'condition', 'freq'])
    return tfr_data_condition, {condition: num_trials_in_condition}
@memory.cache(ignore=['cache'])
def single_conditions(conditions, data_glob, base_glob, meta_data,
                      baseline_time, baseline_per_condition=False,
                      n_jobs=1, cache=Cache(cache=False)):
    """Compute per-condition average TFRs for a single glob pair.

    Disk-memoized wrapper around `load_tfr_contrast` that additionally
    averages the result per (area, condition, freq).

    Returns
    -------
    (DataFrame, dict)
        Averaged TFRs and the condition -> trial-count mapping.
    """
    tfr_condition, weights = load_tfr_contrast(
        data_glob, base_glob, meta_data,
        list(conditions), baseline_time, n_jobs=n_jobs,
        baseline_per_condition=baseline_per_condition,
        cache=cache)
    return tfr_condition.groupby(
        ['area', 'condition', 'freq']).mean(), weights
@memory.cache(ignore=['cache'])
def pool_conditions(conditions, data_globs, base_globs, meta_data,
                    baseline_time, baseline_per_condition=False,
                    n_jobs=1, cache=Cache(cache=False)):
    """Pool per-condition TFRs across several glob pairs (e.g. sessions).

    Each glob pair contributes a per-condition average; contributions are
    combined as a weighted sum where a pair's weight for a condition is the
    fraction of that condition's trials it contributed.

    Returns
    -------
    DataFrame
        Pooled TFRs indexed by (area, condition, freq).
    """
    weights = {}
    tfrs = {}
    for i, (data_glob, base_glob) in enumerate(
            zip(ensure_iter(data_globs), ensure_iter(base_globs))):
        # tfr, weight = single_conditions(
        #     conditions, data_glob, base_glob, meta_data, baseline_time,
        #     n_jobs=n_jobs,
        #     cache=cache)
        tfr, weight = load_tfr_contrast(
            data_glob, base_glob, meta_data,
            list(conditions), baseline_time, n_jobs=n_jobs,
            baseline_per_condition=baseline_per_condition,
            cache=cache)
        tfrs[i] = tfr
        weights[i] = weight
    # Compute total trials per condition
    total_weights = {}
    for i, w in weights.items():
        for k, v in w.items():
            if k not in total_weights:
                total_weights[k] = v
            else:
                total_weights[k] += v
    # Apply weights to each tfr
    ind_weights = {}
    for k in total_weights.keys():
        ind_weights[k] = []
    for key in tfrs.keys():
        tfr = tfrs[key]
        for condition in total_weights.keys():
            condition_ind = tfr.index.get_level_values(
                'condition') == condition
            # Weight = this glob pair's share of the condition's trials.
            w = weights[key][condition] / total_weights[condition]
            tfr.loc[condition_ind, :] *= w
            ind_weights[condition].append(w)
        tfrs[key] = tfr
    # NOTE(review): this loop rebinds `weights`, clobbering the dict built
    # above; harmless only because `weights` is unused afterwards.
    for condition, weights in ind_weights.items():
        logging.info("weights for %s -> %s, sum=%f" %
                     (condition, str(weights), sum(weights)))
    # Summing the weighted contributions yields the pooled average.
    tfrs = pd.concat(tfrs.values()).groupby(
        ['area', 'condition', 'freq']).sum()
    return tfrs
@memory.cache(ignore=['cache'])
def compute_contrast(contrasts, hemis, data_globstring, base_globstring,
                     meta_data, baseline_time, baseline_per_condition=True,
                     n_jobs=1, cache=Cache(cache=False)):
    """Compute contrasts per Glasser cluster from tfr data

    Args:
        contrasts: dict
            Contains contrast names as keys and len==2 tuples as values. The
            tuples contain a list of condition names first and then a set of
            weights for each condition. Condition names identify columns in
            the meta data that are one for each trial that belongs to
            this condition.
        hemis: list of str
            Each entry can be:
            'lh_is_ipsi' if contrast is ipsi-contra hemi and left hemi is
                ipsi.
            'rh_is_ipsi' if contrast is ipsi-contra and right hemi is ipsi
            'avg' if contrast should be averaged across hemispheres
        data_globstring: list
            Each string in data_globstring selects a set of filenames if
            passed through glob. Condition averages and baselines are then
            computed for each group of filenames identified by one entry
            in data_globstring. This is useful for, e.g. computing
            conditions per session first, then averaging them and then
            computing contrasts across sessions.
        base_globstring: string or list
            Same as data_globstring but selects data to use for baselining
        meta_data: data frame
            Meta data DataFrame with as many rows as trials.
        baseline_time: tuple
    """
    # load for all subjects:
    tfr_condition = []
    from functools import reduce
    from itertools import product
    # All condition names referenced by any contrast.
    conditions = set(
        reduce(lambda x, y: x + y, [x[0] for x in contrasts.values()]))
    tfr_condition = pool_conditions(conditions, data_globstring,
                                    base_globstring, meta_data,
                                    baseline_time, n_jobs=n_jobs,
                                    cache=cache)
    # Lower case all area names
    # FIXME: Set all area names to lower case!
    all_clusters, _, _, _ = atlas_glasser.get_clusters()
    # Map each cluster's area names onto the (case-sensitive) area names that
    # actually occur in the TFR index by comparing lower-cased names; areas
    # without exactly one match are dropped from the cluster.
    tfr_areas = np.array([a for a in tfr_condition.index.levels[
        np.where(np.array(tfr_condition.index.names) == 'area')[0][0]]])
    tfr_areas_lower = np.array([area.lower() for area in tfr_areas])
    for cluster, areas in all_clusters.items():
        new_areas = []
        for area in areas:
            idx = np.where(tfr_areas_lower == area.lower())[0]
            if len(idx) == 1:
                new_areas.append(tfr_areas[idx[0]])
        all_clusters[cluster] = new_areas
    # mean across sessions:
    tfr_condition = tfr_condition.groupby(
        ['area', 'condition', 'freq']).mean()
    cluster_contrasts = []
    for cur_contrast, hemi, cluster in product(contrasts.items(), hemis,
                                               all_clusters.keys()):
        contrast, (conditions, weights) = cur_contrast
        logging.info('Start computing contrast %s for cluster %s' %
                     (contrast, cluster))
        right = []
        left = []
        for condition in conditions:
            tfrs_rh = []
            tfrs_lh = []
            for area in all_clusters[cluster]:
                area_idx = tfr_condition.index.isin([area], level='area')
                condition_idx = tfr_condition.index.isin(
                    [condition], level='condition')
                subset = tfr_condition.loc[area_idx & condition_idx].groupby(
                    ['freq']).mean()
                # Hemisphere is encoded in the area name ('rh' substring).
                if 'rh' in area:
                    tfrs_rh.append(subset)
                else:
                    tfrs_lh.append(subset)
            # What happens when an area is not defined for both hemis?
            if (len(tfrs_lh) == 0) and (len(tfrs_rh) == 0):
                logging.warn('Skipping condition %s in cluster %s' %
                             (condition, cluster))
                continue
            # pd.concat raises ValueError on an empty list; a one-sided
            # cluster simply contributes nothing for that hemisphere.
            try:
                left.append(pd.concat(tfrs_lh))
            except ValueError:
                pass
            try:
                right.append(pd.concat(tfrs_rh))
            except ValueError:
                pass
        if (len(left) == 0) and (len(right) == 0):
            logging.warn('Skipping cluster %s' % (cluster))
            continue
        if hemi == 'rh_is_ipsi':
            # Swap so that `left` always holds the ipsilateral data below.
            left, right = right, left
        if 'is_ipsi' in hemi:
            if not len(left) == len(right):
                logging.warn('Skipping cluster %s: does not have the same number of lh/rh rois' %
                             (cluster))
                continue
            # Ipsi minus contra, per condition.
            tfrs = [left[i] - right[i]
                    for i in range(len(left))]
        else:
            # Average hemispheres; fall back to the one that is present when
            # the cluster only exists on one side.
            if (len(right) == 0) and (len(left) == len(weights)):
                tfrs = left
            elif (len(left) == 0) and (len(right) == len(weights)):
                tfrs = right
            else:
                tfrs = [(right[i] + left[i]) / 2
                        for i in range(len(left))]
        assert(len(tfrs) == len(weights))
        # Weighted sum over conditions yields the contrast.
        tfrs = [tfr * weight for tfr, weight in zip(tfrs, weights)]
        tfrs = reduce(lambda x, y: x + y, tfrs)
        tfrs = tfrs.groupby('freq').mean()
        tfrs.loc[:, 'cluster'] = cluster
        tfrs.loc[:, 'contrast'] = contrast
        tfrs.loc[:, 'hemi'] = hemi
        cluster_contrasts.append(tfrs)
    logging.info('Done compute contrast')
    return pd.concat(cluster_contrasts)
def augment_data(meta, response_left, stimulus):
"""Augment meta data with fields for specific cases
Args:
meta: DataFrame
response_left: ndarray
1 if subject made a left_response / yes response
stimulus: ndarray
1 if a left_response is correct
"""
# add columns:
meta["all"] = 1
meta["left"] = response_left.astype(int)
meta["right"] = (~response_left).astype(int)
meta["hit"] = ((response_left == 1) & (stimulus == 1)).astype(int)
meta["fa"] = ((response_left == 1) & (stimulus == 0)).astype(int)
meta["miss"] = ((response_left == 0) | |
device_enrollment_disown_body: DeviceEnrollmentDisownBody
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DeviceEnrollmentDisownResponse
"""
kwargs['_return_http_data_only'] = True
return self.v1_device_enrollments_id_disown_post_with_http_info(id, **kwargs) # noqa: E501
# NOTE: OpenAPI-generator style endpoint method; hand edits beyond comments
# would be lost on regeneration.
def v1_device_enrollments_id_disown_post_with_http_info(self, id, **kwargs):  # noqa: E501
    """Disown devices from the given Device Enrollment Instance  # noqa: E501

    Disowns devices from the given device enrollment instance  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.v1_device_enrollments_id_disown_post_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param id: Device Enrollment Instance identifier (required)
    :type id: str
    :param device_enrollment_disown_body: List of device serial numbers to disown
    :type device_enrollment_disown_body: DeviceEnrollmentDisownBody
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DeviceEnrollmentDisownResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot the arguments (self, id, kwargs) before any other locals exist.
    local_var_params = locals()

    all_params = [
        'id',
        'device_enrollment_disown_body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method v1_device_enrollments_id_disown_post" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `v1_device_enrollments_id_disown_post`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'device_enrollment_disown_body' in local_var_params:
        body_params = local_var_params['device_enrollment_disown_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Expected response model class per HTTP status code.
    response_types_map = {
        200: "DeviceEnrollmentDisownResponse",
        404: None,
        500: None,
    }

    return self.api_client.call_api(
        '/v1/device-enrollments/{id}/disown', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def v1_device_enrollments_id_get(self, id, **kwargs):  # noqa: E501
    """Retrieve a Device Enrollment Instance with the supplied id  # noqa: E501

    Retrieves a Device Enrollment Instance with the supplied id  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.v1_device_enrollments_id_get(id, async_req=True)
    >>> result = thread.get()

    :param id: Device Enrollment Instance identifier (required)
    :type id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DeviceEnrollmentInstance
    """
    # Convenience wrapper: return only the deserialized body, not the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.v1_device_enrollments_id_get_with_http_info(id, **kwargs)  # noqa: E501
# NOTE: OpenAPI-generator style endpoint method; hand edits beyond comments
# would be lost on regeneration.
def v1_device_enrollments_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
    """Retrieve a Device Enrollment Instance with the supplied id  # noqa: E501

    Retrieves a Device Enrollment Instance with the supplied id  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.v1_device_enrollments_id_get_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param id: Device Enrollment Instance identifier (required)
    :type id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DeviceEnrollmentInstance, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot the arguments (self, id, kwargs) before any other locals exist.
    local_var_params = locals()

    all_params = [
        'id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method v1_device_enrollments_id_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `v1_device_enrollments_id_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Expected response model class per HTTP status code.
    response_types_map = {
        200: "DeviceEnrollmentInstance",
        404: "ApiError",
    }

    return self.api_client.call_api(
        '/v1/device-enrollments/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def v1_device_enrollments_id_history_get(self, id, **kwargs):  # noqa: E501
    """Get sorted and paged Device Enrollment history objects  # noqa: E501

    Gets sorted and paged device enrollment history objects  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.v1_device_enrollments_id_history_get(id, async_req=True)
    >>> result = thread.get()

    :param id: Device Enrollment Instance identifier (required)
    :type id: str
    :param page:
    :type page: int
    :param page_size:
    :type page_size: int
    :param sort: Sorting criteria in the format: property,asc/desc. Default sort order is descending. Multiple sort criteria are supported and must be entered on separate lines in Swagger UI. In the URI the 'sort' query param is duplicated for each sort criterion, e.g., ...&sort=name%2Casc&sort=date%2Cdesc
    :type sort: list[str]
    :param filter: Query in the RSQL format, allowing to filter history notes collection. Default search is empty query - returning all results for the requested page. Fields allowed in the query: username, date, note, details. This param can be combined with paging and sorting. Example: search=username!=admin and details==*disabled* and date<2019-12-15
    :type filter: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: HistorySearchResults
    """
    # Convenience wrapper: return only the deserialized body, not the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.v1_device_enrollments_id_history_get_with_http_info(id, **kwargs)  # noqa: E501
def v1_device_enrollments_id_history_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Get sorted and paged Device Enrollment history objects # noqa: E501
Gets sorted and paged device | |
"""Drawable objects
"""
from math import cos, pi, sin
from typing import ClassVar, List, Tuple
import pyglet
from .geometry import Point
# Color tuple aliases: 3-component RGB and 4-component RGBA, in float and int
# flavors. NOTE(review): GeoDrawer passes colors with the "c3B" pyglet format
# (see below), so the concrete value range (0-255 vs 0.0-1.0) should be
# confirmed against the callers.
C3F = Tuple[float, float, float]
C4F = Tuple[float, float, float, float]
C3I = Tuple[int, int, int]
C4I = Tuple[int, int, int, int]
class GeoDrawer:
    """A static class container of drawing methods.

    Only namespaces pyglet drawing helpers plus the shared vertex/color
    format strings used by every draw call.
    """

    # pyglet vertex attribute format: 2-component float positions.
    _VERTEX_MODE: ClassVar[str] = "v2f"
    # pyglet color attribute format string ("c3B"); colors are repeated once
    # per vertex in the draw calls below.
    _COLOR_MODE: ClassVar[str] = "c3B"
@staticmethod
def draw_line_segment(
    x1: float, y1: float, x2: float, y2: float, color: C3F
) -> None:
    """Draw a straight line between (x1, y1) and (x2, y2).

    Args:
        x1 (float): Start x coordinate.
        y1 (float): Start y coordinate.
        x2 (float): End x coordinate.
        y2 (float): End y coordinate.
        color (Tuple[float, float, float]): RGB valued color.
    """
    endpoints = [x1, y1, x2, y2]
    # Two vertices, one color per vertex.
    pyglet.graphics.draw(
        2,
        pyglet.gl.GL_LINE_STRIP,
        (GeoDrawer._VERTEX_MODE, endpoints),
        (GeoDrawer._COLOR_MODE, color * 2),
    )
@staticmethod
def draw_circle(x: float, y: float, r: float, color: C3F, splits: int = 30) -> None:
"""Draw a cricle.
Args:
x (float): The circle center's x coordinate.
y (float): The circle center's y coordinate.
r (float): The circle's radius.
color (Tuple[float, float, float]): The fill color of the circle.
splits (int, optional): How detailed the polygon emulating a circle should
be. Higher values increase detail.
"""
vertices = [x, y]
for i in range(splits + 1):
ang = 2 * pi * i / splits
vertices.append(x + cos(ang) * r)
vertices.append(y + sin(ang) * r)
pyglet.graphics.draw(
splits + 2,
pyglet.gl.GL_TRIANGLE_FAN,
(GeoDrawer._VERTEX_MODE, vertices),
(GeoDrawer._COLOR_MODE, color * (splits + 2)),
)
@staticmethod
def draw_point(point: Point, size: float, color: C3F) -> None:
"""Draw a point.
Args:
point (Point): The point object draw.
size (float): The radius of the circle used to represent the point.
color (Tuple[float, float, float]): The fill color.
"""
GeoDrawer.draw_circle(point.x, point.y, size, color)
@staticmethod
def draw_rectangle(x: float, y: float, w: float, h: float, color: C3F) -> None:
"""Draw a rectangle.
Args:
x (float): South west corner's x coordinate.
y (float): South west corner's y coordinate.
w (float): Horizontal length.
h (float): Vertical length.
color (Tuple[float, float, float]): Fill color.
"""
pyglet.graphics.draw(
4,
pyglet.gl.GL_TRIANGLE_STRIP,
(GeoDrawer._VERTEX_MODE, [x, y, x, y + h, x + w, y, x + w, y + h]),
(GeoDrawer._COLOR_MODE, color * 4),
)
@staticmethod
def draw_point_path(pnt_path: List[Point], color: C3F, point_size: float) -> None:
"""Draw a list of point and a line segment between to adjacent points.
Args:
pnt_path (List[Point]): The list of points to draw in order.
color (Tuple[float, float, float]): Color of line segments and point fills.
point_size (float): Radius of the circle representing the points.
"""
n = len(pnt_path)
if n > 0:
if n > 1:
vertices = [coord for pnt in pnt_path for coord in pnt.coords()]
pyglet.graphics.draw(
n,
pyglet.gl.GL_LINE_STRIP,
(GeoDrawer._VERTEX_MODE, vertices),
(GeoDrawer._COLOR_MODE, color * n),
)
for pnt in pnt_path:
GeoDrawer.draw_point(pnt, point_size, color)
class Color:
"""A collection of color constants.
"""
# Scraped with bs4 from https://www.rapidtables.com/web/color/RGB_Color.html.
MAROON: ClassVar[C3I] = (128, 0, 0)
DARK_RED: ClassVar[C3I] = (139, 0, 0)
BROWN: ClassVar[C3I] = (165, 42, 42)
FIREBRICK: ClassVar[C3I] = (178, 34, 34)
CRIMSON: ClassVar[C3I] = (220, 20, 60)
RED: ClassVar[C3I] = (255, 0, 0)
TOMATO: ClassVar[C3I] = (255, 99, 71)
CORAL: ClassVar[C3I] = (255, 127, 80)
INDIAN_RED: ClassVar[C3I] = (205, 92, 92)
LIGHT_CORAL: ClassVar[C3I] = (240, 128, 128)
DARK_SALMON: ClassVar[C3I] = (233, 150, 122)
SALMON: ClassVar[C3I] = (250, 128, 114)
LIGHT_SALMON: ClassVar[C3I] = (255, 160, 122)
ORANGE_RED: ClassVar[C3I] = (255, 69, 0)
DARK_ORANGE: ClassVar[C3I] = (255, 140, 0)
ORANGE: ClassVar[C3I] = (255, 165, 0)
GOLD: ClassVar[C3I] = (255, 215, 0)
DARK_GOLDEN_ROD: ClassVar[C3I] = (184, 134, 11)
GOLDEN_ROD: ClassVar[C3I] = (218, 165, 32)
PALE_GOLDEN_ROD: ClassVar[C3I] = (238, 232, 170)
DARK_KHAKI: ClassVar[C3I] = (189, 183, 107)
KHAKI: ClassVar[C3I] = (240, 230, 140)
OLIVE: ClassVar[C3I] = (128, 128, 0)
YELLOW: ClassVar[C3I] = (255, 255, 0)
YELLOW_GREEN: ClassVar[C3I] = (154, 205, 50)
DARK_OLIVE_GREEN: ClassVar[C3I] = (85, 107, 47)
OLIVE_DRAB: ClassVar[C3I] = (107, 142, 35)
LAWN_GREEN: ClassVar[C3I] = (124, 252, 0)
CHART_REUSE: ClassVar[C3I] = (127, 255, 0)
GREEN_YELLOW: ClassVar[C3I] = (173, 255, 47)
DARK_GREEN: ClassVar[C3I] = (0, 100, 0)
GREEN: ClassVar[C3I] = (0, 128, 0)
FOREST_GREEN: ClassVar[C3I] = (34, 139, 34)
LIME: ClassVar[C3I] = (0, 255, 0)
LIME_GREEN: ClassVar[C3I] = (50, 205, 50)
LIGHT_GREEN: ClassVar[C3I] = (144, 238, 144)
PALE_GREEN: ClassVar[C3I] = (152, 251, 152)
DARK_SEA_GREEN: ClassVar[C3I] = (143, 188, 143)
MEDIUM_SPRING_GREEN: ClassVar[C3I] = (0, 250, 154)
SPRING_GREEN: ClassVar[C3I] = (0, 255, 127)
SEA_GREEN: ClassVar[C3I] = (46, 139, 87)
MEDIUM_AQUA_MARINE: ClassVar[C3I] = (102, 205, 170)
MEDIUM_SEA_GREEN: ClassVar[C3I] = (60, 179, 113)
LIGHT_SEA_GREEN: ClassVar[C3I] = (32, 178, 170)
DARK_SLATE_GRAY: ClassVar[C3I] = (47, 79, 79)
TEAL: ClassVar[C3I] = (0, 128, 128)
DARK_CYAN: ClassVar[C3I] = (0, 139, 139)
AQUA: ClassVar[C3I] = (0, 255, 255)
CYAN: ClassVar[C3I] = (0, 255, 255)
LIGHT_CYAN: ClassVar[C3I] = (224, 255, 255)
DARK_TURQUOISE: ClassVar[C3I] = (0, 206, 209)
TURQUOISE: ClassVar[C3I] = (64, 224, 208)
MEDIUM_TURQUOISE: ClassVar[C3I] = (72, 209, 204)
PALE_TURQUOISE: ClassVar[C3I] = (175, 238, 238)
AQUA_MARINE: ClassVar[C3I] = (127, 255, 212)
POWDER_BLUE: ClassVar[C3I] = (176, 224, 230)
CADET_BLUE: ClassVar[C3I] = (95, 158, 160)
STEEL_BLUE: ClassVar[C3I] = (70, 130, 180)
CORN_FLOWER_BLUE: ClassVar[C3I] = (100, 149, 237)
DEEP_SKY_BLUE: ClassVar[C3I] = (0, 191, 255)
DODGER_BLUE: ClassVar[C3I] = (30, 144, 255)
LIGHT_BLUE: ClassVar[C3I] = (173, 216, 230)
SKY_BLUE: ClassVar[C3I] = (135, 206, 235)
LIGHT_SKY_BLUE: ClassVar[C3I] = (135, 206, 250)
MIDNIGHT_BLUE: ClassVar[C3I] = (25, 25, 112)
NAVY: ClassVar[C3I] = (0, 0, 128)
DARK_BLUE: ClassVar[C3I] = (0, 0, 139)
MEDIUM_BLUE: ClassVar[C3I] = (0, 0, 205)
BLUE: ClassVar[C3I] = (0, 0, 255)
ROYAL_BLUE: ClassVar[C3I] = (65, 105, 225)
BLUE_VIOLET: ClassVar[C3I] = (138, 43, 226)
INDIGO: ClassVar[C3I] = (75, 0, 130)
DARK_SLATE_BLUE: ClassVar[C3I] = (72, 61, 139)
SLATE_BLUE: ClassVar[C3I] = (106, 90, 205)
MEDIUM_SLATE_BLUE: ClassVar[C3I] = (123, 104, 238)
MEDIUM_PURPLE: ClassVar[C3I] = (147, 112, 219)
DARK_MAGENTA: ClassVar[C3I] = (139, 0, 139)
DARK_VIOLET: ClassVar[C3I] = (148, 0, 211)
DARK_ORCHID: ClassVar[C3I] = (153, 50, 204)
MEDIUM_ORCHID: ClassVar[C3I] = (186, 85, 211)
PURPLE: ClassVar[C3I] = (128, 0, 128)
THISTLE: ClassVar[C3I] = (216, 191, 216)
PLUM: ClassVar[C3I] = (221, 160, 221)
VIOLET: ClassVar[C3I] = (238, 130, 238)
MAGENTA: ClassVar[C3I] = (255, 0, 255)
ORCHID: ClassVar[C3I] = (218, 112, 214)
MEDIUM_VIOLET_RED: ClassVar[C3I] = (199, 21, 133)
PALE_VIOLET_RED: ClassVar[C3I] = (219, 112, 147)
DEEP_PINK: ClassVar[C3I] = (255, 20, 147)
HOT_PINK: ClassVar[C3I] = (255, 105, 180)
LIGHT_PINK: ClassVar[C3I] = (255, 182, 193)
PINK: ClassVar[C3I] = (255, 192, 203)
ANTIQUE_WHITE: ClassVar[C3I] = (250, 235, 215)
BEIGE: ClassVar[C3I] = (245, 245, 220)
BISQUE: ClassVar[C3I] = (255, 228, 196)
BLANCHED_ALMOND: ClassVar[C3I] = (255, 235, 205)
WHEAT: ClassVar[C3I] = (245, 222, 179)
CORN_SILK: ClassVar[C3I] = (255, 248, 220)
LEMON_CHIFFON: ClassVar[C3I] = (255, 250, 205)
LIGHT_GOLDEN_ROD_YELLOW: ClassVar[C3I] = (250, 250, 210)
LIGHT_YELLOW: ClassVar[C3I] = (255, 255, 224)
SADDLE_BROWN: ClassVar[C3I] = (139, 69, 19)
SIENNA: ClassVar[C3I] = (160, 82, 45)
CHOCOLATE: ClassVar[C3I] = (210, 105, 30)
PERU: ClassVar[C3I] = (205, 133, 63)
SANDY_BROWN: ClassVar[C3I] = (244, 164, 96)
BURLY_WOOD: ClassVar[C3I] = (222, 184, 135)
TAN: ClassVar[C3I] = (210, 180, 140)
ROSY_BROWN: ClassVar[C3I] = (188, 143, 143)
MOCCASIN: ClassVar[C3I] = (255, 228, 181)
NAVAJO_WHITE: ClassVar[C3I] = (255, 222, 173)
PEACH_PUFF: ClassVar[C3I] = (255, 218, 185)
MISTY_ROSE: ClassVar[C3I] = (255, 228, 225)
LAVENDER_BLUSH: ClassVar[C3I] = (255, 240, 245)
LINEN: ClassVar[C3I] = (250, 240, 230)
OLD_LACE: ClassVar[C3I] = (253, 245, 230)
PAPAYA_WHIP: ClassVar[C3I] = (255, 239, 213)
SEA_SHELL: ClassVar[C3I] = (255, 245, 238)
MINT_CREAM: ClassVar[C3I] = (245, 255, 250)
SLATE_GRAY: ClassVar[C3I] = (112, 128, 144)
LIGHT_SLATE_GRAY: ClassVar[C3I] = (119, 136, 153)
LIGHT_STEEL_BLUE: ClassVar[C3I] = (176, 196, 222)
LAVENDER: ClassVar[C3I] = (230, 230, 250)
FLORAL_WHITE: ClassVar[C3I] = (255, 250, 240)
ALICE_BLUE: ClassVar[C3I] = (240, 248, 255)
GHOST_WHITE: ClassVar[C3I] = (248, 248, 255)
HONEYDEW: ClassVar[C3I] = (240, 255, 240)
IVORY: ClassVar[C3I] = (255, 255, 240)
AZURE: ClassVar[C3I] = (240, 255, 255)
SNOW: ClassVar[C3I] = (255, 250, 250)
BLACK: ClassVar[C3I] = (0, 0, 0)
DIM_GRAY: ClassVar[C3I] = (105, 105, 105)
GRAY: ClassVar[C3I] = (128, 128, 128)
DARK_GRAY: ClassVar[C3I] = (169, 169, 169)
SILVER: ClassVar[C3I] = (192, 192, 192)
LIGHT_GRAY: ClassVar[C3I] = (211, 211, 211)
GAINSBORO: ClassVar[C3I] = (220, 220, 220)
WHITE_SMOKE: ClassVar[C3I] = (245, 245, 245)
WHITE: ClassVar[C3I] = (255, 255, 255)
@staticmethod
| |
<filename>resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtCore/Qt.py<gh_stars>1-10
# encoding: utf-8
# module PySide.QtCore
# from C:\Python27\lib\site-packages\PySide\QtCore.pyd
# by generator 1.147
# no doc
# imports
import Shiboken as __Shiboken
class Qt(__Shiboken.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
AA_AttributeCount = PySide.QtCore.Qt.ApplicationAttribute.AA_AttributeCount
AA_CaptureMultimediaKeys = PySide.QtCore.Qt.ApplicationAttribute.AA_CaptureMultimediaKeys
AA_DontCreateNativeWidgetSiblings = PySide.QtCore.Qt.ApplicationAttribute.AA_DontCreateNativeWidgetSiblings
AA_DontShowIconsInMenus = PySide.QtCore.Qt.ApplicationAttribute.AA_DontShowIconsInMenus
AA_DontUseNativeMenuBar = PySide.QtCore.Qt.ApplicationAttribute.AA_DontUseNativeMenuBar
AA_ImmediateWidgetCreation = PySide.QtCore.Qt.ApplicationAttribute.AA_ImmediateWidgetCreation
AA_MacDontSwapCtrlAndMeta = PySide.QtCore.Qt.ApplicationAttribute.AA_MacDontSwapCtrlAndMeta
AA_MacPluginApplication = PySide.QtCore.Qt.ApplicationAttribute.AA_MacPluginApplication
AA_MSWindowsUseDirect3DByDefault = PySide.QtCore.Qt.ApplicationAttribute.AA_MSWindowsUseDirect3DByDefault
AA_NativeWindows = PySide.QtCore.Qt.ApplicationAttribute.AA_NativeWindows
AA_S60DisablePartialScreenInputMode = PySide.QtCore.Qt.ApplicationAttribute.AA_S60DisablePartialScreenInputMode
AA_S60DontConstructApplicationPanes = PySide.QtCore.Qt.ApplicationAttribute.AA_S60DontConstructApplicationPanes
AA_X11InitThreads = PySide.QtCore.Qt.ApplicationAttribute.AA_X11InitThreads
AbsoluteSize = PySide.QtCore.Qt.SizeMode.AbsoluteSize
AccessibleDescriptionRole = PySide.QtCore.Qt.ItemDataRole.AccessibleDescriptionRole
AccessibleTextRole = PySide.QtCore.Qt.ItemDataRole.AccessibleTextRole
ActionMask = PySide.QtCore.Qt.DropAction.ActionMask
ActionsContextMenu = PySide.QtCore.Qt.ContextMenuPolicy.ActionsContextMenu
ActiveWindowFocusReason = PySide.QtCore.Qt.FocusReason.ActiveWindowFocusReason
AlignAbsolute = PySide.QtCore.Qt.AlignmentFlag.AlignAbsolute
AlignBottom = PySide.QtCore.Qt.AlignmentFlag.AlignBottom
AlignCenter = PySide.QtCore.Qt.AlignmentFlag.AlignCenter
AlignHCenter = PySide.QtCore.Qt.AlignmentFlag.AlignHCenter
AlignHorizontal_Mask = PySide.QtCore.Qt.AlignmentFlag.AlignHorizontal_Mask
AlignJustify = PySide.QtCore.Qt.AlignmentFlag.AlignJustify
AlignLeading = PySide.QtCore.Qt.AlignmentFlag.AlignLeading
AlignLeft = PySide.QtCore.Qt.AlignmentFlag.AlignLeft
Alignment = None # (!) real value is "<type 'Alignment'>"
AlignmentFlag = None # (!) real value is "<type 'PySide.QtCore.Qt.AlignmentFlag'>"
AlignRight = PySide.QtCore.Qt.AlignmentFlag.AlignRight
AlignTop = PySide.QtCore.Qt.AlignmentFlag.AlignTop
AlignTrailing = PySide.QtCore.Qt.AlignmentFlag.AlignTrailing
AlignVCenter = PySide.QtCore.Qt.AlignmentFlag.AlignVCenter
AlignVertical_Mask = PySide.QtCore.Qt.AlignmentFlag.AlignVertical_Mask
AllDockWidgetAreas = PySide.QtCore.Qt.DockWidgetArea.AllDockWidgetAreas
AllToolBarAreas = PySide.QtCore.Qt.ToolBarArea.AllToolBarAreas
AlphaDither_Mask = PySide.QtCore.Qt.ImageConversionFlag.AlphaDither_Mask
ALT = PySide.QtCore.Qt.Modifier.ALT
AltModifier = PySide.QtCore.Qt.KeyboardModifier.AltModifier
AnchorAttribute = None # (!) real value is "<type 'PySide.QtCore.Qt.AnchorAttribute'>"
AnchorBottom = PySide.QtCore.Qt.AnchorPoint.AnchorBottom
AnchorHorizontalCenter = PySide.QtCore.Qt.AnchorPoint.AnchorHorizontalCenter
AnchorHref = PySide.QtCore.Qt.AnchorAttribute.AnchorHref
AnchorLeft = PySide.QtCore.Qt.AnchorPoint.AnchorLeft
AnchorName = PySide.QtCore.Qt.AnchorAttribute.AnchorName
AnchorPoint = None # (!) real value is "<type 'PySide.QtCore.Qt.AnchorPoint'>"
AnchorRight = PySide.QtCore.Qt.AnchorPoint.AnchorRight
AnchorTop = PySide.QtCore.Qt.AnchorPoint.AnchorTop
AnchorVerticalCenter = PySide.QtCore.Qt.AnchorPoint.AnchorVerticalCenter
ApplicationAttribute = None # (!) real value is "<type 'PySide.QtCore.Qt.ApplicationAttribute'>"
ApplicationModal = PySide.QtCore.Qt.WindowModality.ApplicationModal
ApplicationShortcut = PySide.QtCore.Qt.ShortcutContext.ApplicationShortcut
ArrowCursor = PySide.QtCore.Qt.CursorShape.ArrowCursor
ArrowType = None # (!) real value is "<type 'PySide.QtCore.Qt.ArrowType'>"
AscendingOrder = PySide.QtCore.Qt.SortOrder.AscendingOrder
AspectRatioMode = None # (!) real value is "<type 'PySide.QtCore.Qt.AspectRatioMode'>"
AutoColor = PySide.QtCore.Qt.ImageConversionFlag.AutoColor
AutoCompatConnection = PySide.QtCore.Qt.ConnectionType.AutoCompatConnection
AutoConnection = PySide.QtCore.Qt.ConnectionType.AutoConnection
AutoDither = PySide.QtCore.Qt.ImageConversionFlag.AutoDither
AutoText = PySide.QtCore.Qt.TextFormat.AutoText
AvoidDither = PySide.QtCore.Qt.ImageConversionFlag.AvoidDither
Axis = None # (!) real value is "<type 'PySide.QtCore.Qt.Axis'>"
BackgroundColorRole = PySide.QtCore.Qt.ItemDataRole.BackgroundColorRole
BackgroundRole = PySide.QtCore.Qt.ItemDataRole.BackgroundRole
BacktabFocusReason = PySide.QtCore.Qt.FocusReason.BacktabFocusReason
BDiagPattern = PySide.QtCore.Qt.BrushStyle.BDiagPattern
BevelJoin = PySide.QtCore.Qt.PenJoinStyle.BevelJoin
BGMode = None # (!) real value is "<type 'PySide.QtCore.Qt.BGMode'>"
BitmapCursor = PySide.QtCore.Qt.CursorShape.BitmapCursor
black = PySide.QtCore.Qt.GlobalColor.black
BlankCursor = PySide.QtCore.Qt.CursorShape.BlankCursor
BlockingQueuedConnection = PySide.QtCore.Qt.ConnectionType.BlockingQueuedConnection
blue = PySide.QtCore.Qt.GlobalColor.blue
BottomDockWidgetArea = PySide.QtCore.Qt.DockWidgetArea.BottomDockWidgetArea
BottomLeftCorner = PySide.QtCore.Qt.Corner.BottomLeftCorner
BottomLeftSection = PySide.QtCore.Qt.WindowFrameSection.BottomLeftSection
BottomRightCorner = PySide.QtCore.Qt.Corner.BottomRightCorner
BottomRightSection = PySide.QtCore.Qt.WindowFrameSection.BottomRightSection
BottomSection = PySide.QtCore.Qt.WindowFrameSection.BottomSection
BottomToolBarArea = PySide.QtCore.Qt.ToolBarArea.BottomToolBarArea
BrushStyle = None # (!) real value is "<type 'PySide.QtCore.Qt.BrushStyle'>"
BusyCursor = PySide.QtCore.Qt.CursorShape.BusyCursor
BypassGraphicsProxyWidget = PySide.QtCore.Qt.WindowType.BypassGraphicsProxyWidget
CaseInsensitive = PySide.QtCore.Qt.CaseSensitivity.CaseInsensitive
CaseSensitive = PySide.QtCore.Qt.CaseSensitivity.CaseSensitive
CaseSensitivity = None # (!) real value is "<type 'PySide.QtCore.Qt.CaseSensitivity'>"
Checked = PySide.QtCore.Qt.CheckState.Checked
CheckState = None # (!) real value is "<type 'PySide.QtCore.Qt.CheckState'>"
CheckStateRole = PySide.QtCore.Qt.ItemDataRole.CheckStateRole
ClickFocus = PySide.QtCore.Qt.FocusPolicy.ClickFocus
ClipOperation = None # (!) real value is "<type 'PySide.QtCore.Qt.ClipOperation'>"
ClosedHandCursor = PySide.QtCore.Qt.CursorShape.ClosedHandCursor
color0 = PySide.QtCore.Qt.GlobalColor.color0
color1 = PySide.QtCore.Qt.GlobalColor.color1
ColorMode_Mask = PySide.QtCore.Qt.ImageConversionFlag.ColorMode_Mask
ColorOnly = PySide.QtCore.Qt.ImageConversionFlag.ColorOnly
ConicalGradientPattern = PySide.QtCore.Qt.BrushStyle.ConicalGradientPattern
ConnectionType = None # (!) real value is "<type 'PySide.QtCore.Qt.ConnectionType'>"
ContainsItemBoundingRect = PySide.QtCore.Qt.ItemSelectionMode.ContainsItemBoundingRect
ContainsItemShape = PySide.QtCore.Qt.ItemSelectionMode.ContainsItemShape
ContextMenuPolicy = None # (!) real value is "<type 'PySide.QtCore.Qt.ContextMenuPolicy'>"
ControlModifier = PySide.QtCore.Qt.KeyboardModifier.ControlModifier
CoordinateSystem = None # (!) real value is "<type 'PySide.QtCore.Qt.CoordinateSystem'>"
CopyAction = PySide.QtCore.Qt.DropAction.CopyAction
Corner = None # (!) real value is "<type 'PySide.QtCore.Qt.Corner'>"
CrossCursor = PySide.QtCore.Qt.CursorShape.CrossCursor
CrossPattern = PySide.QtCore.Qt.BrushStyle.CrossPattern
CTRL = PySide.QtCore.Qt.Modifier.CTRL
CursorMoveStyle = None # (!) real value is "<type 'PySide.QtCore.Qt.CursorMoveStyle'>"
CursorShape = None # (!) real value is "<type 'PySide.QtCore.Qt.CursorShape'>"
CustomContextMenu = PySide.QtCore.Qt.ContextMenuPolicy.CustomContextMenu
CustomCursor = PySide.QtCore.Qt.CursorShape.CustomCursor
CustomDashLine = PySide.QtCore.Qt.PenStyle.CustomDashLine
CustomGesture = PySide.QtCore.Qt.GestureType.CustomGesture
CustomizeWindowHint = PySide.QtCore.Qt.WindowType.CustomizeWindowHint
cyan = PySide.QtCore.Qt.GlobalColor.cyan
darkBlue = PySide.QtCore.Qt.GlobalColor.darkBlue
darkCyan = PySide.QtCore.Qt.GlobalColor.darkCyan
darkGray = PySide.QtCore.Qt.GlobalColor.darkGray
darkGreen = PySide.QtCore.Qt.GlobalColor.darkGreen
darkMagenta = PySide.QtCore.Qt.GlobalColor.darkMagenta
darkRed = PySide.QtCore.Qt.GlobalColor.darkRed
darkYellow = PySide.QtCore.Qt.GlobalColor.darkYellow
DashDotDotLine = PySide.QtCore.Qt.PenStyle.DashDotDotLine
DashDotLine = PySide.QtCore.Qt.PenStyle.DashDotLine
DashLine = PySide.QtCore.Qt.PenStyle.DashLine
DateFormat = None # (!) real value is "<type 'PySide.QtCore.Qt.DateFormat'>"
DayOfWeek = None # (!) real value is "<type 'PySide.QtCore.Qt.DayOfWeek'>"
DecorationPropertyRole = PySide.QtCore.Qt.ItemDataRole.DecorationPropertyRole
DecorationRole = PySide.QtCore.Qt.ItemDataRole.DecorationRole
DefaultContextMenu = PySide.QtCore.Qt.ContextMenuPolicy.DefaultContextMenu
DefaultLocaleLongDate = PySide.QtCore.Qt.DateFormat.DefaultLocaleLongDate
DefaultLocaleShortDate = PySide.QtCore.Qt.DateFormat.DefaultLocaleShortDate
Dense1Pattern = PySide.QtCore.Qt.BrushStyle.Dense1Pattern
Dense2Pattern = PySide.QtCore.Qt.BrushStyle.Dense2Pattern
Dense3Pattern = PySide.QtCore.Qt.BrushStyle.Dense3Pattern
Dense4Pattern = PySide.QtCore.Qt.BrushStyle.Dense4Pattern
Dense5Pattern = PySide.QtCore.Qt.BrushStyle.Dense5Pattern
Dense6Pattern = PySide.QtCore.Qt.BrushStyle.Dense6Pattern
Dense7Pattern = PySide.QtCore.Qt.BrushStyle.Dense7Pattern
DescendingOrder = PySide.QtCore.Qt.SortOrder.DescendingOrder
Desktop = PySide.QtCore.Qt.WindowType.Desktop
DeviceCoordinates = PySide.QtCore.Qt.CoordinateSystem.DeviceCoordinates
DiagCrossPattern = PySide.QtCore.Qt.BrushStyle.DiagCrossPattern
Dialog = PySide.QtCore.Qt.WindowType.Dialog
DiffuseAlphaDither = PySide.QtCore.Qt.ImageConversionFlag.DiffuseAlphaDither
DiffuseDither = PySide.QtCore.Qt.ImageConversionFlag.DiffuseDither
DirectConnection = PySide.QtCore.Qt.ConnectionType.DirectConnection
DisplayPropertyRole = PySide.QtCore.Qt.ItemDataRole.DisplayPropertyRole
DisplayRole = PySide.QtCore.Qt.ItemDataRole.DisplayRole
DitherMode_Mask = PySide.QtCore.Qt.ImageConversionFlag.DitherMode_Mask
Dither_Mask = PySide.QtCore.Qt.ImageConversionFlag.Dither_Mask
DockWidgetArea = None # (!) real value is "<type 'PySide.QtCore.Qt.DockWidgetArea'>"
DockWidgetAreas = None # (!) real value is "<type 'DockWidgetAreas'>"
DockWidgetAreaSizes = None # (!) real value is "<type 'PySide.QtCore.Qt.DockWidgetAreaSizes'>"
DockWidgetArea_Mask = PySide.QtCore.Qt.DockWidgetArea.DockWidgetArea_Mask
DontStartGestureOnChildren = PySide.QtCore.Qt.GestureFlag.DontStartGestureOnChildren
DotLine = PySide.QtCore.Qt.PenStyle.DotLine
DownArrow = PySide.QtCore.Qt.ArrowType.DownArrow
DragCopyCursor = PySide.QtCore.Qt.CursorShape.DragCopyCursor
DragLinkCursor = PySide.QtCore.Qt.CursorShape.DragLinkCursor
DragMoveCursor = PySide.QtCore.Qt.CursorShape.DragMoveCursor
Drawer = PySide.QtCore.Qt.WindowType.Drawer
DropAction = None # (!) real value is "<type 'PySide.QtCore.Qt.DropAction'>"
DropActions = None # (!) real value is "<type 'DropActions'>"
EditRole = PySide.QtCore.Qt.ItemDataRole.EditRole
ElideLeft = PySide.QtCore.Qt.TextElideMode.ElideLeft
ElideMiddle = PySide.QtCore.Qt.TextElideMode.ElideMiddle
ElideNone = PySide.QtCore.Qt.TextElideMode.ElideNone
ElideRight = PySide.QtCore.Qt.TextElideMode.ElideRight
EventPriority = None # (!) real value is "<type 'PySide.QtCore.Qt.EventPriority'>"
ExactHit = PySide.QtCore.Qt.HitTestAccuracy.ExactHit
FastTransformation = PySide.QtCore.Qt.TransformationMode.FastTransformation
FDiagPattern = PySide.QtCore.Qt.BrushStyle.FDiagPattern
FillRule = None # (!) real value is "<type 'PySide.QtCore.Qt.FillRule'>"
FlatCap = PySide.QtCore.Qt.PenCapStyle.FlatCap
FocusPolicy = None # (!) real value is "<type 'PySide.QtCore.Qt.FocusPolicy'>"
FocusReason = None # (!) real value is "<type 'PySide.QtCore.Qt.FocusReason'>"
FontRole = PySide.QtCore.Qt.ItemDataRole.FontRole
ForbiddenCursor = PySide.QtCore.Qt.CursorShape.ForbiddenCursor
ForegroundRole = PySide.QtCore.Qt.ItemDataRole.ForegroundRole
FramelessWindowHint = PySide.QtCore.Qt.WindowType.FramelessWindowHint
Friday = PySide.QtCore.Qt.DayOfWeek.Friday
FuzzyHit = PySide.QtCore.Qt.HitTestAccuracy.FuzzyHit
GestureCanceled = PySide.QtCore.Qt.GestureState.GestureCanceled
GestureFinished = PySide.QtCore.Qt.GestureState.GestureFinished
GestureFlag = None # (!) real value is "<type 'PySide.QtCore.Qt.GestureFlag'>"
GestureFlags = None # (!) real value is "<type 'GestureFlags'>"
GestureStarted = PySide.QtCore.Qt.GestureState.GestureStarted
GestureState = None # (!) real value is "<type 'PySide.QtCore.Qt.GestureState'>"
GestureType = None # (!) real value is "<type 'PySide.QtCore.Qt.GestureType'>"
GestureUpdated = PySide.QtCore.Qt.GestureState.GestureUpdated
GlobalColor = None # (!) real value is "<type 'PySide.QtCore.Qt.GlobalColor'>"
gray = PySide.QtCore.Qt.GlobalColor.gray
green = PySide.QtCore.Qt.GlobalColor.green
GroupSwitchModifier = PySide.QtCore.Qt.KeyboardModifier.GroupSwitchModifier
HighEventPriority = PySide.QtCore.Qt.EventPriority.HighEventPriority
HitTestAccuracy = None # (!) real value is "<type 'PySide.QtCore.Qt.HitTestAccuracy'>"
Horizontal = PySide.QtCore.Qt.Orientation.Horizontal
HorPattern = PySide.QtCore.Qt.BrushStyle.HorPattern
IBeamCursor = PySide.QtCore.Qt.CursorShape.IBeamCursor
IgnoreAction = PySide.QtCore.Qt.DropAction.IgnoreAction
IgnoreAspectRatio = PySide.QtCore.Qt.AspectRatioMode.IgnoreAspectRatio
IgnoredGesturesPropagateToParent = PySide.QtCore.Qt.GestureFlag.IgnoredGesturesPropagateToParent
ImageConversionFlag = None # (!) real value is "<type 'PySide.QtCore.Qt.ImageConversionFlag'>"
ImageConversionFlags = None # (!) real value is "<type 'ImageConversionFlags'>"
ImAnchorPosition = PySide.QtCore.Qt.InputMethodQuery.ImAnchorPosition
ImCurrentSelection = PySide.QtCore.Qt.InputMethodQuery.ImCurrentSelection
ImCursorPosition = PySide.QtCore.Qt.InputMethodQuery.ImCursorPosition
ImFont = PySide.QtCore.Qt.InputMethodQuery.ImFont
ImhDialableCharactersOnly = PySide.QtCore.Qt.InputMethodHint.ImhDialableCharactersOnly
ImhDigitsOnly = PySide.QtCore.Qt.InputMethodHint.ImhDigitsOnly
ImhEmailCharactersOnly = PySide.QtCore.Qt.InputMethodHint.ImhEmailCharactersOnly
ImhExclusiveInputMask = PySide.QtCore.Qt.InputMethodHint.ImhExclusiveInputMask
ImhFormattedNumbersOnly = PySide.QtCore.Qt.InputMethodHint.ImhFormattedNumbersOnly
ImhHiddenText = PySide.QtCore.Qt.InputMethodHint.ImhHiddenText
ImhLowercaseOnly = PySide.QtCore.Qt.InputMethodHint.ImhLowercaseOnly
ImhNoAutoUppercase = PySide.QtCore.Qt.InputMethodHint.ImhNoAutoUppercase
ImhNone = PySide.QtCore.Qt.InputMethodHint.ImhNone
ImhNoPredictiveText = PySide.QtCore.Qt.InputMethodHint.ImhNoPredictiveText
ImhPreferLowercase = PySide.QtCore.Qt.InputMethodHint.ImhPreferLowercase
ImhPreferNumbers = PySide.QtCore.Qt.InputMethodHint.ImhPreferNumbers
ImhPreferUppercase = PySide.QtCore.Qt.InputMethodHint.ImhPreferUppercase
ImhUppercaseOnly = PySide.QtCore.Qt.InputMethodHint.ImhUppercaseOnly
ImhUrlCharactersOnly = PySide.QtCore.Qt.InputMethodHint.ImhUrlCharactersOnly
ImMaximumTextLength = PySide.QtCore.Qt.InputMethodQuery.ImMaximumTextLength
ImMicroFocus = PySide.QtCore.Qt.InputMethodQuery.ImMicroFocus
ImSurroundingText = PySide.QtCore.Qt.InputMethodQuery.ImSurroundingText
InitialSortOrderRole = PySide.QtCore.Qt.ItemDataRole.InitialSortOrderRole
InputMethodHint = None # (!) real value is "<type 'PySide.QtCore.Qt.InputMethodHint'>"
InputMethodHints = None # (!) real value is "<type 'InputMethodHints'>"
InputMethodQuery = None # (!) real value is "<type 'PySide.QtCore.Qt.InputMethodQuery'>"
IntersectClip = PySide.QtCore.Qt.ClipOperation.IntersectClip
IntersectsItemBoundingRect = PySide.QtCore.Qt.ItemSelectionMode.IntersectsItemBoundingRect
IntersectsItemShape = PySide.QtCore.Qt.ItemSelectionMode.IntersectsItemShape
ISODate = PySide.QtCore.Qt.DateFormat.ISODate
ItemDataRole = None # (!) real value is "<type 'PySide.QtCore.Qt.ItemDataRole'>"
ItemFlag = None # (!) real value is "<type 'PySide.QtCore.Qt.ItemFlag'>"
ItemFlags = None # (!) real value is "<type 'ItemFlags'>"
ItemIsDragEnabled = PySide.QtCore.Qt.ItemFlag.ItemIsDragEnabled
ItemIsDropEnabled = PySide.QtCore.Qt.ItemFlag.ItemIsDropEnabled
ItemIsEditable = PySide.QtCore.Qt.ItemFlag.ItemIsEditable
ItemIsEnabled = PySide.QtCore.Qt.ItemFlag.ItemIsEnabled
ItemIsSelectable = PySide.QtCore.Qt.ItemFlag.ItemIsSelectable
ItemIsTristate = PySide.QtCore.Qt.ItemFlag.ItemIsTristate
ItemIsUserCheckable = PySide.QtCore.Qt.ItemFlag.ItemIsUserCheckable
ItemSelectionMode = None # (!) real value is "<type 'PySide.QtCore.Qt.ItemSelectionMode'>"
KeepAspectRatio = PySide.QtCore.Qt.AspectRatioMode.KeepAspectRatio
KeepAspectRatioByExpanding = PySide.QtCore.Qt.AspectRatioMode.KeepAspectRatioByExpanding
Key = None # (!) real value is "<type 'PySide.QtCore.Qt.Key'>"
KeyboardModifier = None # (!) real value is "<type 'PySide.QtCore.Qt.KeyboardModifier'>"
KeyboardModifierMask = PySide.QtCore.Qt.KeyboardModifier.KeyboardModifierMask
KeyboardModifiers = None # (!) real value is "<type 'KeyboardModifiers'>"
KeypadModifier = PySide.QtCore.Qt.KeyboardModifier.KeypadModifier
Key_0 = PySide.QtCore.Qt.Key.Key_0
Key_1 = PySide.QtCore.Qt.Key.Key_1
Key_2 = PySide.QtCore.Qt.Key.Key_2
Key_3 = PySide.QtCore.Qt.Key.Key_3
Key_4 = PySide.QtCore.Qt.Key.Key_4
Key_5 = PySide.QtCore.Qt.Key.Key_5
Key_6 = PySide.QtCore.Qt.Key.Key_6
Key_7 = PySide.QtCore.Qt.Key.Key_7
Key_8 = PySide.QtCore.Qt.Key.Key_8
Key_9 = PySide.QtCore.Qt.Key.Key_9
Key_A = PySide.QtCore.Qt.Key.Key_A
Key_Aacute = PySide.QtCore.Qt.Key.Key_Aacute
Key_Acircumflex = PySide.QtCore.Qt.Key.Key_Acircumflex
Key_acute = PySide.QtCore.Qt.Key.Key_acute
Key_AddFavorite = PySide.QtCore.Qt.Key.Key_AddFavorite
Key_Adiaeresis = PySide.QtCore.Qt.Key.Key_Adiaeresis
Key_AE = PySide.QtCore.Qt.Key.Key_AE
Key_Agrave = PySide.QtCore.Qt.Key.Key_Agrave
Key_Alt = PySide.QtCore.Qt.Key.Key_Alt
Key_AltGr = PySide.QtCore.Qt.Key.Key_AltGr
Key_Ampersand = PySide.QtCore.Qt.Key.Key_Ampersand
Key_Any = PySide.QtCore.Qt.Key.Key_Any
Key_Apostrophe = PySide.QtCore.Qt.Key.Key_Apostrophe
Key_ApplicationLeft = PySide.QtCore.Qt.Key.Key_ApplicationLeft
Key_ApplicationRight = PySide.QtCore.Qt.Key.Key_ApplicationRight
Key_Aring = PySide.QtCore.Qt.Key.Key_Aring
Key_AsciiCircum = PySide.QtCore.Qt.Key.Key_AsciiCircum
Key_AsciiTilde = PySide.QtCore.Qt.Key.Key_AsciiTilde
Key_Asterisk = PySide.QtCore.Qt.Key.Key_Asterisk
Key_At = PySide.QtCore.Qt.Key.Key_At
Key_Atilde = PySide.QtCore.Qt.Key.Key_Atilde
Key_AudioCycleTrack = PySide.QtCore.Qt.Key.Key_AudioCycleTrack
Key_AudioForward = PySide.QtCore.Qt.Key.Key_AudioForward
Key_AudioRandomPlay = PySide.QtCore.Qt.Key.Key_AudioRandomPlay
Key_AudioRepeat = PySide.QtCore.Qt.Key.Key_AudioRepeat
Key_AudioRewind = PySide.QtCore.Qt.Key.Key_AudioRewind
Key_Away = PySide.QtCore.Qt.Key.Key_Away
Key_B = PySide.QtCore.Qt.Key.Key_B
Key_Back = PySide.QtCore.Qt.Key.Key_Back
Key_BackForward = PySide.QtCore.Qt.Key.Key_BackForward
Key_Backslash = PySide.QtCore.Qt.Key.Key_Backslash
Key_Backspace = PySide.QtCore.Qt.Key.Key_Backspace
Key_Backtab = PySide.QtCore.Qt.Key.Key_Backtab
Key_Bar = PySide.QtCore.Qt.Key.Key_Bar
Key_BassBoost = PySide.QtCore.Qt.Key.Key_BassBoost
Key_BassDown = PySide.QtCore.Qt.Key.Key_BassDown
Key_BassUp = PySide.QtCore.Qt.Key.Key_BassUp
Key_Battery = PySide.QtCore.Qt.Key.Key_Battery
Key_Bluetooth = PySide.QtCore.Qt.Key.Key_Bluetooth
Key_Book = PySide.QtCore.Qt.Key.Key_Book
| |
otherwise the header provided
"""
if row['Field Name'] in short_cols:
return short_cols[row['Field Name']]
return row['Field Name']
def add_field_name_to_value(row):
    """ Combine the field name and value provided into one string.

        Args:
            row: the dataframe row containing information about a cell, including the header and contents

        Returns:
            The field name and value provided combined into one string
    """
    # str.join keeps the original str-only contract: non-string cells raise TypeError.
    return ': '.join((row['Field Name'], row['Value Provided']))
def check_required(data, required, required_labels, report_headers, short_cols, flex_data, is_fabs):
    """ Check if all fields that are required to have content in the file have content.

        Args:
            data: the dataframe containing the data for the submission
            required: A list of headers that represent the required fields in the file
            required_labels: A mapping of labels that will get added to required field errors in FABS submissions
            report_headers: The list of error/warning report headers in order
            short_cols: A mapping of the database column names to the lowercased DAIMS headers
            flex_data: the dataframe containing flex data for this file
            is_fabs: A boolean indicating if this is a FABS submission or not

        Returns:
            A dataframe containing error text that can be turned into an error report for required fields
    """
    # Get just the required columns along with the row number and unique ID
    req_data = data[required + ['row_number', 'unique_id']]
    # Flip the data so each header + cell combination is its own row, keeping the relevant row numbers and unique IDs
    errors = pd.melt(req_data, id_vars=['row_number', 'unique_id'], value_vars=required, var_name='Field Name',
                     value_name='Value Provided')
    # Throw out all rows that have data; only null cells are required-field violations
    errors = errors[errors['Value Provided'].isnull()]
    errors.rename(columns={'row_number': 'Row Number', 'unique_id': 'Unique ID'}, inplace=True)
    # melt + boolean filtering left gaps in the index; reset it so row-wise operations behave predictably
    errors = errors.reset_index()
    # Blank out the null cells so the report shows an empty value instead of 'NaN'
    errors['Value Provided'] = ''
    errors['Rule Message'] = ValidationError.requiredErrorMsg
    errors['Expected Value'] = '(not blank)'
    errors['Difference'] = ''
    if not errors.empty:
        # NOTE(review): Rule Label and Flex Field are derived while 'Field Name' still holds the
        # database column names — presumably apply_label/gather_flex_fields key on those names,
        # so the display-name substitution must stay last (TODO confirm against those helpers).
        errors['Rule Label'] = errors.apply(lambda x: apply_label(x, required_labels, is_fabs), axis=1)
        errors['Flex Field'] = errors.apply(lambda x: gather_flex_fields(x, flex_data), axis=1)
        errors['Field Name'] = errors.apply(lambda x: update_field_name(x, short_cols), axis=1)
    else:
        # Empty frame: skip the apply-based derivations and just create the columns so the
        # report_headers selection below doesn't raise on missing columns
        errors['Rule Label'] = ''
        errors['Flex Field'] = ''
    # sorting the headers after all the moving around (also drops helper columns like 'index')
    errors = errors[report_headers]
    errors['error_type'] = ValidationError.requiredError
    return errors
def check_type(data, type_fields, type_labels, report_headers, csv_schema, short_cols, flex_data, is_fabs):
    """ Check that every non-string field holds a value matching its declared type.

        Args:
            data: the dataframe containing the data for the submission
            type_fields: A list of headers that represent the non-string fields in the file
            type_labels: A mapping of labels that will get added to non-string field errors in FABS submissions
            report_headers: The list of error/warning report headers in order
            csv_schema: the schema containing the details about the columns for this file
            short_cols: A mapping of the database column names to the lowercased DAIMS headers
            flex_data: the dataframe containing flex data for this file
            is_fabs: A boolean indicating if this is a FABS submission or not

        Returns:
            A dataframe containing error text that can be turned into an error report for non-string fields
    """
    # Copy the non-string columns (plus row number and unique ID) because the valid cells get nulled below
    candidates = data[type_fields + ['row_number', 'unique_id']].copy()
    for field in type_fields:
        # Null out (set to NaN) every cell that already satisfies its column's datatype
        candidates[field] = invalid_type_vector(candidates[field], csv_schema)
    # Unpivot so every header/cell combination becomes its own record tied to its row number and unique ID
    errors = pd.melt(candidates, id_vars=['row_number', 'unique_id'], value_vars=type_fields,
                     var_name='Field Name', value_name='Value Provided')
    # Anything still non-null failed the type check (originally-null cells were never candidates)
    errors = errors[errors['Value Provided'].notnull()]
    errors = errors.rename(columns={'row_number': 'Row Number', 'unique_id': 'Unique ID'}).reset_index()
    errors['Rule Message'] = ValidationError.typeErrorMsg
    errors['Difference'] = ''
    if errors.empty:
        # apply() on an empty frame would not create the columns, so fill them in directly
        errors['Expected Value'] = ''
        errors['Rule Label'] = ''
        errors['Flex Field'] = ''
    else:
        errors['Expected Value'] = errors.apply(lambda row: expected_type(row, csv_schema), axis=1)
        errors['Rule Label'] = errors.apply(lambda row: apply_label(row, type_labels, is_fabs), axis=1)
        errors['Flex Field'] = errors.apply(lambda row: gather_flex_fields(row, flex_data), axis=1)
        errors['Field Name'] = errors.apply(lambda row: update_field_name(row, short_cols), axis=1)
        errors['Value Provided'] = errors.apply(add_field_name_to_value, axis=1)
    # Reorder the columns into the expected report layout
    errors = errors[report_headers]
    errors['error_type'] = ValidationError.typeError
    return errors
def check_length(data, length_fields, report_headers, csv_schema, short_cols, flex_data, type_error_rows):
    """ Check that every field with a maximum length stays at or under that length.

        Args:
            data: the dataframe containing the data for the submission
            length_fields: A list of headers that represent the fields in the file with maximum lengths
            report_headers: The list of error/warning report headers in order
            csv_schema: the schema containing the details about the columns for this file
            short_cols: A mapping of the database column names to the lowercased DAIMS headers
            flex_data: the dataframe containing flex data for this file
            type_error_rows: A list of row numbers indicating what rows have type errors

        Returns:
            A dataframe containing error text that can be turned into an error report for fields that are too long
    """
    # Rows that already failed the type check are excluded from the length check
    valid_rows = data[~data['row_number'].isin(type_error_rows)]
    # Copy the max-length columns (plus row number and unique ID) because the valid cells get nulled below
    candidates = valid_rows[length_fields + ['row_number', 'unique_id']].copy()
    for field in length_fields:
        # Null out (set to NaN) every cell that already satisfies its column's maximum length
        candidates[field] = invalid_length_vector(candidates[field], csv_schema)
    # Unpivot so every header/cell combination becomes its own record tied to its row number and unique ID
    errors = pd.melt(candidates, id_vars=['row_number', 'unique_id'], value_vars=length_fields,
                     var_name='Field Name', value_name='Value Provided')
    # Anything still non-null exceeded the length limit (originally-null cells were never candidates)
    errors = errors[errors['Value Provided'].notnull()]
    errors = errors.rename(columns={'row_number': 'Row Number', 'unique_id': 'Unique ID'}).reset_index()
    errors['Rule Message'] = ValidationError.lengthErrorMsg
    errors['Difference'] = ''
    errors['Rule Label'] = ''
    if errors.empty:
        # apply() on an empty frame would not create the columns, so fill them in directly
        errors['Expected Value'] = ''
        errors['Flex Field'] = ''
    else:
        errors['Expected Value'] = errors.apply(lambda row: expected_length(row, csv_schema), axis=1)
        errors['Flex Field'] = errors.apply(lambda row: gather_flex_fields(row, flex_data), axis=1)
        errors['Field Name'] = errors.apply(lambda row: update_field_name(row, short_cols), axis=1)
        errors['Value Provided'] = errors.apply(add_field_name_to_value, axis=1)
    # Reorder the columns into the expected report layout
    errors = errors[report_headers]
    errors['error_type'] = ValidationError.lengthError
    return errors
def check_field_format(data, format_fields, report_headers, short_cols, flex_data):
    """ Check that every format-checked field is in the appropriate (YYYYMMDD date) format.

        Args:
            data: the dataframe containing the data for the submission
            format_fields: A list of headers that represent the format check fields in the file
            report_headers: The list of error/warning report headers in order
            short_cols: A mapping of the database column names to the lowercased DAIMS headers
            flex_data: the dataframe containing flex data for this file

        Returns:
            A dataframe containing error text that can be turned into an error report for badly formatted fields
    """
    # Restrict to the format-checked columns, keeping the row number and unique ID for reporting
    subset = data[format_fields + ['row_number', 'unique_id']]
    # Unpivot so every header/cell combination becomes its own record tied to its row number and unique ID
    errors = pd.melt(subset, id_vars=['row_number', 'unique_id'], value_vars=format_fields,
                     var_name='Field Name', value_name='Value Provided')
    # Blank cells have no format to validate, so drop them
    errors = errors[errors['Value Provided'].notnull()]
    if not errors.empty:
        # Keep only the values that fail the format check
        errors['matches_format'] = errors.apply(valid_format, axis=1)
        errors = errors[~errors['matches_format']]
        errors = errors.drop(columns=['matches_format'])
    errors = errors.rename(columns={'row_number': 'Row Number', 'unique_id': 'Unique ID'}).reset_index()
    errors['Rule Message'] = ValidationError.fieldFormatErrorMsg
    errors['Difference'] = ''
    errors['Rule Label'] = 'DABSDATETIME'
    errors['Expected Value'] = 'A date in the YYYYMMDD format.'
    if errors.empty:
        # apply() on an empty frame would not create the column, so fill it in directly
        errors['Flex Field'] = ''
    else:
        errors['Flex Field'] = errors.apply(lambda row: gather_flex_fields(row, flex_data), axis=1)
        errors['Field Name'] = errors.apply(lambda row: update_field_name(row, short_cols), axis=1)
        errors['Value Provided'] = errors.apply(add_field_name_to_value, axis=1)
    # Reorder the columns into the expected report layout
    errors = errors[report_headers]
    errors['error_type'] = ValidationError.fieldFormatError
    return errors
def parse_fields(sess, fields):
""" Parse | |
(FileDropResource, "POST", None, {u"mimeType": "TKBX"}, "reader", 200),
(Recovery, "POST", None, {u"email": "826S"}, None, 200),
(Recovery, "POST", None, {u"email": "826S"}, "devtable", 200),
(Recovery, "POST", None, {u"email": "826S"}, "freshuser", 200),
(Recovery, "POST", None, {u"email": "826S"}, "reader", 200),
(Signout, "POST", None, None, None, 200),
(Signout, "POST", None, None, "devtable", 200),
(Signout, "POST", None, None, "freshuser", 200),
(Signout, "POST", None, None, "reader", 200),
(Signin, "POST", None, {u"username": "E9RY", u"password": "<PASSWORD>"}, None, 403),
(Signin, "POST", None, {u"username": "E9RY", u"password": "<PASSWORD>"}, "devtable", 403),
(Signin, "POST", None, {u"username": "E9RY", u"password": "<PASSWORD>"}, "freshuser", 403),
(Signin, "POST", None, {u"username": "E9RY", u"password": "<PASSWORD>"}, "reader", 403),
(ExternalLoginInformation, "POST", {"service_id": "someservice"}, {}, None, 400),
(ExternalLoginInformation, "POST", {"service_id": "someservice"}, {}, "devtable", 400),
(ExternalLoginInformation, "POST", {"service_id": "someservice"}, {}, "freshuser", 400),
(ExternalLoginInformation, "POST", {"service_id": "someservice"}, {}, "reader", 400),
(DetachExternal, "POST", {"service_id": "someservice"}, {}, None, 401),
(DetachExternal, "POST", {"service_id": "someservice"}, {}, "devtable", 200),
(DetachExternal, "POST", {"service_id": "someservice"}, {}, "freshuser", 200),
(DetachExternal, "POST", {"service_id": "someservice"}, {}, "reader", 200),
(VerifyUser, "POST", None, {u"password": "<PASSWORD>"}, None, 401),
(VerifyUser, "POST", None, {u"password": "password"}, "devtable", 200),
(VerifyUser, "POST", None, {u"password": "<PASSWORD>"}, "freshuser", 403),
(VerifyUser, "POST", None, {u"password": "<PASSWORD>"}, "reader", 403),
(ClientKey, "POST", None, {u"password": "<PASSWORD>"}, None, 401),
(ClientKey, "POST", None, {u"password": "password"}, "devtable", 200),
(ClientKey, "POST", None, {u"password": "<PASSWORD>"}, "freshuser", 400),
(ClientKey, "POST", None, {u"password": "password"}, "reader", 200),
(ListPlans, "GET", None, None, None, 200),
(ListPlans, "GET", None, None, "devtable", 200),
(ListPlans, "GET", None, None, "freshuser", 200),
(ListPlans, "GET", None, None, "reader", 200),
(User, "GET", None, None, None, 401),
(User, "GET", None, None, "devtable", 200),
(User, "GET", None, None, "freshuser", 200),
(User, "GET", None, None, "reader", 200),
(User, "POST", None, {u"username": "T946", u"password": "<PASSWORD>", u"email": "MENT"}, None, 400),
(
User,
"POST",
None,
{u"username": "T946", u"password": "<PASSWORD>", u"email": "MENT"},
"devtable",
400,
),
(
User,
"POST",
None,
{u"username": "T946", u"password": "<PASSWORD>", u"email": "MENT"},
"freshuser",
400,
),
(
User,
"POST",
None,
{u"username": "T946", u"password": "<PASSWORD>", u"email": "MENT"},
"reader",
400,
),
(User, "PUT", None, {}, None, 401),
(User, "PUT", None, {}, "devtable", 200),
(User, "PUT", None, {}, "freshuser", 200),
(User, "PUT", None, {}, "reader", 200),
(User, "DELETE", None, {}, None, 401),
(User, "DELETE", None, {}, "devtable", 400),
(User, "DELETE", None, {}, "freshuser", 204),
(User, "DELETE", None, {}, "reader", 204),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
None,
401,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"devtable",
400,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"freshuser",
403,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"reader",
403,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
None,
401,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"devtable",
200,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"freshuser",
403,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "readers"},
None,
"reader",
403,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
None,
401,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"devtable",
400,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"freshuser",
403,
),
(
TeamMember,
"DELETE",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"reader",
403,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
None,
401,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"devtable",
400,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"freshuser",
403,
),
(
TeamMember,
"PUT",
{"orgname": "buynlarge", "membername": "devtable", "teamname": "owners"},
None,
"reader",
403,
),
(TeamPermissions, "GET", {"orgname": "buynlarge", "teamname": "readers"}, None, None, 401),
(
TeamPermissions,
"GET",
{"orgname": "buynlarge", "teamname": "readers"},
None,
"devtable",
200,
),
(
TeamPermissions,
"GET",
{"orgname": "buynlarge", "teamname": "readers"},
None,
"freshuser",
403,
),
(TeamPermissions, "GET", {"orgname": "buynlarge", "teamname": "readers"}, None, "reader", 403),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "readers"}, None, None, 401),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "readers"}, None, "devtable", 200),
(
TeamMemberList,
"GET",
{"orgname": "buynlarge", "teamname": "readers"},
None,
"freshuser",
403,
),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "readers"}, None, "reader", 200),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "owners"}, None, None, 401),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "owners"}, None, "devtable", 200),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "owners"}, None, "freshuser", 403),
(TeamMemberList, "GET", {"orgname": "buynlarge", "teamname": "owners"}, None, "reader", 403),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
None,
401,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"devtable",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
None,
401,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"devtable",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "public/publicrepo"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "public/publicrepo"},
{u"role": u"read"},
None,
401,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "public/publicrepo"},
{u"role": u"read"},
"devtable",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "public/publicrepo"},
{u"role": u"read"},
"freshuser",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "public/publicrepo"},
{u"role": u"read"},
"reader",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "devtable/shared"},
None,
None,
401,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"devtable",
400,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "devtable/shared"},
None,
None,
401,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"devtable",
400,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "devtable/shared"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "devtable/shared"},
{u"role": u"read"},
None,
401,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "devtable/shared"},
{u"role": u"read"},
"devtable",
400,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "devtable/shared"},
{u"role": u"read"},
"freshuser",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "devtable/shared"},
{u"role": u"read"},
"reader",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
None,
401,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"devtable",
400,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"DELETE",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
None,
401,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"devtable",
400,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"freshuser",
403,
),
(
RepositoryUserPermission,
"GET",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
None,
"reader",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
{u"role": u"read"},
None,
401,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
{u"role": u"read"},
"devtable",
400,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
{u"role": u"read"},
"freshuser",
403,
),
(
RepositoryUserPermission,
"PUT",
{"username": "A2O9", "repository": "buynlarge/orgrepo"},
{u"role": u"read"},
"reader",
403,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
None,
401,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"devtable",
403,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"freshuser",
403,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"reader",
403,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
None,
401,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"devtable",
403,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"freshuser",
403,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "public/publicrepo", "teamname": "readers"},
None,
"reader",
403,
),
(
RepositoryTeamPermission,
"PUT",
{"repository": "public/publicrepo", "teamname": "readers"},
{u"role": u"read"},
None,
401,
),
(
RepositoryTeamPermission,
"PUT",
{"repository": "public/publicrepo", "teamname": "readers"},
{u"role": u"read"},
"devtable",
403,
),
(
RepositoryTeamPermission,
"PUT",
{"repository": "public/publicrepo", "teamname": "readers"},
{u"role": u"read"},
"freshuser",
403,
),
(
RepositoryTeamPermission,
"PUT",
{"repository": "public/publicrepo", "teamname": "readers"},
{u"role": u"read"},
"reader",
403,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "devtable/shared", "teamname": "readers"},
None,
None,
401,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "devtable/shared", "teamname": "readers"},
None,
"devtable",
400,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "devtable/shared", "teamname": "readers"},
None,
"freshuser",
403,
),
(
RepositoryTeamPermission,
"DELETE",
{"repository": "devtable/shared", "teamname": "readers"},
None,
"reader",
403,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "devtable/shared", "teamname": "readers"},
None,
None,
401,
),
(
RepositoryTeamPermission,
"GET",
{"repository": "devtable/shared", "teamname": | |
<gh_stars>0
## aen_attack.py -- attack a network optimizing elastic-net distance with an en decision rule
## when autoencoder loss is applied
##
## Copyright (C) 2018, <NAME> <<EMAIL>>
## <NAME> <<EMAIL>>
## <NAME> <<EMAIL>>
## Copyright (C) 2017, <NAME> <<EMAIL>>.
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the "supplementary license" folder present in the root directory.
##
## Modifications Copyright (c) 2019 IBM Corporation
import sys
import tensorflow as tf
import numpy as np
import pickle
from tensorflow.keras.models import Model, Sequential, model_from_json
from tensorflow.keras.callbacks import ModelCheckpoint
import os
class AEADEN:
    def __init__(self, sess, model, attributes, aix360_path, mode, batch_size, kappa, init_learning_rate, binary_search_steps, max_iterations, initial_const, gamma, attr_reg, attr_penalty_reg, latent_square_loss_reg):
        """
        Initialize PN explainer object.
        Args:
            sess (tensorflow.python.client.session.Session): Tensorflow session
            model: KerasClassifier that contains a trained model to be explained
            attributes (str list): list of attributes to load attribute classifiers for
            aix360_path (str): path to aix360 used to determine paths to pretrained attribute classifiers
            mode (str): "PN" for pertinent negative or "PP" for pertinent positive
            batch_size (int): batch size for how many instances to explain
            kappa (float): Confidence parameter that controls difference between prediction of
                PN (or PP) and original prediction
            init_learning_rate (float): initial learning rate for gradient descent optimizer
            binary_search_steps (int): Controls number of random restarts to find best PN
            max_iterations (int): Max number iterations to run some version of gradient descent on
                PN optimization problem from a single random initialization, i.e., total
                number of iterations wll be arg_binary_search_steps * arg_max_iterations
            initial_const (int): Constant used for upper/lower bounds in binary search
            gamma (float): Penalty parameter encouraging addition of attributes for PN
            attr_reg (float): Penalty parameter on regularization of PN to be predicted different from
                original image
            attr_penalty_reg (float): Penalty regularizing PN from being too different from original image
            latent_square_loss_reg (float): Penalty regularizing PN from being too different from original
                image in the latent space
        """
        # image_size, num_channels, nun_classes = model.image_size, model.num_channels, model.num_labels
        # %%change%%
        # Pull the input geometry off the wrapped classifier.
        # NOTE(review): reads private attributes of the model wrapper (_input_shape, _nb_classes);
        # assumes _input_shape is (height, width, channels) with height == width — confirm with the wrapper.
        image_size = model._input_shape[0]
        num_channels = model._input_shape[2]
        nun_classes = model._nb_classes
        # Image batch shape and GAN latent shape (latent dim 512 matches the CelebA-HQ generator below).
        shape = (batch_size, image_size, image_size, num_channels)
        latent_shape = (batch_size, 512)

        # Record session and hyperparameters on the instance.
        self.sess = sess
        self.INIT_LEARNING_RATE = init_learning_rate
        self.MAX_ITERATIONS = max_iterations
        self.BINARY_SEARCH_STEPS = binary_search_steps
        self.kappa = kappa
        self.init_const = initial_const
        self.batch_size = batch_size
        self.mode = mode
        self.gamma = gamma
        self.attributes = attributes
        self.aix360_path = aix360_path
        self.attr_reg = attr_reg
        self.attr_penalty_reg = attr_penalty_reg
        self.attr_threshold = tf.constant(0.5, dtype="float32") # penalize the attributes of orig_img having scores <= this value
        self.latent_square_loss_reg = latent_square_loss_reg

        # these are variables to be more efficient in sending data to tf
        # (assigned once per attack via self.setup instead of fed every step)
        self.orig_img = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.orig_latent = tf.Variable(np.zeros(latent_shape), dtype=tf.float32, name="orig_latent")
        self.adv_latent = tf.Variable(np.zeros(latent_shape), dtype=tf.float32, name="adv_latent")
        self.target_lab = tf.Variable(np.zeros((batch_size,nun_classes)), dtype=tf.float32)
        self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
        self.global_step = tf.Variable(0.0, trainable=False)

        # and here's what we use to assign them
        self.assign_orig_img = tf.placeholder(tf.float32, shape)
        self.assign_orig_latent = tf.placeholder(tf.float32, latent_shape, name="assign_orig_latent")
        self.assign_adv_latent = tf.placeholder(tf.float32, latent_shape, name="assign_adv_latent")
        self.assign_target_lab = tf.placeholder(tf.float32, (batch_size,nun_classes), name="assign_target_label")
        self.assign_const = tf.placeholder(tf.float32, [batch_size])

        ### Load attribute classifier
        # One pretrained Keras model per attribute, loaded from JSON architecture + H5 weights
        # under <aix360_path>/models/CEM_MAF/.
        nn_type = "simple"
        #import copy
        attr_model_list=[]
        # attr_threshold_idx=[]
        # count=0
        for attr in self.attributes:
            # load test data into memory using Image Data Generator
            # print("Loading data for " + attr + " into memory")
            # load json and create model
            json_file_name = os.path.join(aix360_path, "models/CEM_MAF/{}_{}_model.json".format(nn_type, attr))
            json_file = open(json_file_name, 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            loaded_model = model_from_json(loaded_model_json)
            # load weights into new model
            weight_file_name = os.path.join(aix360_path, "models/CEM_MAF/{}_{}_weights.h5".format(nn_type, attr))
            loaded_model.load_weights(weight_file_name)
            print("Loaded model for " + attr + " from disk")
            attr_model_list.append(loaded_model)
            # if self.mode == "PP":
            #     pass
            # else:
            #     attr_threshold_idx=tf.cond(loaded_model(self.orig_img)<= self.attr_threshold, lambda: attr_threshold_idx.append(count), lambda: attr_threshold_idx)
            # count=count+1
        print("# of attr models is",len(attr_model_list))
        # print("# of attr smaller than THR is",len(attr_threshold_idx))

        # load gan
        # Import official CelebA-HQ networks.
        # NOTE(review): pickle.load on a bundled model file — only safe because the pickle ships with aix360.
        with open(os.path.join(aix360_path, 'algorithms/contrastive/progressive_growing_of_gans/karras2018iclr-celebahq-1024x1024.pkl'), 'rb') as file:
            G, D, Gs = pickle.load(file)
        # inputs, redundent ...
        # in_labels = tf.placeholder(tf.float32, shape=(None, 0))
        # Zero-width label input: the generator is unconditional here.
        in_labels = tf.constant(0, shape=(1, 0))
        # get the GAN network,
        # Rebuild the generator graph on top of our adv_latent variable so gradients flow
        # from the loss back into the latent code.
        with tf.variable_scope(Gs.scope, reuse=tf.AUTO_REUSE):
            out_image = Gs._build_func(self.adv_latent , in_labels, **Gs.static_kwargs, reuse=True)
        # Generator emits NCHW; transpose to NHWC, downscale to the classifier's 224x224 input,
        # and map pixel values into [-0.5, 0.5] (halving assumes generator output in [-1, 1] — TODO confirm).
        tanspose_image = tf.transpose(out_image, perm=[0, 2, 3, 1])
        resize_image= tf.image.resize_images(tanspose_image, [224, 224])
        self.adv_img = tf.clip_by_value(resize_image/2, -0.5, 0.5)
        # Op used each iteration to push an updated latent back into the graph.
        self.adv_updater = tf.assign(self.adv_latent, self.assign_adv_latent)

        """--------------------------------"""
        # prediction BEFORE-SOFTMAX of the model
        self.delta_img = self.orig_img - self.adv_img
        # %%change%%
        # PP scores the removed part (delta); PN scores the perturbed image itself.
        if self.mode == "PP":
            # self.ImgToEnforceLabel_Score = model.predict(self.delta_img)
            self.ImgToEnforceLabel_Score = model.predictsym(self.delta_img)
        elif self.mode == "PN":
            # self.ImgToEnforceLabel_Score = model.predict(self.adv_img)
            self.ImgToEnforceLabel_Score = model.predictsym(self.adv_img)

        # Attribute classifier score
        # attr_score penalizes losing (PN) / gaining (PP) attribute confidence relative to the original;
        # attr_penalty (PN only) discourages turning on attributes the original scored <= attr_threshold.
        self.attr_score = tf.constant(0, dtype="float32")
        self.attr_penalty = tf.constant(0, dtype="float32")
        if self.mode == "PP":
            for i in range(len(attr_model_list)):
                self.attr_score = self.attr_score + tf.maximum(attr_model_list[i](self.adv_img) - attr_model_list[i](self.orig_img),tf.constant(0, tf.float32))
                self.attr_score = tf.squeeze(self.attr_score)
            #self.ImgToEnforceLabel_Score = model.predict(self.adv_img)
            #self.ImgToEnforceLabel_Score_s = model.predict(self.adv_img_s)
        elif self.mode == "PN":
            for i in range(len(attr_model_list)):
                self.attr_score = self.attr_score + tf.maximum(attr_model_list[i](self.orig_img) - attr_model_list[i](self.adv_img),tf.constant(0, tf.float32))
                self.attr_score = tf.squeeze(self.attr_score)
                #self.attr_penalty = tf.squeeze(self.attr_penalty)
                self.attr_penalty = self.attr_penalty + tf.multiply(tf.cond(tf.squeeze(attr_model_list[i](self.orig_img)) <= self.attr_threshold, lambda: tf.constant(1, tf.float32), lambda: tf.constant(0, tf.float32)),tf.squeeze(attr_model_list[i](self.adv_img)))
                #tf.maximum(attr_model_list[i](self.orig_img) - attr_model_list[i](self.adv_img),tf.constant(0, tf.float32))
        # Sum of attributes penalty in attr_threshold_idx
        # self.attr_penalty = tf.constant(0, dtype="float32")
        # if len(attr_threshold_idx)==0:
        #     pass
        # else:
        #     for i in range(len(attr_threshold_idx)):
        #         self.attr_penalty = self.attr_penalty + attr_model_list[i](self.adv_img)

        self.delta_latent = self.orig_latent - self.adv_latent
        # distance to the input data
        self.L2_img_dist = tf.reduce_sum(tf.square(self.delta_img),[1,2,3])
        self.L2_latent_dist = tf.reduce_sum(tf.square(self.delta_latent))

        # compute the probability of the label class versus the maximum other
        # (the -10000 mask removes the target class from the max over non-target logits)
        self.target_lab_score = tf.reduce_sum((self.target_lab)*self.ImgToEnforceLabel_Score,1)
        self.max_nontarget_lab_score = tf.reduce_max((1-self.target_lab)*self.ImgToEnforceLabel_Score - (self.target_lab*10000),1)
        # Hinge-style attack loss with confidence margin kappa:
        # PP wants the target class to win; PN wants a non-target class to win.
        if self.mode == "PP":
            Loss_Attack = tf.maximum(0.0, self.max_nontarget_lab_score - self.target_lab_score + self.kappa)
        elif self.mode == "PN":
            Loss_Attack = tf.maximum(0.0, -self.max_nontarget_lab_score + self.target_lab_score + self.kappa)

        # sum up the losses
        self.Loss_Latent_L2Dist = tf.reduce_sum(self.latent_square_loss_reg*self.L2_latent_dist)
        self.Loss_Img_L2Dist = tf.reduce_sum(self.gamma*self.L2_img_dist)
        self.Loss_Attack = tf.reduce_sum(self.const*Loss_Attack)
        self.Loss_attr = tf.reduce_sum(self.attr_reg*self.attr_score)
        self.Loss_attr_penalty = tf.reduce_sum(self.attr_penalty_reg*self.attr_penalty)
        self.Loss_Overall = self.Loss_Latent_L2Dist + self.Loss_Img_L2Dist + self.Loss_Attack + self.Loss_attr + self.Loss_attr_penalty
        # self.Loss_Overall = self.Loss_Attack

        # Learning rate decays polynomially (sqrt schedule) to 0 over MAX_ITERATIONS;
        # only the adversarial latent code is optimized.
        self.learning_rate = tf.train.polynomial_decay(self.INIT_LEARNING_RATE, self.global_step, self.MAX_ITERATIONS, 0, power=0.5)
        optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
        start_vars = set(x.name for x in tf.global_variables())
        self.train = optimizer.minimize(self.Loss_Overall, var_list=[self.adv_latent], global_step=self.global_step)
        end_vars = tf.global_variables()
        # Variables created by the optimizer itself, so they can be re-initialized per restart.
        new_vars = [x for x in end_vars if x.name not in start_vars]

        # these are the variables to initialize when we run
        self.setup = []
        self.setup.append(self.orig_img.assign(self.assign_orig_img))
        self.setup.append(self.orig_latent.assign(self.assign_orig_latent))
        self.setup.append(self.adv_latent.assign(self.assign_adv_latent))
        self.setup.append(self.target_lab.assign(self.assign_target_lab))
        self.setup.append(self.const.assign(self.assign_const))
        self.init = tf.variables_initializer(var_list=[self.global_step]+[self.adv_latent]+new_vars)
def attack(self, imgs, labs, latent):
"""
Find PN for an input instance input_image e.g. celebA is shape (1, 224, 224, 3)
Input:
imgs (numpy.ndarry): images to be explained, of shape (num_images, size, size, channels)
labs: one hot encoded vectors of target labels for PN, i.e. which labels
to explain with an image of a different class
latent (numpy.ndarry): image to be explained, of shape (1, size, size, channels)
in the latent space
Output:
adv_img (numpy.ndarry): the pertinent negative image
"""
def compare(x,y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
# x[y] -= self.kappa if self.PP else -self.kappa
if self.mode == "PP":
x[y] -= self.kappa
elif self.mode == "PN":
x[y] += self.kappa
x = np.argmax(x)
if self.mode == "PP":
return x==y
else:
return x!=y
batch_size = self.batch_size
# set the lower and upper bounds accordingly
Const_LB = np.zeros(batch_size)
CONST = np.ones(batch_size)*self.init_const
Const_UB = np.ones(batch_size)*1e10
# the best l2, score, and image attack
overall_best_dist = [1e10]*batch_size
overall_best_attack = [np.zeros(imgs[0].shape)]*batch_size
for binary_search_steps_idx in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
img_batch = imgs[:batch_size]
label_batch = labs[:batch_size]
current_step_best_dist = [1e10]*batch_size
current_step_best_score = [-1]*batch_size
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_orig_img: img_batch,
self.assign_target_lab: label_batch,
self.assign_const: CONST,
self.assign_adv_latent: latent,
self.assign_orig_latent: latent
})
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
self.sess.run([self.train])
temp_adv_latent = self.sess.run(self.adv_latent)
self.sess.run(self.adv_updater, feed_dict={self.assign_adv_latent: temp_adv_latent})
Loss_Overall, OutputScore, adv_img = self.sess.run([self.Loss_Overall, self.ImgToEnforceLabel_Score, self.adv_img])
Loss_Attack, Loss_Latent_L2Dist, Loss_Img_L2Dist, Loss_attr = self.sess.run([self.Loss_Attack, self.Loss_Latent_L2Dist, self.Loss_Img_L2Dist, self.attr_score])
target_lab_score, max_nontarget_lab_score_s = self.sess.run([self.target_lab_score, self.max_nontarget_lab_score])
# if iteration%(self.MAX_ITERATIONS//10) == 0:
if iteration % 10 == 0:
print("iter:{} const:{}". format(iteration, CONST))
print("Loss_Overall:{:.4f}, Loss_Attack:{:.4f}, Loss_attr:{:.4f}". format(Loss_Overall, Loss_Attack, Loss_attr))
print("Loss_Latent_L2Dist:{:.4f}, Loss_Img_L2Dist:{:.4f}". format(Loss_Latent_L2Dist, Loss_Img_L2Dist))
print("target_lab_score:{:.4f}, max_nontarget_lab_score:{:.4f}". format(target_lab_score[0], max_nontarget_lab_score_s[0]))
print("")
sys.stdout.flush()
for batch_idx,(the_dist, the_score, the_adv_img) in enumerate(zip([Loss_Overall], OutputScore, adv_img)):
if the_dist < current_step_best_dist[batch_idx] and compare(the_score, np.argmax(label_batch[batch_idx])):
current_step_best_dist[batch_idx] = the_dist
current_step_best_score[batch_idx] = np.argmax(the_score)
if the_dist < overall_best_dist[batch_idx] and compare(the_score, np.argmax(label_batch[batch_idx])):
overall_best_dist[batch_idx] = the_dist
overall_best_attack[batch_idx] = the_adv_img
# adjust the constant as needed
for batch_idx in range(batch_size):
if compare(current_step_best_score[batch_idx], np.argmax(label_batch[batch_idx])) and current_step_best_score[batch_idx] != -1:
# success, divide const by two
Const_UB[batch_idx] = min(Const_UB[batch_idx],CONST[batch_idx])
if | |
sample in range(K):
# Update excluded_ids.
if neg_sample_one_hot is not None:
excluded_ids = excluded_ids + neg_sample_one_hot * -10000
if use_softmax_sample:
uniform_noise = paddle.rand(
logits.shape, dtype=paddle.get_default_dtype())
gumbel_noise = -paddle.log(-paddle.log(uniform_noise + 1e-9) +
1e-9)
else:
gumbel_noise = paddle.zeros_like(logits)
sampled_ids = paddle.argmax(
F.softmax(logits + gumbel_noise + excluded_ids), axis=-1)
# One-hot encoding of sample_ids.
neg_sample_one_hot = F.one_hot(sampled_ids, logits.shape[-1])
if neg_samples_ids is None:
neg_samples_ids = sampled_ids.unsqueeze(2)
else:
neg_samples_ids = paddle.concat(
[neg_samples_ids, sampled_ids.unsqueeze(2)], axis=2)
return neg_samples_ids
def forward(self,
input_ids=None,
token_type_ids=None,
position_ids=None,
attention_mask=None,
raw_input_ids=None,
generator_labels=None):
assert (
generator_labels is not None
), "generator_labels should not be None, please check DataCollator"
generator_logits = self.generator(input_ids, token_type_ids,
position_ids, attention_mask)
disc_input_list = self.get_discriminator_inputs_ernie_health(
input_ids, raw_input_ids, generator_logits, generator_labels,
self.use_softmax_sample)
disc_inputs, disc_labels, _, disc_candidates = disc_input_list
logits_rtd, logits_mts, logits_csp = self.discriminator(
disc_inputs, disc_candidates, token_type_ids, position_ids,
attention_mask)
if attention_mask is None:
pad_id = self.generator.electra.pad_token_id
attention_mask = (input_ids != pad_id)
else:
attention_mask = attention_mask.astype('bool')
return generator_logits, logits_rtd, logits_mts, logits_csp, disc_labels, attention_mask
class ElectraForMultipleChoice(ElectraPretrainedModel):
    """
    Electra Model with a pooling layer and a single-logit classification head
    on top, intended for multiple-choice tasks such as RocStories/SWAG.

    Args:
        electra (:class:`ElectraModel`):
            An instance of ElectraModel.
        num_choices (int, optional):
            The number of answer choices per example. Defaults to `2`.
        dropout (float, optional):
            The dropout probability applied to the pooled output.
            If None, use the same value as `hidden_dropout_prob` of
            `ElectraModel` instance `electra`. Defaults to None.
    """

    def __init__(self, electra, num_choices=2, dropout=None):
        super(ElectraForMultipleChoice, self).__init__()
        self.num_choices = num_choices
        self.electra = electra
        # Pools the sequence output into one vector per (example, choice).
        self.sequence_summary = ElectraPooler(
            self.electra.config["hidden_size"], pool_act="gelu")
        drop_prob = (dropout if dropout is not None else
                     self.electra.config["hidden_dropout_prob"])
        self.dropout = nn.Dropout(drop_prob)
        # One logit per flattened choice; regrouped per example in forward().
        self.classifier = nn.Linear(self.electra.config["hidden_size"], 1)
        self.init_weights()

    def forward(self,
                input_ids=None,
                token_type_ids=None,
                position_ids=None,
                attention_mask=None):
        r"""
        The ElectraForMultipleChoice forward method, overrides the __call__()
        special method.

        Choices are flattened into the batch dimension, encoded by Electra,
        pooled, classified, and the per-choice logits are regrouped per
        example.

        Args:
            input_ids (Tensor):
                See :class:`ElectraModel`; shape
                [batch_size, num_choice, sequence_length].
            token_type_ids (Tensor, optional):
                See :class:`ElectraModel`; same shape as `input_ids`.
            position_ids (Tensor, optional):
                See :class:`ElectraModel`; same shape as `input_ids`.
            attention_mask (Tensor, optional):
                See :class:`ElectraModel`; same shape as `input_ids`.

        Returns:
            Tensor: `reshaped_logits`, the multiple-choice classification
            logits, shape `[batch_size, num_choice]`, dtype `float32`.
        """

        def _merge_choices(tensor):
            # [bs, num_choice, seq_len] -> [bs * num_choice, seq_len]
            return tensor.reshape((-1, tensor.shape[-1]))

        flat_input_ids = _merge_choices(input_ids)
        flat_token_type_ids = (None if token_type_ids is None
                               else _merge_choices(token_type_ids))
        flat_position_ids = (None if position_ids is None
                             else _merge_choices(position_ids))
        flat_attention_mask = (None if attention_mask is None
                               else _merge_choices(attention_mask))

        sequence_output = self.electra(flat_input_ids, flat_token_type_ids,
                                       flat_position_ids, flat_attention_mask)
        pooled_output = self.dropout(self.sequence_summary(sequence_output))
        flat_logits = self.classifier(pooled_output)  # [bs*num_choice, 1]
        return flat_logits.reshape((-1, self.num_choices))
class ElectraPretrainingCriterion(paddle.nn.Layer):
    '''
    Joint ELECTRA pretraining loss: a weighted sum of the generator's
    masked-LM cross entropy and the discriminator's per-token BCE.

    Args:
        vocab_size(int):
            Vocabulary size of `inputs_ids` in `ElectraModel`. Defines the
            number of different tokens that can be represented by the
            `inputs_ids` passed when calling `ElectraModel`.
        gen_weight(float):
            The weight of the Electra Generator.
        disc_weight(float):
            The weight of the Electra Discriminator.
    '''

    def __init__(self, vocab_size, gen_weight, disc_weight):
        super(ElectraPretrainingCriterion, self).__init__()
        self.vocab_size = vocab_size
        self.gen_weight = gen_weight
        self.disc_weight = disc_weight
        # 'none' reduction: masking and averaging are done manually in
        # forward(), since only selected positions contribute to each loss.
        self.gen_loss_fct = nn.CrossEntropyLoss(reduction='none')
        self.disc_loss_fct = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, generator_prediction_scores,
                discriminator_prediction_scores, generator_labels,
                discriminator_labels, attention_mask):
        """
        Args:
            generator_prediction_scores(Tensor):
                Masked-token prediction scores, float32,
                shape [batch_size, sequence_length, vocab_size].
            discriminator_prediction_scores(Tensor):
                Per-token discriminator logits, float32,
                shape [batch_size, sequence_length] or [sequence_length]
                if batch_size=1.
            generator_labels(Tensor):
                Generator labels, int64; positions holding -100 are not
                masked and are excluded from the generator loss.
            discriminator_labels(Tensor):
                Discriminator labels in [0, 1], float32, same shape as
                `discriminator_prediction_scores`.
            attention_mask(Tensor):
                See :class:`ElectraModel`; True at non-padding positions.

        Returns:
            Tensor: The pretraining loss, the weighted generator loss plus
            the weighted discriminator loss. float32, shape [1].
        """
        # --- generator (masked LM) loss ---
        per_token_gen_loss = self.gen_loss_fct(
            paddle.reshape(generator_prediction_scores, [-1, self.vocab_size]),
            paddle.reshape(generator_labels, [-1]))
        # todo: remove the manual averaging once
        # CrossEntropyLoss(reduction='mean') handles ignored labels.
        zeros = paddle.zeros_like(generator_labels).astype(
            paddle.get_default_dtype())
        ones = paddle.ones_like(generator_labels).astype(
            paddle.get_default_dtype())
        masked_flags = paddle.where(generator_labels == -100, zeros, ones)
        num_masked = masked_flags.sum()
        if num_masked == 0:
            gen_loss = paddle.to_tensor([0.0])
        else:
            gen_loss = per_token_gen_loss.sum() / num_masked

        # --- discriminator loss ---
        seq_length = discriminator_labels.shape[1]
        per_token_disc_loss = self.disc_loss_fct(
            paddle.reshape(discriminator_prediction_scores, [-1, seq_length]),
            discriminator_labels.astype(paddle.get_default_dtype()))
        if attention_mask is not None:
            ones = paddle.ones_like(discriminator_labels).astype(
                paddle.get_default_dtype())
            zeros = paddle.zeros_like(discriminator_labels).astype(
                paddle.get_default_dtype())
            # Average only over non-padding positions.
            masked_disc_loss = paddle.where(attention_mask,
                                            per_token_disc_loss, zeros)
            valid_flags = paddle.where(attention_mask, ones, zeros)
            disc_loss = masked_disc_loss.sum() / valid_flags.sum()
        else:
            token_count = paddle.ones_like(discriminator_labels).astype(
                paddle.get_default_dtype()).sum()
            disc_loss = per_token_disc_loss.sum() / token_count

        return self.gen_weight * gen_loss + self.disc_weight * disc_loss
class ErnieHealthPretrainingCriterion(paddle.nn.Layer):
'''
Args:
vocab_size(int):
Vocabulary size of `inputs_ids` in `ElectraModel`. Defines the number of different tokens that can
be represented by the `inputs_ids` passed when calling `ElectraModel`.
        gen_weight(float):
            The weight of the Electra Generator loss. The discriminator
            head weights (RTD, MTS, CSP) are fixed internally and are not
            constructor arguments.
'''
def __init__(self, vocab_size, gen_weight):
super(ErnieHealthPretrainingCriterion, self).__init__()
self.vocab_size = vocab_size
self.gen_weight = gen_weight
self.rtd_weight = 50.0
self.mts_weight = 20.0
self.csp_weight = 1.0
self.gen_loss_fct = nn.CrossEntropyLoss(reduction='none')
self.disc_rtd_loss_fct = nn.BCEWithLogitsLoss(reduction='none')
self.disc_csp_loss_fct = nn.CrossEntropyLoss(reduction='none')
self.disc_mts_loss_fct = nn.CrossEntropyLoss(reduction='none')
self.temperature = 0.07
def forward(self, generator_logits, generator_labels, logits_rtd,
logits_mts, logits_csp, discriminator_labels, attention_mask):
"""
Args:
generator_logits(Tensor):
The scores of masked token prediction. Its data type should be float32.
and its shape is [batch_size, sequence_length, vocab_size].
generator_labels(Tensor):
The labels of the generator, its dimensionality is equal to `generator_prediction_scores`.
Its data type should be int64 and its shape is [batch_size, sequence_size, 1].
logits_rtd(Tensor):
The scores of masked token prediction. Its data type should be float32.
and its shape is [batch_size, sequence_length] or [sequence length] if batch_size=1.
discriminator_labels(Tensor):
The labels of the discriminator, its dimensionality is equal to `discriminator_prediction_scores`.
The labels should be numbers between 0 and 1.
Its data type should be float32 and its shape is [batch_size, sequence_size] or [sequence length] if batch_size=1.
attention_mask(Tensor):
See :class:`ElectraModel`.
Returns:
Tensor: The pretraining loss, equals to weighted generator loss plus the weighted discriminator loss.
Its data type should be float32 and its shape is [1].
"""
# generator loss
gen_loss = self.gen_loss_fct(
paddle.reshape(generator_logits, [-1, self.vocab_size]),
paddle.reshape(generator_labels, [-1]))
# todo: we can remove 4 lines after when CrossEntropyLoss(reduction='mean') improved
umask_positions = paddle.zeros_like(generator_labels).astype(
paddle.get_default_dtype())
mask_positions = paddle.ones_like(generator_labels).astype(
paddle.get_default_dtype())
mask_positions = paddle.where(generator_labels == -100, umask_positions,
mask_positions)
if mask_positions.sum() == 0:
gen_loss = paddle.to_tensor([0.0])
else:
gen_loss = gen_loss.sum() / mask_positions.sum()
# RTD discriminator loss
seq_length = discriminator_labels.shape[1]
rtd_labels = discriminator_labels
disc_rtd_loss = self.disc_rtd_loss_fct(
paddle.reshape(logits_rtd, [-1, seq_length]),
rtd_labels.astype(logits_rtd.dtype))
if attention_mask is not None:
umask_positions = paddle.ones_like(rtd_labels).astype(
paddle.get_default_dtype())
mask_positions = paddle.zeros_like(rtd_labels).astype(
paddle.get_default_dtype())
umask_positions = paddle.where(attention_mask, umask_positions,
mask_positions)
# Mask has different meanings here. It denotes [mask] token in
# generator and denotes [pad] token in discriminator.
disc_rtd_loss = paddle.where(attention_mask, disc_rtd_loss,
mask_positions)
disc_rtd_loss = disc_rtd_loss.sum() / umask_positions.sum()
else:
total_positions = paddle.ones_like(rtd_labels).astype(
paddle.get_default_dtype())
disc_rtd_loss = disc_rtd_loss.sum() / total_positions.sum()
# MTS discriminator loss
replaced_positions = discriminator_labels.astype("bool")
mts_labels = paddle.zeros(
[logits_mts.shape[0] * logits_mts.shape[1]],
dtype=generator_labels.dtype).detach()
disc_mts_loss = self.disc_mts_loss_fct(
paddle.reshape(logits_mts, [-1, logits_mts.shape[-1]]), mts_labels)
disc_mts_loss = paddle.reshape(disc_mts_loss, [-1, seq_length])
original_positions = paddle.zeros_like(replaced_positions).astype(
paddle.get_default_dtype())
disc_mts_loss = paddle.where(replaced_positions, disc_mts_loss,
original_positions)
if discriminator_labels.sum() == 0:
disc_mts_loss = paddle.to_tensor([0.0])
else:
disc_mts_loss = disc_mts_loss.sum() / discriminator_labels.sum()
# CSP discriminator loss
logits_csp = F.normalize(logits_csp, axis=-1)
# Gather from all devices (split first)
logit_csp_0, logit_csp_1 = paddle.split(
logits_csp, num_or_sections=2, axis=0)
if paddle.distributed.get_world_size() > 1:
csp_list_0, csp_list_1 = [], []
paddle.distributed.all_gather(csp_list_0, logit_csp_0)
paddle.distributed.all_gather(csp_list_1, logit_csp_1)
logit_csp_0 = paddle.concat(csp_list_0, axis=0)
logit_csp_1 = paddle.concat(csp_list_1, axis=0)
batch_size = | |
from __future__ import absolute_import
from __future__ import division
import os.path
import datetime
import csv
import copy
import socket
from itertools import dropwhile
import numpy as np
import scipy.interpolate as interpolate
from sunpy.net import hek
from sunpy.time import parse_time
from sunpy import config
from sunpy import lightcurve
from sunpy.util.net import check_download_file
__all__ = ['get_goes_event_list', 'temp_em', 'goes_chianti_tem']
try:
    # Resolve (at import time) the host serving the GOES calibration data
    # files.  NOTE(review): on DNS failure HOST falls back to '' which
    # yields an unusable GOES_REMOTE_PATH — confirm this is intended.
    HOST = socket.gethostbyname_ex('hesperia.gsfc.nasa.gov')[-1][0]
except socket.gaierror:
    HOST = ''
# Remote location where the GOES temperature/emission-measure data files
# are stored.
GOES_REMOTE_PATH = "http://{0}/ssw/gen/idl/synoptic/goes/".format(HOST)
# Define location where data files should be downloaded to.
DATA_PATH = config.get("downloads", "download_dir")
# File names of the CHIANTI-derived GOES response tables, for coronal
# ("cor") and photospheric ("pho") abundances.
FILE_TEMP_COR = "goes_chianti_temp_cor.csv"
FILE_TEMP_PHO = "goes_chianti_temp_pho.csv"
FILE_EM_COR = "goes_chianti_em_cor.csv"
FILE_EM_PHO = "goes_chianti_em_pho.csv"
def get_goes_event_list(timerange, goes_class_filter=None):
    """
    Retrieve list of flares detected by GOES within a given time range.

    Queries the HEK for GOES-observed flare events and condenses each
    result into a dictionary holding the event date, start/peak/end times,
    GOES class, location and NOAA active-region number.

    Parameters
    ----------
    timerange: sunpy.time.TimeRange
        The time range to download the event list for.

    goes_class_filter: (optional) string
        A string specifying a minimum GOES class for inclusion in the list,
        e.g. 'M1', 'X2'.
    """
    client = hek.HEKClient()
    event_type = 'FL'
    tstart = timerange.start
    tend = timerange.end
    # Query the HEK for events seen by GOES in the window, optionally
    # restricted to a minimum GOES class.
    if goes_class_filter:
        results = client.query(hek.attrs.Time(tstart, tend),
                               hek.attrs.EventType(event_type),
                               hek.attrs.FL.GOESCls > goes_class_filter,
                               hek.attrs.OBS.Observatory == 'GOES')
    else:
        results = client.query(hek.attrs.Time(tstart, tend),
                               hek.attrs.EventType(event_type),
                               hek.attrs.OBS.Observatory == 'GOES')
    # Condense each HEK record into a small dictionary (GOES list standard).
    return [
        {
            'event_date': parse_time(r['event_starttime']).date().strftime('%Y-%m-%d'),
            'start_time': parse_time(r['event_starttime']),
            'peak_time': parse_time(r['event_peaktime']),
            'end_time': parse_time(r['event_endtime']),
            'goes_class': str(r['fl_goescls']),
            'goes_location': (r['event_coord1'], r['event_coord2']),
            'noaa_active_region': r['ar_noaanum'],
        }
        for r in results
    ]
def temp_em(goeslc, abundances="coronal", download=False, download_dir=DATA_PATH):
    """
    Calculates and adds temperature and EM to a GOESLightCurve.

    The isothermal temperature and volume emission measure of the solar
    soft X-ray emitting plasma observed by GOES/XRS are computed with
    goes_chianti_tem() (see that function for details) and attached to a
    deep copy of the input light curve as goeslc.data.temperature and
    goeslc.data.em.  The input object is not modified.

    Parameters
    ----------
    goeslc : GOESLightCurve object

    abundances : (optional) string equalling either 'coronal' or 'photospheric'.
        States whether photospheric or coronal abundances should be assumed.
        Default='coronal'

    download : (optional) bool
        If True, re-download the GOES temperature and emission measure data
        files, e.g. after a new CHIANTI release or the launch of new GOES
        satellites.
        Default=False

    download_dir : (optional) string
        The directory to download the GOES temperature and emission measure
        data files to.
        Default=SunPy default download directory

    Returns
    -------
    goeslc.data.temperature : pandas.core.series.Series
        Array of temperature values [MK]

    goeslc.data.em : pandas.core.series.Series
        Array of volume emission measure values [10**49 cm**-3]

    Examples
    --------
    >>> from sunpy.lightcurve as lc
    >>> goeslc = lc.GOESLightCurve.create(time1, time2)
    >>> goeslc_new = temp_em(goeslc)
    >>> goeslc_new.data
                        xrsa   xrsb  temperature              em
    2014-01-01 00:00:00  7e-07  7e-06  11.28295376  4.78577516e+48
    2014-01-01 00:00:02  7e-07  7e-06  11.28295376  4.78577516e+48
    """
    # Check that input argument is of correct type
    if not isinstance(goeslc, lightcurve.GOESLightCurve):
        raise TypeError("goeslc must be a GOESLightCurve object.")

    # Satellite number is encoded in the TELESCOP metadata, e.g. "GOES 15".
    satellite_number = goeslc.meta["TELESCOP"].split()[1]
    temp, em = goes_chianti_tem(goeslc.data.xrsb, goeslc.data.xrsa,
                                satellite=satellite_number,
                                date=goeslc.data.index[0],
                                abundances=abundances, download=download,
                                download_dir=download_dir)

    # Attach results to a fresh copy so the caller's object is untouched.
    result = copy.deepcopy(goeslc)
    result.data["temperature"] = temp
    result.data["em"] = em
    return result
def goes_chianti_tem(longflux, shortflux, satellite=8,
date=datetime.datetime.today(), abundances="coronal",
download=False, download_dir=DATA_PATH):
"""
Calculates temperature and emission measure from GOES/XRS data.
This function calculates the isothermal temperature and volume
emission measure of the solar soft X-ray emitting plasma observed by
the GOES/XRS. This is done using the observed flux ratio of the
short (0.5-4 angstrom) to long (1-8 angstrom) channels.
Parameters
----------
longflux, shortflux : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
Arrays containing the long and short GOES/XRS flux measurements
respectively as a function of time. Must be of same length. [W/m**2].
satellite : int (optional)
Number of GOES satellite used to make observations, important for
correct calibration of data.
Default=8
date : datetime object or str
        Date when observations made. Important for correct calibration.
Default=today
abundances : (optional) string equalling either 'coronal' or 'photospheric'.
States whether photospheric or coronal abundances should be assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature and emission measure data files are
downloaded. It is important to do this if a new version of the files
has been generated due to a new CHIANTI version being released or the
launch of new GOES satellites since these files were originally downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
temp : numpy array
Array of temperature values of same length as longflux and shortflux. [MK]
em : numpy array
Array of volume emission measure values of same length as longflux
and shortflux. [10**49 cm**-3]
Notes
-----
The temperature and volume emission measure are calculated here
using the methods of White et al. (2005) who used the
CHIANTI atomic physics database to model the response of the ratio
of the short (0.5-4 angstrom) to long (1-8 angstrom) channels of the
XRSs onboard various GOES satellites. This method assumes an
isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_chianti_tem.pro in SolarSoftWare
written in IDL by <NAME>.
Recent fluxes released to the public are scaled to be consistent
with GOES-7. In fact these recent fluxes are correct and so this
correction must be removed before proceeding to use transfer
functions.
Email <NAME> (NOAA) for more information.
Measurements of short channel flux of less than 1e-10 W/m**2 or
long channel flux less than 3e-8 W/m**2 are not considered good.
    Ratio values corresponding to such fluxes are set to 0.003.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. 2005, Sol. Phys.,
227, 231
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>.
1998, A&AS, 133, 339
Examples
--------
>>> longflux = np.array([7e-6, 7e-6])
>>> shortflux = np.array([7e-7, 7e-7])
>>> temp, em = goes_chianti_tem(longflux, shortflux, satellite=15,
date='2014-04-16', abundances="coronal")
>>> temp
array([11.28295376, 11.28295376])
>>> em
array([ 4.78577516e+48, 4.78577516e+48])
"""
# ENSURE INPUTS ARE OF CORRECT TYPE AND VALID VALUES
longflux = np.asanyarray(longflux, dtype=np.float64)
shortflux = np.asanyarray(shortflux, dtype=np.float64)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
date = parse_time(date)
# Check flux arrays are of same length.
if len(longflux) != len(shortflux):
raise ValueError(
"longflux and shortflux must have same number of elements.")
# PREPARE DATA
# GOES 6 long channel flux before 1983-Jun-28 must be corrected by a
# factor of 4.43/5.32
if date < datetime.datetime(1983, 06, 28) and satellite == 6:
longflux_corrected = longflux * (4.43/5.32)
else:
longflux_corrected = longflux
# Un-scale fluxes if GOES satellite is after 7. See 2nd paragraph
# in Notes section of docstring above.
if satellite > 7:
longflux_corrected = longflux_corrected / 0.7
shortflux_corrected = shortflux / 0.85
else:
shortflux_corrected = shortflux
# Calculate short to long channel ratio.
# Data which is not good have their ratio value set to 0.003.
# See Notes section in docstring above.
index = np.logical_or(shortflux_corrected < 1e-10,
longflux_corrected < 3e-8)
fluxratio = shortflux_corrected / longflux_corrected
fluxratio[index] = 0.003
# FIND TEMPERATURE AND EMISSION MEASURE FROM | |
626, 830, 969, 809, 522, 539],
[910, 121, 228, 803, 443, 4, 341, 64, 60, 438, 964],
[320, 135, 26, 700, 58, 741, 111, 944, 580, 855, 195],
[2, 802, 971, 42, 232, 432, 910, 803, 694, 46, 826],
[612, 974, 539, 639, 21, 878, 809, 246, 218, 331, 974],
[804, 448, 962, 406, 439, 556, 826, 109, 798, 609, 867],
[260, 335, 33, 122, 577, 639, 88, 887, 760, 705, 784],
[893, 908, 88, 16, 905, 923, 220, 690, 648, 747, 591],
[276, 217, 551, 996, 879, 575, 154, 724, 468, 856, 317],
[427, 269, 210, 221, 352, 980, 952, 189, 573, 520, 383]]),
[64, 644, 694, 5, 163, 760, 568, 84, 67, 517, 872, 539,
964, 195, 826, 974, 867, 784, 591, 317, 383, 520, 573,
189, 952, 980, 352, 221, 210, 269, 427, 276, 893, 260,
804, 612, 2, 320, 910, 933, 412, 172, 162, 97, 626,
830, 969, 809, 522, 438, 855, 46, 331, 609, 705, 747,
856, 468, 724, 154, 575, 879, 996, 551, 217, 908, 335,
448, 974, 802, 135, 121, 228, 803, 443, 4, 341, 64,
60, 580, 694, 218, 798, 760, 648, 690, 220, 923, 905,
16, 88, 33, 962, 539, 971, 26, 700, 58, 741, 111, 944,
803, 246, 109, 887, 88, 639, 577, 122, 406, 639, 42,
232, 432, 910, 809, 826, 556, 439, 21, 878])
def test_snail_045(self):
self.assertEqual(snail([[631, 374, 877, 595, 738, 324, 704, 280, 468,
923, 505, 471, 786],
[725, 339, 191, 381, 268, 471, 998, 922, 108,
118, 197, 889, 647],
[422, 442, 5, 197, 843, 702, 57, 58, 593, 76,
159, 773, 840],
[166, 158, 990, 841, 117, 450, 765, 455, 254,
99, 224, 624, 608],
[129, 252, 729, 219, 199, 309, 229, 855, 542,
972, 470, 850, 286],
[255, 368, 57, 890, 572, 308, 655, 779, 134,
580, 335, 387, 888],
[27, 281, 301, 15, 780, 318, 425, 931, 277, 972,
499, 622, 692],
[466, 70, 405, 84, 594, 63, 228, 946, 440, 354,
720, 480, 259],
[257, 323, 934, 503, 258, 510, 921, 254, 430,
508, 484, 353, 949],
[321, 168, 497, 248, 670, 628, 258, 877, 585,
965, 796, 567, 233],
[190, 127, 715, 552, 621, 806, 212, 367, 420,
826, 534, 428, 604],
[908, 504, 880, 691, 117, 289, 731, 232, 629,
161, 417, 942, 52],
[341, 721, 127, 728, 46, 763, 884, 431, 905,
951, 338, 775, 868]]),
[631, 374, 877, 595, 738, 324, 704, 280, 468, 923, 505,
471, 786, 647, 840, 608, 286, 888, 692, 259, 949, 233,
604, 52, 868, 775, 338, 951, 905, 431, 884, 763, 46,
728, 127, 721, 341, 908, 190, 321, 257, 466, 27, 255,
129, 166, 422, 725, 339, 191, 381, 268, 471, 998, 922,
108, 118, 197, 889, 773, 624, 850, 387, 622, 480, 353,
567, 428, 942, 417, 161, 629, 232, 731, 289, 117, 691,
880, 504, 127, 168, 323, 70, 281, 368, 252, 158, 442,
5, 197, 843, 702, 57, 58, 593, 76, 159, 224, 470, 335,
499, 720, 484, 796, 534, 826, 420, 367, 212, 806, 621,
552, 715, 497, 934, 405, 301, 57, 729, 990, 841, 117,
450, 765, 455, 254, 99, 972, 580, 972, 354, 508, 965,
585, 877, 258, 628, 670, 248, 503, 84, 15, 890, 219,
199, 309, 229, 855, 542, 134, 277, 440, 430, 254, 921,
510, 258, 594, 780, 572, 308, 655, 779, 931, 946, 228,
63, 318, 425])
def test_snail_046(self):
self.assertEqual(snail(
[[448, 727, 434, 177, 987], [288, 839, 372, 379, 326],
[266, 287, 407, 590, 327], [782, 941, 470, 580, 365],
[823, 313, 939, 776, 834]]),
[448, 727, 434, 177, 987, 326, 327, 365, 834, 776, 939,
313, 823, 782, 266, 288, 839, 372, 379, 590, 580, 470,
941, 287, 407])
def test_snail_047(self):
self.assertEqual(snail([[134, 625, 697, 457, 3, 817, 998, 303, 562, 680,
864, 613, 483, 648, 569, 37],
[328, 426, 402, 699, 409, 971, 63, 339, 238,
759, 392, 835, 574, 349, 949, 842],
[491, 104, 329, 958, 321, 561, 47, 185, 759,
121, 608, 163, 746, 268, 114, 96],
[166, 374, 830, 603, 171, 472, 891, 395, 650,
879, 219, 441, 151, 672, 331, 202],
[763, 122, 903, 770, 555, 406, 876, 126, 509,
564, 333, 937, 863, 163, 970, 818],
[736, 749, 999, 758, 110, 809, 701, 861, 153,
823, 721, 107, 944, 830, 750, 333],
[750, 454, 398, 921, 852, 451, 774, 157, 715,
578, 474, 135, 955, 838, 386, 887],
[140, 935, 474, 862, 292, 785, 433, 271, 153,
908, 426, 686, 694, 206, 251, 533],
[13, 708, 970, 604, 773, 469, 663, 311, 734,
930, 528, 284, 558, 278, 112, 796],
[737, 293, 588, 611, 94, 821, 436, 105, 464,
543, 35, 623, 3, 33, 611, 809],
[812, 394, 490, 319, 385, 300, 47, 217, 181,
839, 527, 229, 889, 212, 754, 34],
[615, 169, 501, 732, 472, 298, 728, 494, 639,
582, 167, 79, 679, 66, 116, 445],
[307, 688, 864, 469, 119, 374, 338, 182, 396,
651, 77, 319, 744, 499, 95, 599],
[684, 884, 412, 446, 154, 747, 892, 34, 875,
845, 609, 455, 551, 940, 151, 932],
[949, 14, 409, 86, 966, 430, 157, 919, 875, 783,
268, 184, 420, 162, 970, 673],
[65, 50, 700, 314, 348, 547, 655, 313, 165, 573,
789, 164, 219, 216, 353, 975]]),
[134, 625, 697, 457, 3, 817, 998, 303, 562, 680, 864,
613, 483, 648, 569, 37, 842, 96, 202, 818, 333, 887,
533, 796, 809, 34, 445, 599, 932, 673, 975, 353, 216,
219, 164, 789, 573, 165, 313, 655, 547, 348, 314, 700,
50, 65, 949, 684, 307, 615, 812, 737, 13, 140, 750,
736, 763, 166, 491, 328, 426, 402, 699, 409, 971, 63,
339, 238, 759, 392, 835, 574, 349, 949, 114, 331, 970,
750, 386, 251, 112, 611, 754, 116, 95, 151, 970, 162,
420, 184, 268, 783, 875, 919, 157, 430, 966, 86, 409,
14, 884, 688, 169, 394, 293, 708, 935, 454, 749, 122,
374, 104, 329, 958, 321, 561, 47, 185, 759, 121, 608,
163, 746, 268, 672, 163, 830, 838, 206, 278, 33, 212,
66, 499, 940, 551, 455, 609, 845, 875, 34, 892, 747,
154, 446, 412, 864, 501, 490, 588, 970, 474, 398, 999,
903, 830, 603, 171, 472, 891, 395, 650, 879, 219, 441,
151, 863, 944, 955, 694, 558, 3, 889, 679, 744, 319,
77, 651, 396, 182, 338, 374, 119, 469, 732, 319, 611,
604, 862, 921, 758, 770, 555, 406, 876, 126, 509, 564,
333, 937, 107, 135, 686, 284, 623, 229, 79, 167, 582,
639, 494, 728, 298, 472, 385, 94, 773, 292, 852, 110,
809, 701, 861, 153, 823, 721, 474, 426, 528, 35, 527,
839, 181, 217, 47, 300, 821, 469, 785, 451, 774, 157,
715, 578, 908, 930, 543, 464, 105, 436, 663, 433, 271,
153, 734, 311])
def test_snail_048(self):
self.assertEqual(snail(
[[148, 131, 809, 558, 988], [226, 872, 217, 699, 709],
[326, 703, 976, 559, 826], [749, 582, 891, 321, 58],
[773, 142, 687, 234, 325]]),
[148, 131, 809, 558, 988, 709, 826, 58, 325, 234, 687,
142, 773, 749, 326, 226, 872, 217, 699, 559, 321, 891,
582, 703, 976])
def test_snail_049(self):
self.assertEqual(snail([[705, 149, 326, 506, 792, 406, 65, 525, 996,
158, 592, 282, 643, 696, 31, 520, 757, 275,
276],
[395, 704, 227, 598, 163, 173, 844, 171, 882,
571, 228, 161, 943, 43, 278, 3, 680, 719, 746],
[871, 369, 979, 617, 840, 771, 315, 81, 751,
543, 799, 516, 452, 899, 115, 102, 262, 234,
751],
[667, 55, 885, 708, 943, 586, 330, 992, 663, 19,
180, 786, 89, 208, 486, 706, 742, 854, 883],
[103, 917, 812, 629, 301, 326, 783, 757, 747,
217, 464, | |
<gh_stars>1-10
"""
Misc. utility routines.
"""
import os
import sys
import itertools
import inspect
import warnings
import importlib
from importlib import import_module
from configparser import ConfigParser
from fnmatch import fnmatch
from os.path import join, dirname, basename, isfile, abspath, split, splitext
from argparse import ArgumentParser
from testflo.cover import start_coverage, stop_coverage
# Module-level scratch dictionary; presumably shared state for helpers in
# this module — TODO confirm its users (none are visible in this chunk).
_store = {}
def _get_parser():
    """Return an ArgumentParser configured with all testflo command line options.

    Options fall into several groups: general (version/config/input files),
    concurrency and output control, coverage analysis, benchmarking, and
    test discovery/filtering.
    """
    parser = ArgumentParser()
    parser.usage = "testflo [options]"
    # --- general options ---
    parser.add_argument('--version', action='store_true', dest='version',
                        help="Display the version number and exit.")
    parser.add_argument('-c', '--config', action='store', dest='cfg',
                        metavar='FILE',
                        help='Path of config file where preferences are specified.')
    parser.add_argument('-t', '--testfile', action='store', dest='testfile',
                        metavar='FILE',
                        help='Path to a file containing one testspec per line.')
    parser.add_argument('--maxtime', action='store', dest='maxtime',
                        metavar='TIME_LIMIT', default=-1, type=float,
                        help='Specifies a time limit in seconds for tests to be saved to '
                             'the quicktests.in file.')
    # --- concurrency / output control ---
    parser.add_argument('-n', '--numprocs', type=int, action='store',
                        dest='num_procs', metavar='NUM_TEST_PROCS',
                        help='Number of concurrent test processes to run. By default, this will '
                             'use the number of virtual processors available. To force tests to '
                             'run consecutively, specify a value of 1.')
    parser.add_argument('-o', '--outfile', action='store', dest='outfile',
                        metavar='FILE', default='testflo_report.out',
                        help='Name of test report file.  Default is testflo_report.out.')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                        help="Include testspec and elapsed time in "
                             "screen output. Also shows all stderr output, even if test doesn't fail")
    parser.add_argument('--compact', action='store_true', dest='compact',
                        help="Limit output to a single character for each test.")
    parser.add_argument('--dryrun', action='store_true', dest='dryrun',
                        help="Don't actually run tests, but print "
                              "which tests would have been run.")
    parser.add_argument('--pre_announce', action='store_true', dest='pre_announce',
                        help="Announce the name of each test before it runs. This "
                             "can help track down a hanging test. This automatically sets -n 1.")
    parser.add_argument('-f', '--fail', action='store_true', dest='save_fails',
                        help="Save failed tests to failtests.in file.")
    parser.add_argument('--full_path', action='store_true', dest='full_path',
                        help="Display full test specs instead of shortened names.")
    parser.add_argument('-i', '--isolated', action='store_true', dest='isolated',
                        help="Run each test in a separate subprocess.")
    parser.add_argument('--nompi', action='store_true', dest='nompi',
                        help="Force all tests to run without MPI. This can be useful "
                             "for debugging.")
    parser.add_argument('-x', '--stop', action='store_true', dest='stop',
                        help="Stop after the first test failure, or as soon as possible"
                             " when running concurrent tests.")
    parser.add_argument('-s', '--nocapture', action='store_true', dest='nocapture',
                        help="Standard output (stdout) will not be captured and will be"
                             " written to the screen immediately.")
    # --- coverage analysis ---
    parser.add_argument('--coverage', action='store_true', dest='coverage',
                        help="Perform coverage analysis and display results on stdout")
    parser.add_argument('--coverage-html', action='store_true', dest='coveragehtml',
                        help="Perform coverage analysis and display results in browser")
    parser.add_argument('--coverpkg', action='append', dest='coverpkgs',
                        metavar='PKG',
                        help="Add the given package to the coverage list. You"
                              " can use this option multiple times to cover"
                              " multiple packages.")
    parser.add_argument('--cover-omit', action='append', dest='cover_omits',
                        metavar='FILE',
                        help="Add a file name pattern to remove it from coverage.")
    # --- benchmarking ---
    parser.add_argument('-b', '--benchmark', action='store_true', dest='benchmark',
                        help='Specifies that benchmarks are to be run rather '
                             'than tests, so only files starting with "benchmark_" '
                             'will be executed.')
    parser.add_argument('-d', '--datafile', action='store', dest='benchmarkfile',
                        metavar='FILE', default='benchmark_data.csv',
                        help='Name of benchmark data file.  Default is benchmark_data.csv.')
    # --- reporting / discovery ---
    parser.add_argument('--noreport', action='store_true', dest='noreport',
                        help="Don't create a test results file.")
    parser.add_argument('--show_skipped', action='store_true', dest='show_skipped',
                        help="Display a list of any skipped tests in the summary.")
    parser.add_argument('--disallow_skipped', action='store_true', dest='disallow_skipped',
                        help="Return exit code 2 if no tests failed but some tests are skipped.")
    parser.add_argument('tests', metavar='test', nargs='*',
                        help='A test method, test case, module, or directory to run.  If not '
                             'supplied, the current working directory is assumed.')
    parser.add_argument('-m', '--match', '--testmatch', action='append', dest='test_glob',
                        metavar='GLOB',
                        help='Pattern to use for test discovery. Multiple patterns are allowed.',
                        default=[])
    parser.add_argument('--exclude', action='append', dest='excludes', metavar='GLOB', default=[],
                        help="Pattern to exclude test functions. Multiple patterns are allowed.")
    parser.add_argument('--timeout', action='store', dest='timeout', type=float,
                        help='Timeout in seconds. Test will be terminated if it takes longer than timeout. Only'
                             ' works for tests running in a subprocess (MPI and isolated).')
    return parser
def _options2args():
"""Gets the testflo args that should be used in subprocesses."""
cmdset = set([
'--nocapture',
'-s',
'--coverpkg',
'--coverage',
'--coverage-html',
'--cover-omit',
])
keep = []
i = 0
args = sys.argv[1:]
argslen = len(args)
while i < argslen:
arg = args[i]
if arg.split('=',1)[0] in cmdset:
keep.append(arg)
if ((arg.startswith('--coverpkg') or arg.startswith('--cover-omit'))
and '=' not in arg):
i += 1
keep.append(args[i])
i += 1
return keep
def _file_gen(dname, fmatch=bool, dmatch=None):
"""A generator returning files under the given directory, with optional
file and directory filtering.
fmatch: predicate funct
A predicate function that returns True on a match.
This is used to match files only.
dmatch: predicate funct
A predicate function that returns True on a match.
This is used to match directories only.
"""
if dmatch is not None and not dmatch(dname):
return
for path, dirlist, filelist in os.walk(dname):
if dmatch is not None: # prune directories to search
newdl = [d for d in dirlist if dmatch(d)]
if len(newdl) != len(dirlist):
dirlist[:] = newdl # replace contents of dirlist to cause pruning
for name in [f for f in filelist if fmatch(f)]:
yield join(path, name)
def find_files(start, match=None, exclude=None,
               dirmatch=None, direxclude=None):
    """Return a generator of filenames found under the starting directories.

    start: str or list of str
        Starting directory or list of directories.
    match / exclude: str or predicate funct
        Glob pattern string or predicate; applied to file names only.
    dirmatch / direxclude: str or predicate funct
        Glob pattern string or predicate; applied to directory names only.

    Walks all subdirectories below each starting directory, subject to
    directory filtering.
    """
    roots = [start] if isinstance(start, str) else start
    if len(roots) == 0:
        return iter([])

    # normalize the positive file filter to a predicate
    if match is None:
        file_ok = bool
    elif isinstance(match, str):
        def file_ok(name):
            return fnmatch(name, match)
    else:
        file_ok = match

    # normalize the positive directory filter to a predicate
    if dirmatch is None:
        dir_ok = bool
    elif isinstance(dirmatch, str):
        def dir_ok(name):
            return fnmatch(name, dirmatch)
    else:
        dir_ok = dirmatch

    # fold the file exclusion into a single predicate
    if isinstance(exclude, str):
        def file_pred(name):
            return file_ok(name) and not fnmatch(name, exclude)
    elif exclude is not None:
        def file_pred(name):
            return file_ok(name) and not exclude(name)
    else:
        file_pred = file_ok

    # fold the directory exclusion into a single predicate
    if isinstance(direxclude, str):
        if dir_ok is bool:
            def dir_pred(name):
                return not fnmatch(name, direxclude)
        else:
            def dir_pred(name):
                return dir_ok(name) and not fnmatch(name, direxclude)
    elif direxclude is not None:
        def dir_pred(name):
            return dir_ok(name) and not direxclude(name)
    else:
        dir_pred = dir_ok

    gens = [_file_gen(d, fmatch=file_pred, dmatch=dir_pred) for d in roots]
    return itertools.chain(*gens) if len(gens) > 1 else gens[0]
def fpath2modpath(fpath):
    """Return the full dotted Python name for a module file, including its
    enclosing packages (based on the existence of ``__init__.py`` files).
    """
    fname = basename(fpath)
    # a package's __init__.py is addressed by the package name itself
    parts = [] if fname.startswith('__init__.') else [splitext(fname)[0]]
    pkg_dir = dirname(abspath(fpath))
    # climb upward while each directory is itself a package
    while isfile(join(pkg_dir, '__init__.py')):
        pkg_dir, pkg = split(pkg_dir)
        parts.append(pkg)
    return '.'.join(reversed(parts))
def parent_dirs(fpath):
    """Return the absolute paths of the file's parent directory and each of
    its ancestors, ordered nearest-first (filesystem root's child last).
    """
    parts = abspath(fpath).split(os.path.sep)
    # build from the deepest ancestor down to the shallowest (index 2 keeps
    # at least one component beyond the root)
    return [os.path.sep.join(parts[:i]) for i in range(len(parts) - 1, 1, -1)]
def get_testpath(testspec):
    """Split a testspec into (module_path, rest_of_spec).

    Handles Windows drive-letter absolute paths ("C:\\dir\\file.py:Test"),
    where the first ':' belongs to the path rather than the spec separator.
    """
    spec = testspec.strip()
    pieces = spec.split(':')
    if len(pieces) > 1 and pieces[1].startswith('\\'):
        # windows absolute path: re-join drive letter with the path part
        path = ':'.join(pieces[:2])
        rest = pieces[2] if len(pieces) == 3 else ''
    else:
        path, _, rest = spec.partition(':')
    return path, rest
def find_module(name):
    """Return the pathname of the Python file corresponding to the
    given module name, or None if it can't be found.  The
    file must be an uncompiled Python (.py) file.
    """
    # 'import importlib' alone does not guarantee the 'util' submodule is
    # loaded; import it explicitly so the attribute access can't fail.
    import importlib.util
    try:
        info = importlib.util.find_spec(name)
    except (ImportError, ValueError):
        # ValueError is raised for modules whose __spec__ is unset/None
        info = None
    if info is None:
        return None
    return info.origin
# Maps a module name to its source file for modules that are not inside a
# package, so that duplicate (same-named) test files can be detected and
# flagged later.
_mod2file = {}  # keep track of non-pkg files to detect and flag dups
def try_import(fname, modpath):
try:
mod = import_module(modpath)
except ImportError:
# this might be a module that's not in the same
# environment as testflo, so try temporarily prepending
# its parent dirs to sys.path so it'll (hopefully) be
# importable
oldpath = sys.path[:]
sys.path.extend(parent_dirs(fname))
sys.path.append(os.getcwd())
try:
mod = import_module(modpath)
# don't keep this module around in | |
<gh_stars>10-100
# IMPORTS
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys
from itertools import chain
from os import listdir
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import sklearn
from sklearn import datasets
from sklearn.metrics import auc, roc_curve, roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
import sascorer
# CALC FUNCTIONS
def calc_props_dude(smiles):
    """Compute the six DUD-E matching properties for a SMILES string.

    Returns (prop_dict, prop_array) with molecular weight, logP, Lipinski
    HBA/HBD, rotatable bonds and net formal charge; on any RDKit
    parse/descriptor failure returns ({}, [0, 0, 0, 0, 0, 0]).
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
        prop_dict = {
            'mol_wg': Descriptors.MolWt(mol),
            'log_p': Chem.Crippen.MolLogP(mol),
            'hba': Chem.rdMolDescriptors.CalcNumLipinskiHBA(mol),
            'hbd': Chem.rdMolDescriptors.CalcNumLipinskiHBD(mol),
            'rot_bnds': Chem.rdMolDescriptors.CalcNumRotatableBonds(mol),
            'net_charge': Chem.rdmolops.GetFormalCharge(mol),
        }
        prop_array = [prop_dict[k] for k in
                      ('mol_wg', 'log_p', 'hba', 'hbd', 'rot_bnds',
                       'net_charge')]
        return (prop_dict, prop_array)
    # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
    # no longer swallowed
    except Exception:
        return ({}, [0, 0, 0, 0, 0, 0])
def calc_props_dude_extended(smiles):
    """Compute the nine extended DUD-E matching properties for a SMILES string.

    Extends calc_props_dude with ring count, TPSA and stereo-center count.
    Returns (prop_dict, prop_array); on any RDKit failure returns
    ({}, [-10]*9) so failures are distinguishable from real zero values.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
        prop_dict = {
            'mol_wg': Descriptors.MolWt(mol),
            'log_p': Chem.Crippen.MolLogP(mol),
            'hba': Chem.rdMolDescriptors.CalcNumLipinskiHBA(mol),
            'hbd': Chem.rdMolDescriptors.CalcNumLipinskiHBD(mol),
            'ring_ct': Chem.rdMolDescriptors.CalcNumRings(mol),
            'rot_bnds': Chem.rdMolDescriptors.CalcNumRotatableBonds(mol),
            'net_charge': Chem.rdmolops.GetFormalCharge(mol),
            'tpsa': Chem.rdMolDescriptors.CalcTPSA(mol),
            'stereo_cnts': len(Chem.FindMolChiralCenters(
                mol, force=True, includeUnassigned=True)),
        }
        prop_array = [prop_dict[k] for k in
                      ('mol_wg', 'log_p', 'hba', 'hbd', 'ring_ct',
                       'rot_bnds', 'net_charge', 'tpsa', 'stereo_cnts')]
        return (prop_dict, prop_array)
    # narrowed from a bare 'except:'
    except Exception:
        return ({}, [-10, -10, -10, -10, -10, -10, -10, -10, -10])
def calc_props_basic(smiles):
    """Compute seven basic composition properties for a SMILES string.

    Returns [heavy atoms, #C, #N, #O, #F, #S, #Cl]; all zeros when RDKit
    fails to parse the molecule.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)

        def atom_count(smarts):
            # count atoms matching a one-atom SMARTS pattern; maxMatches
            # lifts the default cap so large molecules are counted fully
            return len(mol.GetSubstructMatches(Chem.MolFromSmarts(smarts),
                                               maxMatches=mol.GetNumAtoms()))

        return [mol.GetNumHeavyAtoms(),
                atom_count("[#6]"),   # carbon
                atom_count("[#7]"),   # nitrogen
                atom_count("[#8]"),   # oxygen
                atom_count("[#9]"),   # fluorine
                atom_count("[#16]"),  # sulfur
                atom_count("[#17]")]  # chlorine
    # narrowed from a bare 'except:'
    except Exception:
        return [0, 0, 0, 0, 0, 0, 0]
def calc_props_muv(smiles):
    """Compute the 17 MUV simple-descriptor properties for a SMILES string.

    Returns [total atoms incl. H, heavy atoms, 10 element counts
    (B,C,N,O,F,P,S,Cl,Br,I), logP, Lipinski HBA/HBD, ring count,
    stereo centers]; all zeros when RDKit fails.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)

        def atom_count(smarts):
            # count atoms matching a one-atom SMARTS pattern; maxMatches
            # lifts the default cap so large molecules are counted fully
            return len(mol.GetSubstructMatches(Chem.MolFromSmarts(smarts),
                                               maxMatches=mol.GetNumAtoms()))

        return [
            mol.GetNumAtoms(onlyExplicit=False),  # atoms including hydrogens
            mol.GetNumHeavyAtoms(),
            atom_count("[#5]"),    # boron
            atom_count("[#6]"),    # carbon
            atom_count("[#7]"),    # nitrogen
            atom_count("[#8]"),    # oxygen
            atom_count("[#9]"),    # fluorine
            atom_count("[#15]"),   # phosphorus
            atom_count("[#16]"),   # sulfur
            atom_count("[#17]"),   # chlorine
            atom_count("[#35]"),   # bromine
            atom_count("[#53]"),   # iodine
            Chem.Crippen.MolLogP(mol),
            Chem.rdMolDescriptors.CalcNumLipinskiHBA(mol),
            Chem.rdMolDescriptors.CalcNumLipinskiHBD(mol),
            Chem.rdMolDescriptors.CalcNumRings(mol),
            len(Chem.FindMolChiralCenters(mol, force=True,
                                          includeUnassigned=True)),
        ]
    # narrowed from a bare 'except:'
    except Exception:
        return [0] * 17
from rdkit.Chem import rdPartialCharges
def calc_partial_charges(mol):
    """Return (sum of positive, absolute sum of negative) Gasteiger partial
    charges over all atoms of *mol*.  Mutates *mol* by computing the
    Gasteiger charges in place.
    """
    Chem.rdPartialCharges.ComputeGasteigerCharges(mol)
    pos_total, neg_total = 0, 0
    for atom in mol.GetAtoms():
        q = float(atom.GetProp("_GasteigerCharge"))
        if q > 0:
            pos_total += q
        else:
            neg_total -= q
    return pos_total, neg_total
def calc_charges(mol):
    """Return (sum of positive, absolute sum of negative) formal charges
    over all atoms of *mol*.
    """
    pos_total, neg_total = 0, 0
    for atom in mol.GetAtoms():
        q = float(atom.GetFormalCharge())
        if q > 0:
            pos_total += q
        else:
            neg_total -= q
    return pos_total, neg_total
def calc_props_dekois(smiles):
    """Compute the eight DEKOIS matching properties for a SMILES string.

    Hydrogens are added explicitly before descriptor calculation.
    Returns (prop_dict, prop_array); ({}, [0]*8) on any RDKit failure.
    """
    try:
        mol = Chem.AddHs(Chem.MolFromSmiles(smiles))
        # separate sums of positive and (absolute) negative formal charges
        pos, neg = calc_charges(mol)
        prop_dict = {
            'mol_wg': Descriptors.MolWt(mol),
            'log_p': Chem.Crippen.MolLogP(mol),
            'hba': Chem.rdMolDescriptors.CalcNumLipinskiHBA(mol),
            'hbd': Chem.rdMolDescriptors.CalcNumLipinskiHBD(mol),
            'ring_ct': Chem.rdMolDescriptors.CalcNumAromaticRings(mol),
            'rot_bnds': Chem.rdMolDescriptors.CalcNumRotatableBonds(mol),
            'pos_charge': pos,
            'neg_charge': neg,
        }
        prop_array = [prop_dict[k] for k in
                      ('mol_wg', 'log_p', 'hba', 'hbd', 'ring_ct',
                       'rot_bnds', 'pos_charge', 'neg_charge')]
        return (prop_dict, prop_array)
    # narrowed from a bare 'except:'
    except Exception:
        return ({}, [0, 0, 0, 0, 0, 0, 0, 0])
def calc_props_all(smiles):
    """Compute the union of MUV, DEKOIS and extended DUD-E properties, plus
    QED and synthetic-accessibility score, for one SMILES string.

    Returns a 26-element list; all zeros when RDKit fails.
    (BUG FIX: the failure path previously returned 27 zeros while the
    success path returned 26 values — lengths are now consistent.)
    """
    try:
        mol = Chem.MolFromSmiles(smiles)

        def atom_count(smarts):
            # count atoms matching a one-atom SMARTS pattern; maxMatches
            # lifts the default cap so large molecules are counted fully
            return len(mol.GetSubstructMatches(Chem.MolFromSmarts(smarts),
                                               maxMatches=mol.GetNumAtoms()))

        # separate sums of positive and (absolute) negative formal charges
        pos, neg = calc_charges(mol)
        return [
            ### MUV properties ###
            mol.GetNumAtoms(onlyExplicit=False),  # atoms including hydrogens
            mol.GetNumHeavyAtoms(),
            atom_count("[#5]"),    # boron
            atom_count("[#6]"),    # carbon
            atom_count("[#7]"),    # nitrogen
            atom_count("[#8]"),    # oxygen
            atom_count("[#9]"),    # fluorine
            atom_count("[#15]"),   # phosphorus
            atom_count("[#16]"),   # sulfur
            atom_count("[#17]"),   # chlorine
            atom_count("[#35]"),   # bromine
            atom_count("[#53]"),   # iodine
            Chem.Crippen.MolLogP(mol),
            Chem.rdMolDescriptors.CalcNumLipinskiHBA(mol),
            Chem.rdMolDescriptors.CalcNumLipinskiHBD(mol),
            Chem.rdMolDescriptors.CalcNumRings(mol),
            len(Chem.FindMolChiralCenters(mol, force=True,
                                          includeUnassigned=True)),
            ### DEKOIS properties (additional) ###
            Descriptors.MolWt(mol),
            Chem.rdMolDescriptors.CalcNumAromaticRings(mol),
            Chem.rdMolDescriptors.CalcNumRotatableBonds(mol),
            pos,
            neg,
            ### DUD-E extended (additional) ###
            Chem.rdmolops.GetFormalCharge(mol),
            Chem.rdMolDescriptors.CalcTPSA(mol),
            ### Additional ###
            Chem.QED.qed(mol),
            sascorer.calculateScore(mol),
        ]
    # narrowed from a bare 'except:'
    except Exception:
        return [0] * 26
def calc_dataset_props_all(dataset, verbose=False):
    """Compute the full 26-value property vector for every SMILES string in
    *dataset* and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        row = calc_props_all(smi)
        if row is not None:
            rows.append(row)
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def calc_dataset_props_dude(dataset, verbose=False):
    """Compute the DUD-E property array for every SMILES string in *dataset*
    and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        result = calc_props_dude(smi)
        if result is not None:
            # index 1 holds the property array (index 0 is the dict)
            rows.append(result[1])
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def calc_dataset_props_dude_extended(dataset, verbose=False):
    """Compute the extended DUD-E property array for every SMILES string in
    *dataset* and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        result = calc_props_dude_extended(smi)
        if result is not None:
            # index 1 holds the property array (index 0 is the dict)
            rows.append(result[1])
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def calc_dataset_props_muv(dataset, verbose=False):
    """Compute the MUV property vector for every SMILES string in *dataset*
    and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        row = calc_props_muv(smi)
        if row is not None:
            rows.append(row)
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def calc_dataset_props_basic(dataset, verbose=False):
    """Compute the basic composition vector for every SMILES string in
    *dataset* and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        row = calc_props_basic(smi)
        if row is not None:
            rows.append(row)
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def calc_dataset_props_dekois(dataset, verbose=False):
    """Compute the DEKOIS property array for every SMILES string in *dataset*
    and return them stacked as a numpy array.
    """
    rows = []
    for count, smi in enumerate(dataset):
        result = calc_props_dekois(smi)
        if result is not None:
            # index 1 holds the property array (index 0 is the dict)
            rows.append(result[1])
        if verbose and count % 1000 == 0:
            print("\rProcessed smiles: " + str(count), end='')
    print("\nDone calculating properties")
    return np.array(rows)
def doe_score(actives, decoys):
    """Deviation-from-optimal-embedding score.

    For each active, ranks all other molecules by (inverted) Euclidean
    distance in 5-95 percentile-normalised feature space, then accumulates
    the absolute area between the resulting ROC curve and the diagonal.
    Returns the average of that area over all actives (lower is better).
    """
    pooled = list(actives) + list(decoys)
    hi = np.percentile(pooled, 95, axis=0)
    lo = np.percentile(pooled, 5, axis=0)
    spread = hi - lo
    # guard constant features against division by zero
    for k in range(len(spread)):
        if spread[k] == 0:
            spread[k] = 1.
    act_scaled = [a / spread for a in actives]
    dec_scaled = [d / spread for d in decoys]
    everything = act_scaled + dec_scaled
    # one positive label per remaining active (the query itself is removed)
    labels = [1] * (len(act_scaled) - 1) + [0] * len(dec_scaled)
    per_active_scores = []
    for idx, query in enumerate(act_scaled):
        others = list(everything)
        del others[idx]
        # arbitrary large constant inverts distances into similarity scores
        sims = [100 - np.linalg.norm(other - query) for other in others]
        fpr, tpr, _ = roc_curve(labels, sims)
        area = 0
        for j in range(len(fpr) - 1):
            width = fpr[j + 1] - fpr[j]
            # trapezoid above the curve minus trapezoid above the diagonal
            area += abs(0.5 * ((tpr[j + 1] + tpr[j]) * width -
                               (fpr[j + 1] + fpr[j]) * width))
        per_active_scores.append(area)
    return np.average(per_active_scores)
from collections import defaultdict
def lads_score_v2(actives, decoys):
    """Latent-actives-in-decoy-set score (v2), similar to DEKOIS.

    Builds a frequency map of feature-Morgan fingerprint bits over the
    actives, weights each bit by the number of heavy atoms it covers, and
    scores each decoy by the average weight of its own bits.
    Lower is better (less like actives), higher is worse (more like actives).
    Returns one score per decoy, in input order.
    """
    # Similar to DEKOIS (v2)
    # Lower is better (less like actives), higher is worse (more like actives)
    active_fps = []
    active_info = {}
    info={}
    atoms_per_bit = defaultdict(int)
    for smi in actives:
        m = Chem.MolFromSmiles(smi)
        # bitInfo fills 'info' with (atom, radius) pairs for every set bit
        active_fps.append(AllChem.GetMorganFingerprint(m,3,useFeatures=True, bitInfo=info))
        for key in info:
            if key not in active_info:
                active_info[key] = info[key]
                # reconstruct the substructure this bit describes so we can
                # measure its size in heavy atoms
                env = Chem.FindAtomEnvironmentOfRadiusN(m, info[key][0][1], info[key][0][0])
                amap={}
                submol=Chem.PathToSubmol(m,env,atomMap=amap)
                if info[key][0][1] == 0:
                    # radius-0 bits describe a single atom
                    atoms_per_bit[key] = 1
                else:
                    atoms_per_bit[key] = submol.GetNumHeavyAtoms()
    decoys_fps = [AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smi),3,useFeatures=True) for smi in decoys] # Roughly FCFP_6
    # count in how many actives each bit occurs
    master_active_fp_freq = defaultdict(int)
    for fp in active_fps:
        fp_dict = fp.GetNonzeroElements()
        for k, v in fp_dict.items():
            master_active_fp_freq[k] += 1
    # Reweight
    for k in master_active_fp_freq:
        # Normalise
        master_active_fp_freq[k] /= len(active_fps)
        # Weight by size of bit
        master_active_fp_freq[k] *= atoms_per_bit[k]
    # average active-bit weight over each decoy's own bits
    decoys_lads_avoid_scores = [sum([master_active_fp_freq[k] for k in decoy_fp.GetNonzeroElements()])/len(decoy_fp.GetNonzeroElements())
                                for decoy_fp in decoys_fps]
    return decoys_lads_avoid_scores
def dg_score(actives, decoys):
    """Doppelganger score, similar to DEKOIS.

    For each active, find the most similar decoy by Tanimoto similarity of
    feature-Morgan bit vectors (roughly FCFP_6).  Returns two parallel
    lists: the best similarity per active and the matching decoy index.
    Lower is better (less like actives), higher is worse (more like actives).
    """
    act_fps = [AllChem.GetMorganFingerprintAsBitVect(
        Chem.MolFromSmiles(smi), 3, useFeatures=True) for smi in actives]
    dec_fps = [AllChem.GetMorganFingerprintAsBitVect(
        Chem.MolFromSmiles(smi), 3, useFeatures=True) for smi in decoys]
    best_sims = []
    best_ids = []
    for fp in act_fps:
        sims = [DataStructs.TanimotoSimilarity(fp, dec) for dec in dec_fps]
        best_sims.append(max(sims))
        best_ids.append(np.argmax(sims))
    return best_sims, best_ids
def dg_score_rev(actives, decoys):
# Similar to DEKOIS
# | |
#!/usr/bin/env python3
import random
import asyncio
import re
import discord
from discord.ext import commands
from Users import Users
from random import choices
def get_hangman_art():
    """Load the hangman ASCII art stages from disk.

    Indices 0-6 of the returned list hold the seven joined art stages (one
    per number of wrong guesses); later entries keep the raw file lines.
    """
    art_array = []
    # raw string: "\h" in the old literal was an invalid escape sequence
    # (SyntaxWarning / future error); the byte content is unchanged.
    # 'with' guarantees the file handle is closed.
    with open(r"db_and_words\hangmen.txt") as my_file:
        for line in my_file:
            art_array.append(line)
    # convert respective list index-ranges to string with ''.join;
    # each stage is a block of lines separated by a one-line divider
    art_array[0] = "".join(art_array[0:6])
    art_array[1] = "".join(art_array[7:13])
    art_array[2] = "".join(art_array[14:20])
    art_array[3] = "".join(art_array[21:27])
    art_array[4] = "".join(art_array[28:34])
    art_array[5] = "".join(art_array[35:41])
    art_array[6] = "".join(art_array[42:49])
    return art_array
def get_hangman_words():
    """Read the hangman word list from disk (one word per line, newline
    terminators kept).  Called once at import so the file isn't re-read
    every game.
    """
    # raw string: "\w" in the old literal was an invalid escape sequence
    # (SyntaxWarning / future error); 'with' guarantees the handle closes
    # even if readlines() raises.
    with open(r"db_and_words\words.txt", "r") as words_file:
        return words_file.readlines()
def battle_decider(fighter1, fighter2, fighter1_weight, fighter2_weight):
    """Pick a winner between two fighter ids with weighted probability.

    fighter1/fighter2 are integer ids; the weights are their relative
    win probabilities.  Returns the winning fighter id as an int.
    """
    # choices() maps each candidate to a probability and returns a
    # one-element list; take the element directly instead of the old
    # regex-parse of the list's repr (fragile) — also drops the debug print.
    winner = choices([fighter1, fighter2], [fighter1_weight, fighter2_weight])[0]
    return int(winner)
def pick_word(cat):
    """Pick a random word for the given category number (1-8).

    Returns (word_upper, category_name, underscore_sequence) where
    underscore_sequence is the per-character display list of blanks that
    find_matches() fills in as letters are guessed.  Unknown categories
    fall back to "US States".  Relies on the module-level `all_words`
    list, whose slice boundaries encode the categories.
    """
    if cat == 1:
        random_word = random.choice(all_words[0:180])
        category = "Country name"
    elif cat == 2:
        random_word = random.choice(all_words[181:319])
        category = "Farm"
    elif cat == 3:
        random_word = random.choice(all_words[320:389])
        category = "Camping"
    elif cat == 4:
        random_word = random.choice(all_words[390:490])
        category = "Household items/devices"
    elif cat == 5:
        random_word = random.choice(all_words[491:603])
        category = "Beach"
    elif cat == 6:
        random_word = random.choice(all_words[604:648])
        category = "Holidays"
    elif cat == 7:
        random_word = random.choice(all_words[649:699])
        category = "US States"
    elif cat == 8:
        random_word = random.choice(all_words[700:998])
        category = "Sports & Hobbies"
    else:
        random_word = random.choice(all_words[649:699])
        category = "US States"
    # quick band-aid fix to truncate CR in text file, COMING BACK LATER TO FIX
    length = len(random_word) - 1  # to remove carriage return, I'm not using unix format to make the list
    random_word = random_word[:length]  # truncate word with [:length] cause of carriage return in text file...
    underscore_sequence = list("")  # this will be our list of underscores
    # it will be consistently replaced by guesses
    # fill the underscore_sequence list with underscore underscore_sequencelate of the correct word
    for x in random_word:
        if x == " ":
            underscore_sequence += "   "  # in the case of 2-word phrases, need to move everything over
        elif x == "'":
            underscore_sequence += " '"
        else:
            underscore_sequence += " \u2581"  # if not a space, add: \u2581, a special underscore character.
            # using to replace by correctly guessed letters
    return random_word.upper(), category, underscore_sequence
def add_guess_to_list(guess, guessed):
    """Record a single-letter guess and re-render the guess display string.

    guess: a message object whose .clean_content is the guessed text.
    guessed: running list of guessed letters (mutated in place).
    Returns (guessed, all_guessed_string).  Whole-word attempts (more than
    one character) are not added to the letter list.
    """
    attempt = guess.clean_content
    if len(attempt) > 1:
        # full-word guess: list unchanged, just rebuild the display string
        return guessed, "".join(map(str, guessed))
    # single letter: store it uppercased, followed by a space separator
    guessed.extend(attempt.upper())
    guessed.extend(" ")
    return guessed, "".join(map(str, guessed))
def find_matches(guess, correct_word, underscore_sequence):
    """Reveal every occurrence of a guessed letter in the display sequence.

    guess: message object whose .clean_content is the guessed letter.
    correct_word: the answer (upper case).
    underscore_sequence: the per-character display list from pick_word,
        where each character occupies two list slots (separator + glyph).
    Returns (number_of_matches, underscore_sequence).
    """
    letter = guess.clean_content.upper()
    pos = 0
    hits = 0
    for ch in correct_word:
        pos += 1
        if ch == " ":
            # spaces consume extra display slots, so skip their positions
            pos += 2
        if letter == ch:
            # each character's glyph lives at slot pos*2 - 1
            underscore_sequence[pos * 2 - 1] = letter
            hits += 1
    return hits, underscore_sequence
def get_slots_emoji_list():
    """Parse the tiered emoji-name file into three lists.

    The file contains HIGH-TIER-LIST / MEDIUM-TIER-LIST / LOW-TIER-LIST
    marker lines; every other line is an emoji name belonging to the most
    recent marker.  Returns (high_tier, mid_tier, low_tier).
    """
    tiers = {"high": [], "med": [], "low": []}
    markers = {
        "HIGH-TIER-LIST": "high",
        "MEDIUM-TIER-LIST": "med",
        "LOW-TIER-LIST": "low",
    }
    current_tier = ""
    with open("db_and_words\\emoji_names.txt", "r") as lines:
        for raw in lines:
            name = raw.rstrip("\n")
            if name in markers:
                current_tier = markers[name]
            elif current_tier:
                # lines before the first marker are ignored
                tiers[current_tier].append(name)
    return tiers["high"], tiers["med"], tiers["low"]
# short decorator function declaration, confirm that command user has an account in database
def has_account():
    """Command-check decorator: require that the invoking user has a row in
    the users database before the command runs.
    """
    def predicate(ctx):
        # find_user() returns 0 when no database record exists for this author
        return Users(ctx.message.author.id).find_user() != 0
    return commands.check(predicate)
# store data from text files into memory (emoji lists, hangman words, hangman art)
# -- done once at import time so game commands never re-read the files --
high_tier_emotes, mid_tier_emotes, low_tier_emotes = get_slots_emoji_list()
all_words = get_hangman_words()
hangmen = get_hangman_art()
class Games:
    def __init__(self, client):
        """Store the bot client used by the game commands to send messages."""
        self.client = client
"""ROB FUNCTION"""
@has_account()
@commands.cooldown(1, 3600, commands.BucketType.user)
@commands.command(
name="rob",
description="Steal money from others",
brief="can use =steal",
aliases=["thief", "thieve", "ROB", "steal", "mug"],
pass_context=True,
)
async def rob(self, context, *args):
# create instance of the user starting the robbery
robber = Users(context.message.author.id)
# declare 30% fail chance, used to calculate chance of failing rob
fail_chance = 30
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
counter = 1
# if they specified a rob target, change the random target to their specified target
if args:
try:
# use regex to extract only the user-id from the user targeted
victim_id = re.findall("\d+", args[0])[0]
victim = Users(victim_id)
# get_member() returns the "member" object that matches an id provided
target = context.message.server.get_member(victim_id)
# higher fail chance, 35%, if they want to specify a rob target
fail_chance = 35
# if the target doesn't have an account, change fail chance back to 30% and the target will reroll next loop
if victim.find_user() == 0:
fail_chance = 30
await self.client.say(
context.message.author.mention + " Your rob target doesn't have an account."
"\n**Rerolling** rob target now!"
)
if robber.get_user_peace_status() == 1:
fail_chance = 30
await self.client.say(
context.message.author.mention
+ " You are in :dove: **peace mode** :dove: and cannot use =rob @user."
"\n**Rerolling** rob target now!"
)
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
elif victim.get_user_peace_status() == 1:
fail_chance = 30
await self.client.say(
context.message.author.mention
+ " That target is in :dove: **peace mode** :dove: and exempt to =rob @user."
"\n**Rerolling** rob target now!"
)
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
except:
pass
# while the user to rob is the robber, re-roll the target
# while the user to rob does not have an account in the database, re-roll the target
while victim_id == context.message.author.id or victim.find_user() == 0:
# only try 120 members in the user's server
# otherwise if the user was the sole player with an account in the discord server, infinite while loop
# this part is inefficient, but only way I can think of right now with discord's functionality
if counter == 120:
# no users were found to rob if we hit 120 in the counter
# calculate random integer 1-100
# if the result is within 1 through fail chance, they failed the rob, so take bail money and return
if fail_chance >= random.randint(1, 100) >= 1:
robber_level = robber.get_user_level(0)
bail = int(robber_level * 8.4)
robber.update_user_money(bail * -1)
msg = (
"<a:policesiren2:490326123549556746> :oncoming_police_car: "
"<a:policesiren2:490326123549556746>\n<a:monkacop:490323719063863306>"
"\u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B"
"<a:monkacop:490323719063863306>\n" + "Police shot you in the process.\n"
"You spent **$" + str(bail) + "** to bail out of jail."
)
# embed the rob failure message, set thumbnail to 80x80 of a "police siren" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/490326123549556746.gif?size=80")
await self.client.say(embed=em)
return
else:
# if they passed the fail test, give the user a small prize and return
bonus_prize = int(robber.get_user_level(0) * 29.3)
robber.update_user_money(bonus_prize)
msg = (
"**No users found to rob...** "
"\nOn the way back to your basement, you found **$"
+ str(bonus_prize)
+ "** "
+ "<:poggers:490322361891946496>"
)
# embed the rob confirmation message, set thumbnail to 40x40 of a "ninja" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/419506568728543263.gif?size=40")
await self.client.say(embed=em)
return
target = random.choice(list(context.message.server.members))
# create a new instance of victim each loop
| |
None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class DetachSecurityGroupFromL3NetworkAction(inventory.APIDetachSecurityGroupFromL3NetworkMsg):
    """Synchronous wrapper for the APIDetachSecurityGroupFromL3NetworkMsg API message."""
    def __init__(self):
        super(DetachSecurityGroupFromL3NetworkAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[DetachSecurityGroupFromL3NetworkAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class DetachUsbDeviceFromVmAction(inventory.APIDetachUsbDeviceFromVmMsg):
    """Synchronous wrapper for the APIDetachUsbDeviceFromVmMsg API message."""
    def __init__(self):
        super(DetachUsbDeviceFromVmAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[DetachUsbDeviceFromVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class DetachVRouterRouteTableFromVRouterAction(inventory.APIDetachVRouterRouteTableFromVRouterMsg):
    """Synchronous wrapper for the APIDetachVRouterRouteTableFromVRouterMsg API message."""
    def __init__(self):
        super(DetachVRouterRouteTableFromVRouterAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[DetachVRouterRouteTableFromVRouterAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class DownloadBackupFileFromPublicCloudAction(inventory.APIDownloadBackupFileFromPublicCloudMsg):
    """Synchronous wrapper for the APIDownloadBackupFileFromPublicCloudMsg API message."""
    def __init__(self):
        super(DownloadBackupFileFromPublicCloudAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[DownloadBackupFileFromPublicCloudAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class ExportImageFromBackupStorageAction(inventory.APIExportImageFromBackupStorageMsg):
    """Synchronous wrapper for the APIExportImageFromBackupStorageMsg API message."""
    def __init__(self):
        super(ExportImageFromBackupStorageAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[ExportImageFromBackupStorageAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class ExpungeDataVolumeAction(inventory.APIExpungeDataVolumeMsg):
    """Synchronous wrapper for the APIExpungeDataVolumeMsg API message."""
    def __init__(self):
        super(ExpungeDataVolumeAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[ExpungeDataVolumeAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class ExpungeImageAction(inventory.APIExpungeImageMsg):
    """Synchronous wrapper for the APIExpungeImageMsg API message."""
    def __init__(self):
        super(ExpungeImageAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[ExpungeImageAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class ExpungeVmInstanceAction(inventory.APIExpungeVmInstanceMsg):
    """Synchronous wrapper for the APIExpungeVmInstanceMsg API message."""
    def __init__(self):
        super(ExpungeVmInstanceAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[ExpungeVmInstanceAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GCAliyunSnapshotRemoteAction(inventory.APIGCAliyunSnapshotRemoteMsg):
    """Synchronous wrapper for the APIGCAliyunSnapshotRemoteMsg API message."""
    def __init__(self):
        super(GCAliyunSnapshotRemoteAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GCAliyunSnapshotRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateApiJsonTemplateAction(inventory.APIGenerateApiJsonTemplateMsg):
    """Synchronous wrapper for the APIGenerateApiJsonTemplateMsg API message."""
    def __init__(self):
        super(GenerateApiJsonTemplateAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateApiJsonTemplateAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateApiTypeScriptDefinitionAction(inventory.APIGenerateApiTypeScriptDefinitionMsg):
    """Synchronous wrapper for the APIGenerateApiTypeScriptDefinitionMsg API message."""
    def __init__(self):
        super(GenerateApiTypeScriptDefinitionAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateApiTypeScriptDefinitionAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateGroovyClassAction(inventory.APIGenerateGroovyClassMsg):
    """Synchronous wrapper for the APIGenerateGroovyClassMsg API message."""
    def __init__(self):
        super(GenerateGroovyClassAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateGroovyClassAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateInventoryQueryDetailsAction(inventory.APIGenerateInventoryQueryDetailsMsg):
    """Synchronous wrapper for the APIGenerateInventoryQueryDetailsMsg API message."""
    def __init__(self):
        super(GenerateInventoryQueryDetailsAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateInventoryQueryDetailsAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateQueryableFieldsAction(inventory.APIGenerateQueryableFieldsMsg):
    """Synchronous wrapper for the APIGenerateQueryableFieldsMsg API message."""
    def __init__(self):
        super(GenerateQueryableFieldsAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateQueryableFieldsAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateSqlForeignKeyAction(inventory.APIGenerateSqlForeignKeyMsg):
    """Synchronous wrapper for the APIGenerateSqlForeignKeyMsg API message."""
    def __init__(self):
        super(GenerateSqlForeignKeyAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateSqlForeignKeyAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateSqlIndexAction(inventory.APIGenerateSqlIndexMsg):
    """Synchronous wrapper for the APIGenerateSqlIndexMsg API message."""
    def __init__(self):
        super(GenerateSqlIndexAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateSqlIndexAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateSqlVOViewAction(inventory.APIGenerateSqlVOViewMsg):
    """Synchronous wrapper for the APIGenerateSqlVOViewMsg API message."""
    def __init__(self):
        super(GenerateSqlVOViewAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateSqlVOViewAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GenerateTestLinkDocumentAction(inventory.APIGenerateTestLinkDocumentMsg):
    """Synchronous wrapper for the APIGenerateTestLinkDocumentMsg API message."""
    def __init__(self):
        super(GenerateTestLinkDocumentAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GenerateTestLinkDocumentAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAccountQuotaUsageAction(inventory.APIGetAccountQuotaUsageMsg):
    """Synchronous wrapper for the APIGetAccountQuotaUsageMsg API message."""
    def __init__(self):
        super(GetAccountQuotaUsageAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAccountQuotaUsageAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAlarmDataAction(inventory.APIGetAlarmDataMsg):
    """Synchronous wrapper for the APIGetAlarmDataMsg API message."""
    def __init__(self):
        super(GetAlarmDataAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAlarmDataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAllEventMetadataAction(inventory.APIGetAllEventMetadataMsg):
    """Synchronous wrapper for the APIGetAllEventMetadataMsg API message."""
    def __init__(self):
        super(GetAllEventMetadataAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAllEventMetadataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAllMetricMetadataAction(inventory.APIGetAllMetricMetadataMsg):
    """Synchronous wrapper for the APIGetAllMetricMetadataMsg API message."""
    def __init__(self):
        super(GetAllMetricMetadataAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAllMetricMetadataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAttachablePublicL3ForVRouterAction(inventory.APIGetAttachablePublicL3ForVRouterMsg):
    """Synchronous wrapper for the APIGetAttachablePublicL3ForVRouterMsg API message."""
    def __init__(self):
        super(GetAttachablePublicL3ForVRouterAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAttachablePublicL3ForVRouterAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAttachableVpcL3NetworkAction(inventory.APIGetAttachableVpcL3NetworkMsg):
    """Synchronous wrapper for the APIGetAttachableVpcL3NetworkMsg API message."""
    def __init__(self):
        super(GetAttachableVpcL3NetworkAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAttachableVpcL3NetworkAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAuditDataAction(inventory.APIGetAuditDataMsg):
    """Synchronous wrapper for the APIGetAuditDataMsg API message."""
    def __init__(self):
        super(GetAuditDataAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAuditDataAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetAvailableTriggersAction(inventory.APIGetAvailableTriggersMsg):
    """Synchronous wrapper for the APIGetAvailableTriggersMsg API message."""
    def __init__(self):
        super(GetAvailableTriggersAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetAvailableTriggersAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetBackupStorageCandidatesForImageMigrationAction(inventory.APIGetBackupStorageCandidatesForImageMigrationMsg):
    """Synchronous wrapper for the APIGetBackupStorageCandidatesForImageMigrationMsg API message."""
    def __init__(self):
        super(GetBackupStorageCandidatesForImageMigrationAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetBackupStorageCandidatesForImageMigrationAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetBackupStorageCapacityAction(inventory.APIGetBackupStorageCapacityMsg):
    """Synchronous wrapper for the APIGetBackupStorageCapacityMsg API message."""
    def __init__(self):
        super(GetBackupStorageCapacityAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetBackupStorageCapacityAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetBackupStorageTypesAction(inventory.APIGetBackupStorageTypesMsg):
    """Synchronous wrapper for the APIGetBackupStorageTypesMsg API message."""
    def __init__(self):
        super(GetBackupStorageTypesAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetBackupStorageTypesAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateBackupStorageForCreatingImageAction(inventory.APIGetCandidateBackupStorageForCreatingImageMsg):
    """Synchronous wrapper for the APIGetCandidateBackupStorageForCreatingImageMsg API message."""
    def __init__(self):
        super(GetCandidateBackupStorageForCreatingImageAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateBackupStorageForCreatingImageAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateIsoForAttachingVmAction(inventory.APIGetCandidateIsoForAttachingVmMsg):
    """Synchronous wrapper for the APIGetCandidateIsoForAttachingVmMsg API message."""
    def __init__(self):
        super(GetCandidateIsoForAttachingVmAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateIsoForAttachingVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateLdapEntryForBindingAction(inventory.APIGetCandidateLdapEntryForBindingMsg):
    """Synchronous wrapper for the APIGetCandidateLdapEntryForBindingMsg API message."""
    def __init__(self):
        super(GetCandidateLdapEntryForBindingAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateLdapEntryForBindingAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidatePrimaryStoragesForCreatingVmAction(inventory.APIGetCandidatePrimaryStoragesForCreatingVmMsg):
    """Synchronous wrapper for the APIGetCandidatePrimaryStoragesForCreatingVmMsg API message."""
    def __init__(self):
        super(GetCandidatePrimaryStoragesForCreatingVmAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidatePrimaryStoragesForCreatingVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateVmForAttachingIsoAction(inventory.APIGetCandidateVmForAttachingIsoMsg):
    """Synchronous wrapper for the APIGetCandidateVmForAttachingIsoMsg API message."""
    def __init__(self):
        super(GetCandidateVmForAttachingIsoAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateVmForAttachingIsoAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateVmNicForSecurityGroupAction(inventory.APIGetCandidateVmNicForSecurityGroupMsg):
    """Synchronous wrapper for the APIGetCandidateVmNicForSecurityGroupMsg API message."""
    def __init__(self):
        super(GetCandidateVmNicForSecurityGroupAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateVmNicForSecurityGroupAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateVmNicsForLoadBalancerAction(inventory.APIGetCandidateVmNicsForLoadBalancerMsg):
    """Synchronous wrapper for the APIGetCandidateVmNicsForLoadBalancerMsg API message."""
    def __init__(self):
        super(GetCandidateVmNicsForLoadBalancerAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateVmNicsForLoadBalancerAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCandidateZonesClustersHostsForCreatingVmAction(inventory.APIGetCandidateZonesClustersHostsForCreatingVmMsg):
    """Synchronous wrapper for the APIGetCandidateZonesClustersHostsForCreatingVmMsg API message."""
    def __init__(self):
        super(GetCandidateZonesClustersHostsForCreatingVmAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetCandidateZonesClustersHostsForCreatingVmAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetConnectionAccessPointFromRemoteAction(inventory.APIGetConnectionAccessPointFromRemoteMsg):
    """Synchronous wrapper for the APIGetConnectionAccessPointFromRemoteMsg API message."""
    def __init__(self):
        super(GetConnectionAccessPointFromRemoteAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetConnectionAccessPointFromRemoteAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetConnectionBetweenL3NetworkAndAliyunVSwitchAction(inventory.APIGetConnectionBetweenL3NetworkAndAliyunVSwitchMsg):
    """Synchronous wrapper for the APIGetConnectionBetweenL3NetworkAndAliyunVSwitchMsg API message."""
    def __init__(self):
        super(GetConnectionBetweenL3NetworkAndAliyunVSwitchAction, self).__init__()
        self.sessionUuid = None  # caller must set this before run()
        self.out = None  # reply returned by the most recent run()
    def run(self):
        """Fire the API call and return (and cache) its reply."""
        if not self.sessionUuid:
            raise Exception('sessionUuid of action[GetConnectionBetweenL3NetworkAndAliyunVSwitchAction] cannot be None')
        self.out = api.async_call(self, self.sessionUuid)
        return self.out
class GetCpuMemoryCapacityAction(inventory.APIGetCpuMemoryCapacityMsg):
def __init__(self):
super(GetCpuMemoryCapacityAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[GetCpuMemoryCapacityAction] cannot be | |
<filename>src/fuzzingtool/interfaces/cli/cli_output.py<gh_stars>0
# Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
import threading
import sys
from typing import Tuple
from math import floor, ceil, log10
from shutil import get_terminal_size
from ...objects.result import Result
from ...utils.consts import MAX_PAYLOAD_LENGTH_TO_OUTPUT, FuzzType
from ...utils.utils import fix_payload_to_output
from ...utils.http_utils import get_parsed_url, get_pure_url
from ...utils.result_utils import ResultUtils
class Colors:
    """ANSI escape sequences used to color the terminal output."""
    RESET = '\033[0m'
    GRAY = '\033[90m'
    YELLOW = '\033[33m'
    RED = '\u001b[31;1m'
    GREEN = '\u001b[32;1m'
    BLUE = '\u001b[34m'
    BLUE_GRAY = '\033[36m'
    CYAN = '\u001b[36m'
    LIGHT_GRAY = '\u001b[38;5;250m'
    LIGHT_YELLOW = '\u001b[33;1m'
    LIGHT_RED = '\033[91m'
    LIGHT_GREEN = '\u001b[38;5;48m'
    BOLD = '\033[1m'

    @staticmethod
    def disable() -> None:
        """Blank out every color code so the program prints plain text."""
        for attr in ('RESET', 'GRAY', 'YELLOW', 'RED', 'GREEN', 'BLUE',
                     'BLUE_GRAY', 'CYAN', 'LIGHT_GRAY', 'LIGHT_YELLOW',
                     'LIGHT_RED', 'LIGHT_GREEN', 'BOLD'):
            setattr(Colors, attr, '')
class CliOutput:
"""Class that handle with the outputs
Attributes:
lock: The threads locker for screen output
break_line: A string to break line
last_inline: A flag to say if the last output was inline or not
info: The info label
warning: The warning label
error: The error label
abort: The abort label
worked: The worked label
not_worked: The not worked label
"""
@staticmethod
def print(msg: str) -> None:
"""Print the message
@type msg: str
@param msg: The message
"""
print(msg)
@staticmethod
def help_title(num_spaces: int, title: str) -> None:
"""Output the help title
@type num_spaces: int
@param num_spaces: The number of spaces before the title
@type title: str
@param title: The title or subtitle
"""
print("\n"+' '*num_spaces+title)
@staticmethod
def help_content(num_spaces: int, command: str, desc: str) -> None:
"""Output the help content
@type num_spaces: int
@param num_spaces: The number of spaces before the content
@type command: str
@param command: The command to be used in the execution argument
@type desc: str
@param desc: The description of the command
"""
max_command_size_with_space = 27
if (len(command)+num_spaces) <= max_command_size_with_space:
print(' '
* num_spaces
+ ("{:<" + str(max_command_size_with_space-num_spaces) + "}")
.format(command)
+ ' ' + desc)
else:
print(' '
* num_spaces
+ ("{:<" + str(max_command_size_with_space-num_spaces) + "}")
.format(command))
print(' '*(max_command_size_with_space)+' '+desc)
def __init__(self):
self.__lock = threading.Lock()
self.__last_inline = False
self.__info = f'{Colors.GRAY}[{Colors.BLUE_GRAY}INFO{Colors.GRAY}]{Colors.RESET} '
self.__warning = f'{Colors.GRAY}[{Colors.YELLOW}WARNING{Colors.GRAY}]{Colors.RESET} '
self.__error = f'{Colors.GRAY}[{Colors.RED}ERROR{Colors.GRAY}]{Colors.RESET} '
self.__abort = f'{Colors.GRAY}[{Colors.RED}ABORT{Colors.GRAY}]{Colors.RESET} '
self.__worked = f'{Colors.GRAY}[{Colors.GREEN}+{Colors.GRAY}]{Colors.RESET} '
self.__not_worked = f'{Colors.GRAY}[{Colors.RED}-{Colors.GRAY}]{Colors.RESET} '
def set_simple_output_mode(self) -> None:
"""Set the display to simple output mode, change labels"""
def get_blank_time() -> str:
return ''
self.__get_time = get_blank_time
self.__info = f'{Colors.GRAY}[{Colors.BLUE_GRAY}*{Colors.GRAY}]{Colors.RESET} '
self.__warning = f'{Colors.GRAY}[{Colors.YELLOW}!{Colors.GRAY}]{Colors.RESET} '
self.__error = f'{Colors.GRAY}[{Colors.RED}!!{Colors.GRAY}]{Colors.RESET} '
self.__abort = f'{Colors.GRAY}[{Colors.RED}AB{Colors.GRAY}]{Colors.RESET} '
self.__worked = f'{Colors.GRAY}[{Colors.GREEN}+{Colors.GRAY}]{Colors.RESET} '
self.__not_worked = f'{Colors.GRAY}[{Colors.RED}-{Colors.GRAY}]{Colors.RESET} '
def set_new_job(self, total_requests: int) -> None:
"""Set the variables from job manager
@type total_requests: int
@param total_requests: The number of requests that'll be made
"""
self.__total_requests = total_requests
self.__request_indent = ceil(log10(total_requests))
self.__progress_length = (38 # Progress bar, spaces, square brackets and slashes
+ MAX_PAYLOAD_LENGTH_TO_OUTPUT
+ self.__request_indent * 2)
    def info_box(self, msg: str) -> None:
        """Print the message with an info label.

        @type msg: str
        @param msg: The message
        """
        # prefix = newline-if-inline + timestamp + colored [INFO] label
        # (__get_time / __get_info are defined elsewhere in this class)
        print(f'{self._get_break()}{self.__get_time()}{self.__get_info(msg)}')
    def error_box(self, msg: str) -> None:
        """End the application with error label and a message

        @type msg: str
        @param msg: The message
        """
        # exit() raises SystemExit; the formatted message is its payload
        exit(f'{self._get_break()}{self.__get_time()}{self.__get_error(msg)}')
    def warning_box(self, msg: str) -> None:
        """Print the message with a warning label

        @type msg: str
        @param msg: The message
        """
        # take the lock so the warning is not interleaved with the inline
        # progress output written by other threads
        with self.__lock:
            sys.stdout.flush()  # push any pending inline output first
            print(f'{self._get_break()}{self.__get_time()}{self.__get_warning(msg)}')
    def abort_box(self, msg: str) -> None:
        """Print the message with abort label and a message

        @type msg: str
        @param msg: The message
        """
        # serialize with the other writers so the abort line stays clean
        with self.__lock:
            sys.stdout.flush()  # push any pending inline output first
            print(f'{self._get_break()}{self.__get_time()}{self.__get_abort(msg)}')
    def worked_box(self, msg: str) -> None:
        """Print the message with worked label and a message

        @type msg: str
        @param msg: The message
        """
        # no lock here: print_result already holds __lock when calling this
        print(f'{self.__get_time()}{self.__get_worked(msg)}')
    def not_worked_box(self, msg: str) -> None:
        """Print the message with not worked label and a message

        @type msg: str
        @param msg: The message
        """
        with self.__lock:  # serialize with the inline progress writer
            print(f"{self.__get_time()}{self.__get_not_worked(msg)}")
def ask_yes_no(self, ask_type: str, msg: str) -> bool:
"""Ask a question for the user
@type ask_type: str
@param ask_type: The type of the asker
@type msg: str
@param msg: The message
@returns bool: The answer based on the user's input
"""
if ask_type == 'warning':
get_type = self.__get_warning
else:
get_type = self.__get_info
print(f"{self._get_break()}{self.__get_time()}{get_type(msg)} (y/N) ", end='')
action = input()
if action == 'y' or action == 'Y':
return True
return False
def ask_data(self, msg: str) -> str:
"""Ask data for the user
@type msg: str
@param msg: The message
@returns mixed: The data asked
"""
print(f"{self._get_break()}{self.__get_time()}{self.__get_info(msg)}", end=': ')
return input()
def print_config(self, key: str, value: str = '', spaces: int = 0) -> None:
"""The config's printer function
@type key: str
@param key: The name of the config
@type value: str
@param value: The value of that config
@type spaces: int
@param spaces: The number of spaces to indent the config output
"""
print(f"{' '*(spaces+3)}{Colors.BLUE}{key}: "
f"{Colors.LIGHT_YELLOW}{value}{Colors.RESET}")
    def print_configs(self,
                      target: dict,
                      dictionary: dict) -> None:
        """Prints the program configuration

        @type target: dict
        @param target: The target
        @type dictionary: dict
        @param dictionary: The dictionary used in the tests
        """
        print("")
        spaces = 3  # extra indent for the sub-entries under "Target"
        self.print_config("Target", get_parsed_url(get_pure_url(target['url'])).hostname)
        self.print_config("Method", target['method'], spaces)
        self.print_config("HTTP headers", target['header'], spaces)
        if target['body']:
            self.print_config("Body data", target['body'], spaces)
        self.print_config("Fuzzing type", target['type_fuzzing'], spaces)
        dict_size = dictionary['len']
        # mention how many duplicates were dropped when the wordlist
        # was deduplicated
        if 'removed' in dictionary.keys() and dictionary['removed']:
            dict_size = (f"{dictionary['len']} "
                         f"(removed {dictionary['removed']} "
                         f"duplicated payloads)")
        self.print_config("Dictionary size", dict_size)
        print("")
def get_percentage(self, item_index: int) -> str:
"""Get the percentage string from item_index per total_requests
@type item_index: int
@param item_index: The actual request index
@returns str: The percentage str
"""
return f"{self._get_percentage_value(item_index, self.__total_requests)}%"
    def progress_status(self,
                        item_index: int,
                        payload: str,
                        current_job: int,
                        total_jobs: int) -> None:
        """Output the progress status of the fuzzing

        @type item_index: int
        @param item_index: The actual request index
        @type payload: str
        @param payload: The payload used in the request
        @type current_job: int
        @param current_job: The index of the job in progress
        @type total_jobs: int
        @param total_jobs: The total number of jobs
        """
        # NOTE(review): ceil(log10(total_jobs)) is 0 when total_jobs == 1
        # and under-counts digits for exact powers of ten — confirm intended
        jobs_indent = ceil(log10(total_jobs))
        progress_length = self.__progress_length + (2 * jobs_indent)
        # only draw the status line when it fits the current terminal width
        if progress_length <= get_terminal_size()[0]:
            percentage_value = self._get_percentage_value(item_index, self.__total_requests)
            status = self._get_progress_bar(percentage_value)
            payload = fix_payload_to_output(payload)
            # "<bar> NN% [idx/total] [job/jobs] :: payload"
            status += (f" {Colors.LIGHT_YELLOW}{percentage_value:>3}% {Colors.RESET}"
                       + f"{Colors.GRAY}[{Colors.LIGHT_GRAY}{item_index:>{self.__request_indent}}"
                       + f"{Colors.GRAY}/{Colors.LIGHT_GRAY}{self.__total_requests}"
                       + f"{Colors.GRAY}]{Colors.RESET} "
                       + f"{Colors.GRAY}[{Colors.LIGHT_GRAY}{current_job:>{jobs_indent}}"
                       + f"{Colors.GRAY}/{Colors.LIGHT_GRAY}{total_jobs}"
                       + f"{Colors.GRAY}]{Colors.RESET}")
            status += f"{Colors.GRAY} :: {Colors.LIGHT_GRAY}{payload:<{MAX_PAYLOAD_LENGTH_TO_OUTPUT}}"
            with self.__lock:
                if not self.__last_inline:
                    self.__last_inline = True
                    # __erase_line is defined elsewhere in this class
                    self.__erase_line()
                # "\r" rewrites the same terminal line on each update
                print(f"\r{status}", end='')
    def print_result(self, result: Result, vuln_validator: bool) -> None:
        """Custom output print for box mode

        @type result: Result
        @param result: The result object
        @type vuln_validator: bool
        @param vuln_validator: Case the output is marked as vulnerable
        """
        formatted_result_str = self.__get_formatted_result(result)
        if not vuln_validator:
            # not vulnerable: not_worked_box takes the lock itself
            self.not_worked_box(formatted_result_str)
        else:
            with self.__lock:
                # leave inline mode so the hit is printed on its own line;
                # worked_box relies on this lock being held
                if self.__last_inline:
                    self.__last_inline = False
                    self.__erase_line()
                self.worked_box(formatted_result_str)
def _get_break(self) -> str:
"""Get a break line if the last message was inline
@returns str: The break line
"""
if self.__last_inline:
self.__last_inline = False
return '\n'
return ''
def _get_percentage_value(self, item_index: int, total_requests: int) -> int:
"""Get the percentage from item_index per total_requests
@type item_index: int
@param item_index: The actual request index
@type total_requests: int
@param total_requests: The total of requests quantity
@returns int: The percentage value
"""
return int((item_index/total_requests)*100)
def _get_progress_bar(self, percentage_value: int) -> str:
"""Get a formated progress bar
@type percentage_value: int
@param percentage_value: The | |
#!/usr/bin/env python3
"""
Converts Satisfactory save games (.sav) into a more readable format (.json)
"""
import struct
import functools
import itertools
import csv
import binascii
import sys
import json
import argparse
import pathlib
# Command-line interface: a .sav input is mandatory; output path and pretty
# printing are optional.
parser = argparse.ArgumentParser(
    description='Converts Satisfactory save games into a more readable format')
parser.add_argument('file', metavar='FILE', type=str,
                    help='save game to process (.sav file extension)')
parser.add_argument('--output', '-o', type=str, help='output file (.json)')
parser.add_argument('--pretty', '-p', help='pretty print json', action='store_true')
args = parser.parse_args()
extension = pathlib.Path(args.file).suffix
if extension != '.sav':
    print('error: extension of save file should be .sav', file=sys.stderr)
    exit(1)
f = open(args.file, 'rb')
# determine the file size (seek to end, tell, seek back) so that progress
# through the save can be tracked against bytesRead
f.seek(0, 2)
fileSize = f.tell()
f.seek(0, 0)
# running count of bytes consumed from f by the read* helpers below
bytesRead = 0
def assertFail(message):
    """Report a failed parsing assertion and abort.

    Prints the message and the next 32 bytes of the file (hex) to help
    locate the problem, waits for a keypress so the output can be read,
    then raises.  An explicit raise is used instead of `assert False`
    so the abort still happens when Python runs with -O (asserts stripped).
    """
    print('assertion failed: ' + message, file=sys.stderr)
    # show the next bytes to help debugging
    print(readHex(32))
    input()
    raise AssertionError(message)
def readInt():
    """Read a little-endian signed 32-bit integer from the save file."""
    global bytesRead
    raw = f.read(4)
    bytesRead += 4
    return struct.unpack('i', raw)[0]
def readFloat():
    """Read a 32-bit IEEE-754 float from the save file."""
    global bytesRead
    raw = f.read(4)
    bytesRead += 4
    return struct.unpack('f', raw)[0]
def readLong():
    """Read a little-endian signed 64-bit integer from the save file."""
    global bytesRead
    raw = f.read(8)
    bytesRead += 8
    return struct.unpack('q', raw)[0]
def readByte():
    """Read one signed byte from the save file."""
    global bytesRead
    raw = f.read(1)
    bytesRead += 1
    return struct.unpack('b', raw)[0]
def assertNullByte():
    """Consume one byte and fail unless it is a NUL (0x00) padding byte."""
    global bytesRead
    bytesRead += 1
    padding = f.read(1)
    if padding != b'\x00':
        assertFail('not null but ' + str(padding))
def readLengthPrefixedString():
    """
    Reads a string that is prefixed with its length.

    The int32 prefix is the character count including the NUL terminator.
    A negative prefix marks a UTF-16 string (two bytes per character); a
    positive one marks ASCII; zero means the empty string with no bytes
    following.  The previously duplicated UTF-16/ASCII branches are merged:
    only the character width and codec differ.
    """
    global bytesRead
    length = readInt()
    if length == 0:
        return ''
    if length < 0:
        # Negative length: UTF-16, two bytes per character
        length = length * -2
        width, encoding = 2, 'utf-16'
    else:
        width, encoding = 1, 'ascii'
    chars = f.read(length - width)
    zero = f.read(width)
    bytesRead += length
    if zero != b'\x00' * width:  # We assume that a string always ends in NUL byte(s)
        if length > 100:
            assertFail('zero is ' + str(zero) + ' in ' + str(chars[0:100]))
        else:
            assertFail('zero is ' + str(zero) + ' in ' + str(chars))
    return chars.decode(encoding)
def readHex(count):
    """
    Reads count bytes and returns their hex form.

    Bytes are space separated, with an extra space after every group of
    four (except near the end of the run).
    """
    global bytesRead
    bytesRead += count
    data = f.read(count)
    pieces = []
    for position, byte in enumerate(data, 1):
        pieces.append(format(byte, '02x') + ' ')
        if position % 4 == 0 and position < count - 1:
            pieces.append(' ')
    return ''.join(pieces)
# Read the file header
# NOTE: field order mirrors the binary layout of the .sav header -- do not
# reorder these reads.
saveHeaderType = readInt()
saveVersion = readInt() # Save Version
buildVersion = readInt() # BuildVersion
mapName = readLengthPrefixedString() # MapName
mapOptions = readLengthPrefixedString() # MapOptions
sessionName = readLengthPrefixedString() # SessionName
playDurationSeconds = readInt() # PlayDurationSeconds
saveDateTime = readLong() # SaveDateTime
'''
to convert this FDateTime to a unix timestamp use:
saveDateSeconds = saveDateTime / 10000000
# see https://stackoverflow.com/a/1628018
print(saveDateSeconds-62135596800)
'''
sessionVisibility = readByte() # SessionVisibility
entryCount = readInt() # total entries
# Skeleton of the JSON document; 'objects' and 'collected' are filled in
# while the body of the save is parsed below.
saveJson = {
    'saveHeaderType': saveHeaderType,
    'saveVersion': saveVersion,
    'buildVersion': buildVersion,
    'mapName': mapName,
    'mapOptions': mapOptions,
    'sessionName': sessionName,
    'playDurationSeconds': playDurationSeconds,
    'saveDateTime': saveDateTime,
    'sessionVisibility': sessionVisibility,
    'objects': [],
    'collected': []
}
def readActor():
    """Read a world-entry header of type 1: an actor with a transform."""
    class_name = readLengthPrefixedString()
    level_name = readLengthPrefixedString()
    path_name = readLengthPrefixedString()
    need_transform = readInt()
    # Binary order matters: rotation quaternion (4 floats), translation (3),
    # then scale (3); list comprehensions evaluate left to right, preserving it.
    rotation = [readFloat() for _ in range(4)]
    translation = [readFloat() for _ in range(3)]
    scale3d = [readFloat() for _ in range(3)]
    was_placed_in_level = readInt()
    return {
        'type': 1,
        'className': class_name,
        'levelName': level_name,
        'pathName': path_name,
        'needTransform': need_transform,
        'transform': {
            'rotation': rotation,
            'translation': translation,
            'scale3d': scale3d,
        },
        'wasPlacedInLevel': was_placed_in_level
    }
def readObject():
    """Read a world-entry header of type 0: a plain object, no transform."""
    # Insertion order matches the binary field order, so the JSON output
    # keeps the same key order as before.
    entry = {'type': 0}
    entry['className'] = readLengthPrefixedString()
    entry['levelName'] = readLengthPrefixedString()
    entry['pathName'] = readLengthPrefixedString()
    entry['outerPathName'] = readLengthPrefixedString()
    return entry
# Read every world-entry header; each record is prefixed with its type id.
# The loop variable was renamed from `type` to avoid shadowing the builtin.
for i in range(entryCount):
    entry_type = readInt()
    if entry_type == 1:
        saveJson['objects'].append(readActor())
    elif entry_type == 0:
        saveJson['objects'].append(readObject())
    else:
        assertFail('unknown type ' + str(entry_type))
elementCount = readInt()
# So far these counts have always been the same and the entities seem to belong 1 to 1 to the actors/objects read above
if elementCount != entryCount:
    assertFail('elementCount ('+str(elementCount) +
               ') != entryCount('+str(entryCount)+')')
def readProperty(properties):
name = readLengthPrefixedString()
if name == 'None':
return
prop = readLengthPrefixedString()
length = readInt()
index = readInt()
property = {
'name': name,
'type': prop,
'_length': length,
'index': index
}
if prop == 'IntProperty':
assertNullByte()
property['value'] = readInt()
elif prop == 'StrProperty':
assertNullByte()
property['value'] = readLengthPrefixedString()
elif prop == 'StructProperty':
type = readLengthPrefixedString()
property['structUnknown'] = readHex(17) # TODO
if type == 'Vector' or type == 'Rotator':
x = readFloat()
y = readFloat()
z = readFloat()
property['value'] = {
'type': type,
'x': x,
'y': y,
'z': z
}
elif type == 'Box':
minX = readFloat()
minY = readFloat()
minZ = readFloat()
maxX = readFloat()
maxY = readFloat()
maxZ = readFloat()
isValid = readByte()
property['value'] = {
'type': type,
'min': [minX, minY, minZ],
'max': [maxX, maxY, maxZ],
'isValid': isValid
}
elif type == 'LinearColor':
b = readFloat()
g = readFloat()
r = readFloat()
a = readFloat()
property['value'] = {
'type': type,
'b': b,
'g': g,
'r': r,
'a': a
}
elif type == 'Transform':
props = []
while (readProperty(props)):
pass
property['value'] = {
'type': type,
'properties': props
}
elif type == 'Quat':
a = readFloat()
b = readFloat()
c = readFloat()
d = readFloat()
property['value'] = {
'type': type,
'a': a,
'b': b,
'c': c,
'd': d
}
elif type == 'RemovedInstanceArray' or type == 'InventoryStack':
props = []
while (readProperty(props)):
pass
property['value'] = {
'type': type,
'properties': props
}
elif type == 'InventoryItem':
unk1 = readLengthPrefixedString() # TODO
itemName = readLengthPrefixedString()
levelName = readLengthPrefixedString()
pathName = readLengthPrefixedString()
props = []
readProperty(props)
# can't consume null here because it is needed by the entaingling struct
property['value'] = {
'type': type,
'unk1': unk1,
'itemName': itemName,
'levelName': levelName,
'pathName': pathName,
'properties': props
}
elif type == 'Color':
a = readHex(1)
b = readHex(1)
c = readHex(1)
d = readHex(1)
property['value'] = {
'type': type,
'r': a,
'g': b,
'b': c,
'a': d
}
elif type == 'RailroadTrackPosition':
levelName = readLengthPrefixedString()
pathName = readLengthPrefixedString()
offset = readFloat()
forward = readFloat()
property['value'] = {
'type': type,
'levelName': levelName,
'pathName': pathName,
'offset': offset,
'forward': forward
}
elif type == 'TimerHandle':
property['value'] = {
'type': type,
'handle': readLengthPrefixedString()
}
else:
print(property)
assertFail('Unknown type: ' + type)
elif prop == 'ArrayProperty':
itemType = readLengthPrefixedString()
assertNullByte()
count = readInt()
values = []
if itemType == 'ObjectProperty':
for j in range(0, count):
values.append({
'levelName': readLengthPrefixedString(),
'pathName': readLengthPrefixedString()
})
elif itemType == 'StructProperty':
structName = readLengthPrefixedString()
structType = readLengthPrefixedString()
structSize = readInt()
zero = readInt()
if zero != 0:
assertFail('not zero: ' + str(zero))
type = readLengthPrefixedString()
property['structName'] = structName
property['structType'] = structType
property['structInnerType'] = type
property['structUnknown'] = readHex(17) # TODO what are those?
property['_structLength'] = structSize
for i in range(0, count):
props = []
while (readProperty(props)):
pass
values.append({
'properties': props
})
elif itemType == 'IntProperty':
for i in range(0, count):
values.append(readInt())
elif itemType == 'ByteProperty':
for i in range(0, count):
values.append(readByte())
else:
assertFail('unknown itemType ' + itemType + ' in name ' + name)
property['value'] = {
'type': itemType,
'values': values
}
elif prop == 'ObjectProperty':
assertNullByte()
property['value'] = {
'levelName': readLengthPrefixedString(),
'pathName': readLengthPrefixedString()
}
elif prop == 'BoolProperty':
property['value'] = readByte()
assertNullByte()
elif prop == 'FloatProperty': # TimeStamps that are FloatProperties are negative to the current time in seconds?
assertNullByte()
property['value'] = readFloat()
elif prop == 'EnumProperty':
enumName = readLengthPrefixedString()
assertNullByte()
valueName = readLengthPrefixedString()
property['value'] = {
'enum': enumName,
'value': valueName,
}
elif prop == 'NameProperty':
assertNullByte()
property['value'] = readLengthPrefixedString()
elif prop == 'MapProperty':
name = readLengthPrefixedString()
valueType = readLengthPrefixedString()
for i in range(0, 5):
assertNullByte()
count = readInt()
values = {
}
for i in range(0, count):
key = readInt()
props = []
while readProperty(props):
pass
values[key] = props
property['value'] = {
'name': name,
'type': valueType,
'values': values
}
elif prop == 'ByteProperty': # TODO
unk1 = readLengthPrefixedString() # TODO
if unk1 == 'None':
assertNullByte()
property['value'] = {
'unk1': unk1,
'unk2': readByte()
}
else:
assertNullByte()
unk2 = readLengthPrefixedString() # TODO
property['value'] = {
'unk1': unk1,
'unk2': unk2
}
elif prop == 'TextProperty':
assertNullByte()
property['unknown1'] = readInt()
property['unknown2'] = readByte()
property['unknown3'] = readInt()
property['unknown4'] = readLengthPrefixedString()
property['value'] = readLengthPrefixedString()
| |
#!/bin/env python
#===============================================================================
# NAME: GseApi.py
#
# DESCRIPTION: A basic API of command and telemetry monitoring capabilities.
# AUTHOR: reder
# EMAIL: <EMAIL>
# DATE CREATED: June 17, 2015
#
# Copyright 2015, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
from __future__ import print_function
import sys
import os
import logging
import time
import glob
import signal
import subprocess
from subprocess import PIPE, STDOUT
from fprime.gse.utils import Logger
from fprime.gse.utils import command_args
from checksum import *
from fprime.gse.models.serialize.u32_type import *
from fprime.gse.controllers import command_loader
from fprime.gse.controllers import commander
from fprime.gse.controllers import event_loader
from fprime.gse.controllers import event_listener
from fprime.gse.controllers import channel_loader
from fprime.gse.controllers import packetize_channel_loader
from fprime.gse.controllers import channel_listener
from fprime.gse.controllers import packetize_channel_listener
from fprime.gse.controllers import client_sock
from fprime.gse.controllers import socket_listener
from fprime.gse.controllers import file_uplink_client
from fprime.gse.controllers import file_downlink_client
from fprime.gse.controllers.file_downlink_client import DownlinkStatus
from fprime.gse.controllers.file_uplink_client import UplinkStatus
from ConfigManager import ConfigManager
from ConfigParser import NoOptionError
class GseApi(object):
"""
This class is a general API into the gse.py graphical functionality
but is indepent of any GUI package. It is used to build test applicaitons
for commanding and listening to event and channel telemetry.
This class will be used to build three command line applicaitons which
are:
gse_send(...) to send a command.
gse_send_and_wait(...) to send command and wait for log event or channel telemetry value.
gse_monitor(...) to monitor for log event messages or channel telemetry values.
If blocking then block and update console and log file, if not blocking then
poll and if message then return it and update console and log file.
"""
    def __init__(self, server_addr='127.0.0.1', port=50000, generated_path='', packet_spec_path='', build_root='', log_file_cmds=None, log_file_events=None, log_file_channel=None, log_file_path=None, log_file_updown=None, listener_enabled=False, verbose=False, quiet=False):
        """
        Constructor.

        @param server_addr: Socket server addr
        @param port: Socket server port number
        @param generated_path: path to autocoded command/telemetry descriptors (falls back to env/gse.ini)
        @param packet_spec_path: path to packet definition XML (optional)
        @param build_root: build root path (falls back to env/gse.ini)
        @param log_file_cmds: Name of command log file
        @param log_file_events: Name of log event log file
        @param log_file_channel: Name of channel telemetry log file
        @param log_file_path: Name of common directory path to save log files into
        @param log_file_updown: Folder for file uplink/downlink logs (null handler when None)
        @param listener_enabled: If True then event/channel telemetry listener thread enabled, else disabled
        @param verbose: Enable diagnostic display of information
        @param quiet: Suppress non-stderr console output (see print_msg)
        """
        # 1. Connect to socket server using controllers.client_sock.py
        # 2. Create log files and loggers
        # 3. Load configuration for commands, events, channels, using controllers.XXXXX_loader.py
        # 3. Start listener thread controllers.event_listern.py
        self.quiet = quiet
        # For every console output log to a default file and stdout.
        logfile = time.strftime("%y-%m-%d-%H-%M-%S", time.gmtime()) + '_GSEStdOut.log'
        #p = opts.log_file_path + os.sep + "stdout"
        p = '.' + os.sep + "stdout"
        if not os.path.exists(p):
            os.makedirs(p)
        f = p + os.sep + logfile
        # setup default for generated python command / telemetry descriptor files.
        # these are autocoded descriptions of all.
        config = ConfigManager.getInstance()
        self.print_msg("Searching for standard configuration")
        #Updated unified configuration settings
        # Resolution order inside get_configured_setting: CLI arg, environment
        # variable, then gse.ini.
        self.build_root = self.get_configured_setting(build_root, config, "filepaths", "build_root")
        self.generated_path = self.get_configured_setting(generated_path, config, "filepaths", "generated_path")
        #Packet spec path not strictly required, thus removing it
        try:
            self.packet_spec_path = self.get_configured_setting(packet_spec_path, config, "filepaths", "packet_path")
        except NoOptionError:
            self.packet_spec_path = None
        # display configuration before starting GUI here...
        sep_line = 80*"="
        if verbose:
            logger = Logger.connectOutputLogger(f,'stdout', logging.INFO)
        else:
            logger = Logger.connectOutputLogger(f,'stdout', logging.WARNING)
        logger.info("Created log: %s" % f)
        logger.info("User: %s" % os.environ['USER'])
        (sysname, nodename, release, version, machine) = os.uname()
        logger.info("OS Name: %s" % sysname)
        logger.info("Machine Network Host Name: %s" % nodename)
        logger.info("Release: %s" % release)
        logger.info("Version: %s" % version)
        logger.info("Machine: %s" % machine)
        logger.info(sep_line)
        # load commands, events, channelized telemetry, and event listener
        sys.path.append(generated_path)
        # add path for serializables
        sys.path.append(generated_path + os.sep + "serializable")
        self._cmds = command_loader.CommandLoader.getInstance()
        self._cmds.create(generated_path + os.sep + "commands")
        self._events = event_loader.EventLoader.getInstance()
        self._events.create(generated_path + os.sep + "events")
        self._channels = channel_loader.ChannelLoader.getInstance()
        self._channels.create(generated_path + os.sep + "channels")
        # Packetized channels are only loaded when a packet spec was resolved.
        if not self.packet_spec_path is None and self.packet_spec_path != "":
            self._pkt_channels = packetize_channel_loader.PacketizeChannelLoader.getInstance()
            self._pkt_channels.create(self.packet_spec_path)
        self._ev_listener = event_listener.EventListener.getInstance()
        self._ev_listener.setupLogging()
        self._ch_listener = channel_listener.ChannelListener.getInstance()
        self._ch_listener.setupLogging()
        self._pk_listener = packetize_channel_listener.PacketizeChannelListener.getInstance()
        #self._pk_listener.setupLogging()
        self.__sock_listener = socket_listener.SocketListener.getInstance()
        self.__logger = logger
        self.__server_addr = server_addr
        self.__port = port
        # Uplink and downlink clients will log to null handler if none is specified
        file_uplink_client.set_logger(log_folder=log_file_updown)
        file_downlink_client.set_logger(log_folder=log_file_updown)
        # connect to TCP server
        # NOTE(review): a failed connection leaves self.__sock as None;
        # later calls (e.g. disconnect) presumably expect a live socket -- confirm.
        try:
            self.__sock = client_sock.ClientSocket(server_addr, port)
            self.__sock.send("Register GUI\n")
            self.__sock_listener.connect(self.__sock)
        except IOError:
            self.__sock = None
        super(GseApi, self).__init__()
def get_configured_setting(self, cmdline, config, group, key, environment=None):
'''
Get a configuration setting from the gse.ini file or from the environment.
@param cmdline: value passed in from command line
@param config: configuration dictionary passed in
@param group: group in configuration file
@param key: configuration key
@param environment: environment varable, if not key.upper
'''
#Default the environment key
if environment is None:
environment = key.upper()
#Get configuration in order of precedence
# 1. Command line
# 2. Environment variable (most)
# 2. gse.ini
try:
if not cmdline is None and cmdline != "":
return cmdline
elif environment in os.environ:
return os.environ[environment]
value = config.get(group, key)
if value != "" and not value is None:
return config.get(group, key)
except NoOptionError:
self.print_msg("WARNING: {0} not set. Specify on the command line,".format(key) +
"in the environment, or in gse.ini.",
file=sys.stderr)
#Re throw the problem
raise
def print_msg(self, message, file=sys.stdout):
'''
Print a message subject to the quietness level
'''
if not self.quiet or file == sys.stderr:
print(message, file=file)
    def disconnect(self):
        '''
        Disconnect from the socket server connection made in the constructor.
        '''
        # NOTE(review): __sock is None when the initial connect failed;
        # this would then raise AttributeError -- confirm callers guard it.
        self.__sock.disconnect()
    class TimeoutException(Exception):
        """Raised internally when a monitored wait exceeds its timeout."""
        pass
    def _timeout_sig_handler(self, signum, frame):
        # SIGALRM handler: convert the alarm into a TimeoutException.
        raise self.TimeoutException()
    def __ctrl_c_sig_handler(self, signum, frame):
        # SIGINT handler: abort with a plain Exception on Ctrl-C.
        raise Exception('Ctrl-C Received, Exiting.')
    def __loop_queue(self, id, type, timeout=None):
        """
        Grabs all telemetry and data in event listener's queue until the queried event / tlm id is found.
        Returns a tuple with two lists (tlm_list,evr_list)

        @param id: channel or event id to wait for (parameter shadows builtin id)
        @param type: "ch" or "evr" (parameter shadows builtin type)
        @param timeout: seconds to wait; 0 means poll once, None/absent means wait forever
        """
        tlm_list = []
        evr_list = []
        recv_id= ''
        if timeout:
            signal.signal(signal.SIGALRM, self._timeout_sig_handler)
            #Note: signal.alarm only works on seconds, interval timers
            # can use a float number of seconds, consistent with
            # python's time.time()
            signal.setitimer(signal.ITIMER_REAL, timeout)
        self.print_msg("Waiting for {0} ID {1}".format(type, id))
        try:
            notFound = True
            while notFound:
                tlm, evr = self._pop_queue()
                # timeout == 0 means "poll": give up the first time the
                # queues come back empty.
                if tlm is None and evr is None and timeout == 0:
                    raise self.TimeoutException()
                elif tlm is None and evr is None:
                    time.sleep(0.1)
                else:
                    # Collect everything seen; stop once the requested id
                    # arrives on the requested stream.
                    if tlm:
                        tlm_list.append(tlm)
                        (recv_id, _) = tlm
                        if type == "ch" and id == recv_id:
                            notFound = False
                    if evr:
                        evr_list.append(evr)
                        (recv_id, _) = evr
                        if type == "evr" and id == recv_id:
                            notFound = False
        except self.TimeoutException:
            self.print_msg("Timeout reached, unable to find {0} ID {1}".format(type, id))
        if timeout:
            # alarm(0) cancels ITIMER_REAL -- presumably the same underlying
            # timer as setitimer on this platform; TODO confirm.
            signal.alarm(0)
        return tlm_list, evr_list
    def _pop_queue(self):
        """
        Grabs one event/telemetry from queue

        @return: (tlm, evr) tuple; either element is None when its queue is empty
        """
        #If the queue is empty attempt to find channels in packetized data
        if self._ch_listener.queue_size() == 0:
            #Note: return paket not used, packets routed to ch listener
            self._pk_listener.get_packet()
        evr = self._ev_listener.get_event()
        tlm = self._ch_listener.get_channel()
        return tlm, evr
def receive(self):
"""
Grabs all telemetry and data in event listener's queue until the queue is emptied.
Return a list of telemetry and events found.
"""
tlm_list = []
evr_list = []
recv_id= ''
notFound = True
while notFound:
tlm, evr = self._pop_queue()
if tlm is None and evr is None:
break
else:
if tlm:
tlm_list.append(tlm)
(recv_id, _) = tlm
if id == recv_id and type == "ch":
notFound = False
if evr:
evr_list.append(evr)
(recv_id, _) = evr
if id == recv_id and type == "evr":
notFound = False
return tlm_list, evr_list
    def flush(self):
        """
        Clears the telemetry/event queue and drops all data within it.
        """
        # Draining via receive() and discarding its return values.
        self.receive()
def list(self, kind="cmds", ids=False):
"""
Return a list of available commands, EVRs, or Channels.
@param kind: kind of list desired: cmds, evrs, channels
@param ids: if True return id numbers, else nnmonics
@return: list of items
"""
queryList = []
if kind is "cmds":
#NOTE: the dict values are ints, but they represent opcode hex values
#TODO: see if another dict has a similar pattern to evrs and channels
queryList = self._cmds.getOpCodeDict().values() if ids else self._cmds.getOpCodeDict().keys()
elif kind is "evrs":
queryList = self._events.getNameDict().keys() if ids else self._events.getNameDict().values()
elif kind is "chans":
queryList = self._channels.getNameDict().keys() if ids else self._channels.getNameDict().values()
else:
self.print_msg("Requested type is invalid.", file=sys.stderr)
return queryList
def send(self, cmd_name, args=None):
"""
Send a command to the target applicaiton.
@param cmd_name: Valid command mnemonic.
@param args: Optional argument list for the command.
"""
try:
cmd_obj = self._cmds.getCommandObj(cmd_name)
except KeyError:
self.print_msg("%s is not a valid command mnemonic. Unable to send command." | |
<gh_stars>0
# coding: utf-8
import os, json, socket, time, random
from log import Log
# Up to 4 actions run after a message is received:
# 1. general_process(data): every role does this
# 2. leader_process(data)
# 3. candidate_process(data)
# 4. follower_process(data)
# The algorithm is message-driven: whenever a Node receives a message (from a
# client or another Node), it runs one iteration to handle and forward it.
class ServerNode(object):
    def __init__(self, conf):
        """Set up a raft node from a config dict.

        conf must supply 'id' (string, also used as the state directory),
        'addr' (UDP bind address tuple) and 'peers' (id -> address map of
        the other nodes).
        """
        self.role = 'follower'
        self.id = conf['id']
        self.addr = conf['addr']
        self.peers = conf['peers']
        # Fixed id -> IP map answered to 'request_leader' queries.
        self.all_ips = {'1': '192.168.127.12', '2': '172.16.58.3', '3': '172.16.31.10'}
        # initialize the socket for connection.
        # ss is used to receive
        self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ss.bind(self.addr)
        # 2-second recv timeout keeps the main loop ticking so election
        # timers can fire even when no messages arrive.
        self.ss.settimeout(2)
        # cs is used to send ,cs is only used in 'send(self,msg,addr)' function
        self.cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.current_term = 0
        self.voted_for = None
        if not os.path.exists(self.id):
            os.mkdir(self.id)
        # initialize current_term, voted_for
        self.load()
        self.log = Log(self.id)
        self.commit_index = 0
        self.last_applied = 0
        # Leader bookkeeping (per raft): next entry to send / highest
        # entry known replicated, per peer.
        self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
        self.match_index = {_id: -1 for _id in self.peers}
        # indicate the leader's id, we can get leader's address by id
        self.leader_id = None
        self.leader_addr = None
        # client request
        self.client_addr = None
        # request vote
        self.vote_ids = {_id: 0 for _id in self.peers}
        # set next leader_election time, based on raft the time is random with a bound.
        self.wait_ms = (10, 20)
        self.next_leader_election_time = time.time() + random.randint(*self.wait_ms)
        self.next_heartbeat_time = 0
    def initialize(self):
        """Main message loop: receive, redirect, then dispatch by role."""
        while True:
            try:
                try:
                    # recv times out every 2s (socket timeout) so the
                    # role handlers below still run with data = None.
                    data, addr = self.recv()
                except Exception as e:
                    data, addr = None, None
                if data is not None and data['type'] == 'request_leader':
                    res_data = {'ip': self.all_ips[self.leader_id]}
                    self.send(res_data, addr)
                    continue
                data = self.redirect(data, addr)
                self.general_process(data)
                if self.role == 'leader':
                    self.leader_process(data)
                if self.role == 'follower':
                    self.follower_process(data)
                if self.role == 'candidate':
                    self.candidate_process(data)
            except Exception as e:
                # NOTE(review): broad catch keeps the loop alive but also
                # hides programming errors; consider logging a traceback.
                print(e)
        # NOTE(review): unreachable -- the while True above never breaks and
        # all exceptions are caught, so the sockets are never closed here.
        self.ss.close()
        self.cs.close()
def general_process(self, data):
if self.commit_index > self.last_applied:
self.last_applied = self.commit_index
if data is not None and data['type'] != 'client_append_entries' and data['term'] > self.current_term:
self.current_term = data['term']
self.role = 'follower'
self.voted_for = None
self.save()
return
    def leader_process(self, data):
        '''1. time over: send heart beat messages to followers'''
        t = time.time()
        if t > self.next_heartbeat_time:
            self.next_heartbeat_time = t + random.randint(0, 5)
            # Heartbeats double as replication: entries from next_index
            # onward ride along (empty list when the peer is up to date).
            for dst_id in self.peers:
                heartbeat = {'type': 'append_entries',
                             'src_id': self.id,
                             'dst_id': dst_id,
                             'term': self.current_term,
                             'leader_id': self.id,
                             'prev_log_index': self.next_index[dst_id] - 1,
                             'prev_log_term': self.log.get_log_term(self.next_index[dst_id] - 1),
                             'entries': self.log.get_entries(self.next_index[dst_id]),
                             'leader_commit': self.commit_index
                             }
                self.send(heartbeat, self.peers[dst_id])
        '''2. receive client request message(signal)'''
        if data is not None and data['type'] == 'client_append_entries':
            # Stamp the request with our term and append it locally; it is
            # replicated by the next heartbeat round.
            data['term'] = self.current_term
            self.log.append_entries(self.log.last_log_index, [data])
            return
        '''receive append_entries_response from followers'''
        if data is not None and data['type'] == 'append_entries_response' and data['term'] == self.current_term:
            if data['success'] == False:
                # Consistency check failed: back up one entry and retry.
                self.next_index[data['src_id']] -= 1
            else:
                self.match_index[data['src_id']] = self.next_index[data['src_id']]
                self.next_index[data['src_id']] = self.log.last_log_index + 1
                # Advance commit_index while a majority of peers have
                # replicated the next entry; reply to the waiting client
                # once a commit succeeds.
                while True:
                    count = 0
                    N = self.commit_index + 1
                    for _id in self.match_index:
                        if self.match_index[_id] >= N:
                            count += 1
                    if count >= len(self.peers) // 2:
                        self.commit_index = N
                        if self.client_addr:
                            response = {'result': 'Success'}
                            self.send(response, self.client_addr)
                            break
                    else:
                        break
    def follower_process(self, data):
        '''
        follower process.

        Handles append_entries from the leader (resetting the election
        timer only when the term matches) and request_vote messages;
        becomes a candidate when the election timeout expires.
        '''
        election_time = time.time()
        if data is not None:
            if data['type'] == 'append_entries':
                if data['term'] == self.current_term:
                    # A current-term heartbeat postpones the next election.
                    self.next_leader_election_time = election_time + random.randint(*self.wait_ms)
                self.append_entries(data)
            elif data['type'] == 'request_vote':
                self.request_vote(data)
        # time out: become candidate enroll in election.
        if election_time > self.next_leader_election_time:
            self.next_leader_election_time = election_time + random.randint(*self.wait_ms)
            self.role = 'candidate'
            self.current_term += 1
            self.voted_for = self.id
            self.save()
            self.vote_ids = {_id: 0 for _id in self.peers}
        return
    def candidate_process(self, data):
        '''
        candidate process, request vote, maybe become leader.
        '''
        election_time = time.time()
        # (Re)send vote requests to every peer that has not granted yet.
        for dst_id in self.peers:
            if self.vote_ids[dst_id] == 0:
                request = {
                    'type': 'request_vote',
                    'src_id': self.id,
                    'dst_id': dst_id,
                    'term': self.current_term,
                    'candidate_id': self.id,
                    'last_log_index': self.log.last_log_index,
                    'last_log_term': self.log.last_log_term
                }
                self.send(request, self.peers[dst_id])
        if data is not None and data['term'] == self.current_term:
            # 1. receive votes: enroll in election.
            if data['type'] == 'request_vote_response':
                self.vote_ids[data['src_id']] = data['vote_granted']
                vote_count = sum(list(self.vote_ids.values()))
                # Majority among peers reached: become leader and reset the
                # per-peer replication bookkeeping.
                if vote_count >= len(self.peers) // 2:
                    self.role = 'leader'
                    self.voted_for = None
                    self.save()
                    self.next_heartbeat_time = 0
                    self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
                    self.match_index = {_id: 0 for _id in self.peers}
                return
            # 2. receive current leader message: become follower of current leader (Current leader didn't die).
            elif data['type'] == 'append_entries':
                self.next_leader_election_time = election_time + random.randint(*self.wait_ms)
                self.role = 'follower'
                self.voted_for = None
                self.save()
                return
        # time out, next round election.
        if election_time > self.next_leader_election_time:
            self.next_leader_election_time = election_time + random.randint(*self.wait_ms)
            self.role = 'candidate'
            self.current_term += 1
            self.voted_for = self.id
            self.save()
            self.vote_ids = {_id: 0 for _id in self.peers}
        return
def load(self):
# load the Node current state on local machine
file_path = self.id + '/state.json'
if os.path.exists(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
self.current_term = data['current_term']
self.voted_for = data['voted_for']
else:
self.save()
def save(self):
data = {'current_term': self.current_term,
'current_role': self.role,
'voted_for': self.voted_for,
}
file_path = self.id + '/state.json'
with open(file_path, 'w') as f:
json.dump(data, f)
    def send(self, msg, addr):
        # Serialize to JSON and ship over the dedicated UDP send socket.
        msg = json.dumps(msg).encode('utf-8')
        self.cs.sendto(msg, addr)
    def recv(self):
        # recv self.ss
        # recvfrom(65535) The maximum amount of data that can be received is 65535 bytes.
        # Blocks up to the 2s socket timeout set in __init__; raises on timeout.
        msg, addr = self.ss.recvfrom(65535)
        return json.loads(msg), addr
def redirect(self, data, addr):
# Servers except leader can't handle the client request but only resend (redirect) to the leader.
# Or it's just a mis-sending message, redirect to the right receiver.
if data == None:
return None
'''client requset '''
if data['type'] == 'client_append_entries' and self.role != 'leader':
if self.leader_id:
# if current role is not leader but it knows leader's ID:
# - send the data to leader.
self.send(data, self.peers[self.leader_id])
else:
# if current role is not leader but it don't know leader's ID:
# - return None and do nothing.
return None
if data['type'] == 'client_append_entries' and self.role == 'leader':
# if current role is exactly the leader
# - remember the client address then start append_entries in leader()
self.client_addr = addr
return data
'''Node inter-messgae'''
if data['dst_id'] != self.id:
# if this message is to another Nonde, resend the message to that Node.
self.send(data, self.peers[data['dst_id']])
return None
else:
return data
return data
def append_entries(self, data):
''''
After leader selection, the server group can process client request.
(a) When the leader receive an client request, it will add add about this request in it's own Log, then send
data with 'append_entries' type to followers.
(b) When followers receive the the 'append_entries' data from leader, it will judge whether it can agree with
the client request responding to this data. And send the respond to leader
(c) If the result is it can agree with it, the follower will add this request to it's Log.
* This function is only used in followers.
'''
response = {'type': 'append_entries_response',
'dst_id': data['src_id'],
'src_id': self.id,
'term': self.current_term,
'success': False
}
# 1. If the append_entry has smaller term, follower will return data['success'] = False to leader.
if data['term'] < self.current_term:
response['success'] = False
self.send(response, self.peers[data['src_id']])
return
self.leader_id = data['leader_id']
# 2. If the append_entry is an heartbeat from leader, the follower will do nothing in append_entries.
if data['entries'] is []:
return
index_last = data['prev_log_index']
term_last = data['prev_log_term']
current_term_last = self.log.get_log_term(index_last)
# 3. If the follower's term is different with leader's , follower will delete the last log in it's Log file
# to keep consensus with leader.
if current_term_last != term_last:
response['success'] = False
self.send(response, self.peers[data['src_id']])
self.log.delete_entries(index_last)
# all success and save the log.
else:
response['success'] = True
self.send(response, self.peers[data['src_id']])
self.log.append_entries(index_last, data['entries'])
leader_commit = data['leader_commit']
if leader_commit > self.commit_index:
commit_index = min(leader_commit, self.log.last_log_index)
self.commit_index = commit_index
return
def request_vote(self, data):
'''
In follower_process to handle a message with 'request_vote' type
'''
response = {'type': 'request_vote_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'vote_granted': False
}
if data['term'] < self.current_term:
response['vote_granted'] = False
self.send(response, self.peers[data['src_id']])
return
candidate_id = data['candidate_id']
index_last = data['last_log_index']
term_last = data['last_log_term']
if self.voted_for is None or self.voted_for == candidate_id:
# 1. vote for the candidate
if index_last >= self.log.last_log_index and term_last >= self.log.last_log_term:
self.voted_for = data['src_id']
self.save()
response['vote_granted'] = True
self.send(response, self.peers[data['src_id']])
# 2. dont vote because | |
be a pandas Series"):
assert_equal(n1, s1)
with pytest.raises(
AssertionError,
match=r"Actual should be a numpy array"):
assert_equal(s1, n1)
# properly identifies location in dict
with pytest.raises(
AssertionError,
match=r"Error at dictionary location: dict\['a'\]\['a'\]\['a'\].\n"
r"Actual should be a pandas DataFrame, found df1\."):
assert_equal(
actual={
"a": {
"a": {
"a": "df1" # mismatch
},
"b": s1
},
"b": "string",
"c": 1
},
expected={
"a": {
"a": {
"a": df2
},
"b": s2
},
"b": "string",
"c": 1
})
with pytest.raises(
AssertionError,
match=r"Error at dictionary location: dict\['d'\].\n"
r"Actual should be a pandas DataFrame, found df1\."):
assert_equal(
actual=[1, {
"a": {
"a": {
"a": df1
},
"b": s1
},
"b": "string",
"c": 1,
"d": "df1" # mismatch
}],
expected=[1, {
"a": {
"a": {
"a": df1
},
"b": s2
},
"b": "string",
"c": 1,
"d": df1
}])
def test_assert_equal_ignore_keys():
    """Tests the `ignore_keys` parameter of assert_equal."""
    # Values under ignored keys differ in type (int vs. str); assert_equal
    # must pass only when every mismatching key is covered by `ignore_keys`.
    actual = {
        "a": {"a": {"a": 1}, "b": 2, "c": "3"},  # "c" matches expected
        "b": 3,
        "c": [{"a": 4}],
    }
    expected = {
        "a": {"a": {"a": "1"}, "b": "2", "c": "3"},
        "b": "3",
        "c": [{"a": "4"}],
    }
    # ignores keys at the first level
    assert_equal(
        actual,
        expected,
        ignore_keys={"a": None, "b": None, "c": None})
    # ignores keys at the second level.
    # the value can be anything that's not a dict
    assert_equal(
        actual,
        expected,
        ignore_keys={"a": {"a": False, "b": True}, "b": None, "c": "not_a_dict"})
    # ignores keys at the third level
    assert_equal(
        actual,
        expected,
        ignore_keys={"a": {"a": {"a": None}, "b": None}, "b": None, "c": None})
    with pytest.raises(AssertionError, match=r"Error at dictionary location: dict\['c'\]\['a'\]"):
        # "a" is ignored, but ["c"]["a"] is not (keys are not confused)
        # the error message on ["c"]["a"] is unconventional, since dict["c"] is a list
        assert_equal(
            actual,
            expected,
            ignore_keys={"a": {"a": {"a": None}, "b": None}, "b": None})
    with pytest.raises(AssertionError, match=r"Error at dictionary location: dict\['a'\]\['a'\]\['a'\]"):
        # ["a"]["a"]["b"] is ignored, but not ["a"]["a"]["a"]
        assert_equal(
            actual,
            expected,
            ignore_keys={"a": {"a": {"b": None}, "b": None}, "b": None, "c": None})
    with pytest.raises(AssertionError, match=r"Error at dictionary location: dict\['a'\]\['b'\]"):
        # ["a"]["a"]["a"] is ignored, but not ["a"]["b"]
        assert_equal(
            actual,
            expected,
            ignore_keys={"a": {"a": {"a": None}}, "b": None, "c": None})
    with pytest.raises(AssertionError, match=r"Error at dictionary location: dict\['c'\]\['a'\]"):
        with pytest.warns(Warning) as record:
            # can't ignore keys within in a list (because there are no keys to ignore)
            assert_equal(
                actual,
                expected,
                ignore_keys={"a": {"a": {"a": None}, "b": None}, "b": None, "c": {"a": None}})
        # NOTE(review): the AssertionError above propagates out of the inner
        # `with`, so this assert is never executed (kept as-is to preserve
        # the original test's behavior) — confirm whether it should be moved
        # after the `pytest.raises` block.
        assert r"At dictionary location: dict['c']. `ignore_keys` is {'a': None}, " \
               r"but found a list. No keys will be ignored" in record[0].message.args[0]
    with pytest.raises(AssertionError, match=r"Actual should be a list or tuple"):
        # what is ignored is based on expected, not actual
        assert_equal(
            actual,
            [expected],
            ignore_keys={"a": {"a": {"a": None}, "b": None}, "b": None})
def test_dictionary_values_to_lists():
    """Tests dictionary_values_to_lists.

    Covers three modes of ``hyperparameters_list_type``:
    1) None, 2) a set of parameter names, 3) a dict mapping parameter
    names to the special values that must stay unwrapped.
    In every mode the input dictionary must not be modified.
    """
    # Distributions (param14) must pass through unwrapped in every mode.
    exponential_distribution = scipy.stats.expon(scale=.1)
    hyperparameter_grid = {
        "param1": [],
        "param2": [None],
        "param3": ["value1", "value2"],
        "param4": [[1, 2], [3, 4]],
        "param5": [1],
        "param6": [[1], 2, [3]],
        "param7": [[1], None, [3]],
        "param8": (1, 2, 3),
        "param9": None,
        "param10": [None, ["US", "UK"]],
        "param11": [None, "auto", "special_value"],
        "param12": [None, "auto", ["US", "UK"]],
        "param13": 1.5,
        "param14": exponential_distribution,
        "param15": {"k": "v"},
        "param16": np.array([1, 2, 3]),
        "param17": pd.DataFrame([1, 2, 3]),
    }
    # Shallow copy is enough to detect top-level mutation of the input.
    original_grid = hyperparameter_grid.copy()
    # 1) None for `hyperparameters_list_type`
    # Only non-list scalars (param9, param13, param15-17) get wrapped.
    result = dictionary_values_to_lists(hyperparameter_grid)
    expected_grid = {
        "param1": [],
        "param2": [None],
        "param3": ["value1", "value2"],
        "param4": [[1, 2], [3, 4]],
        "param5": [1],
        "param6": [[1], 2, [3]],
        "param7": [[1], None, [3]],
        "param8": (1, 2, 3),
        "param9": [None],
        "param10": [None, ["US", "UK"]],
        "param11": [None, "auto", "special_value"],
        "param12": [None, "auto", ["US", "UK"]],
        "param13": [1.5],
        "param14": exponential_distribution,
        "param15": [{"k": "v"}],
        "param16": [np.array([1, 2, 3])],
        "param17": [pd.DataFrame([1, 2, 3])]
    }
    assert_equal(result, expected_grid)
    # Original dictionary is not modified
    assert_equal(hyperparameter_grid, original_grid)
    # 2) Set for `hyperparameters_list_type` (param1 to param12)
    # Named parameters take list values, so their lists get an extra wrap.
    hyperparameters_list_type = set({
        f"param{i+1}" for i in range(12)
    })
    result = dictionary_values_to_lists(
        hyperparameter_grid,
        hyperparameters_list_type=hyperparameters_list_type
    )
    # Param 4, 9, 11 are already in the proper format
    expected_grid = {
        "param1": [[]],
        "param2": [None],
        "param3": [["value1", "value2"]],
        "param4": [[1, 2], [3, 4]],
        "param5": [[1]],
        "param6": [[[1], 2, [3]]],
        "param7": [[1], None, [3]],
        "param8": [(1, 2, 3)],
        "param9": [None],
        "param10": [None, ["US", "UK"]],
        "param11": [[None, "auto", "special_value"]],  # converted to list
        "param12": [[None, "auto", ["US", "UK"]]],  # converted to list
        "param13": [1.5],
        "param14": exponential_distribution,
        "param15": [{"k": "v"}],
        "param16": [np.array([1, 2, 3])],
        "param17": [pd.DataFrame([1, 2, 3])]
    }
    assert_equal(result, expected_grid)
    # Original dictionary is not modified
    assert_equal(hyperparameter_grid, original_grid)
    # 3) Dict for `hyperparameters_list_type`
    # Values already matching the listed special values stay unwrapped
    # (param11/param12 differ from case 2 for that reason).
    hyperparameters_list_type = {
        k: [None] for k in hyperparameters_list_type
    }
    hyperparameters_list_type["param11"] = [None, "auto", "special_value"]
    hyperparameters_list_type["param12"] = [None, "auto", "special_value"]
    result = dictionary_values_to_lists(
        hyperparameter_grid,
        hyperparameters_list_type=hyperparameters_list_type
    )
    expected_grid = {
        "param1": [[]],
        "param2": [None],
        "param3": [["value1", "value2"]],
        "param4": [[1, 2], [3, 4]],
        "param5": [[1]],
        "param6": [[[1], 2, [3]]],
        "param7": [[1], None, [3]],
        "param8": [(1, 2, 3)],
        "param9": [None],
        "param10": [None, ["US", "UK"]],
        "param11": [None, "auto", "special_value"],
        "param12": [None, "auto", ["US", "UK"]],
        "param13": [1.5],
        "param14": exponential_distribution,
        "param15": [{"k": "v"}],
        "param16": [np.array([1, 2, 3])],
        "param17": [pd.DataFrame([1, 2, 3])]
    }
    assert_equal(result, expected_grid)
    # Original dictionary is not modified
    assert_equal(hyperparameter_grid, original_grid)
    # Checks exception
    # A plain dict is not an acceptable value for a list-typed parameter.
    with pytest.raises(
            ValueError,
            match=r"The value for param1 must be a list, tuple, or one of \[None\], found {'k': 'v'}."):
        hyperparameter_grid = {"param1": {"k": "v"}}
        dictionary_values_to_lists(
            hyperparameter_grid,
            hyperparameters_list_type={"param1"})
def test_dictionaries_values_to_lists():
    """Tests dictionaries_values_to_lists on a single dict and a list of dicts."""
    # Single dictionary: only the parameters named in
    # `hyperparameters_list_type` get their list values wrapped once more.
    grid = {
        "param1": [],
        "param2": [],
        "param3": [None],
        "param4": [None],
    }
    grid_backup = grid.copy()
    converted = dictionaries_values_to_lists(
        grid,
        hyperparameters_list_type={"param2", "param4"})
    expected = {
        "param1": [],
        "param2": [[]],
        "param3": [None],
        "param4": [None],
    }
    assert_equal(converted, expected)
    # The input dictionary must remain untouched.
    assert_equal(grid, grid_backup)
    # List of dictionaries: each element is converted independently.
    grids = [
        grid,
        grid
    ]
    grids_backup = grids.copy()
    converted = dictionaries_values_to_lists(
        grids,
        hyperparameters_list_type={"param2", "param4"})
    assert_equal(converted, [expected, expected])
    assert_equal(grids, grids_backup)
def test_unique_in_list():
    """Tests unique_in_list on flat, nested, and filtered inputs."""
    # Missing or empty input yields None.
    assert unique_in_list(None) is None
    assert unique_in_list([]) is None
    # Nested lists are flattened; unique values are returned.
    assert unique_in_list([0, 1, 2]) == [0, 1, 2]
    assert unique_in_list([0, 1, 2, [3]]) == [0, 1, 2, 3]
    # Elements listed in `ignored_elements` are excluded from the result.
    assert unique_in_list([0, 1, 2, [3]], ignored_elements=(1, 3)) == [0, 2]
    # Ignoring applies at any nesting depth.
    assert unique_in_list(
        [0, 0, [None], [[None]], [[[None]]]],
        ignored_elements=(None,)) == [0]
    # If every element is ignored, the result is None, not an empty list.
    assert unique_in_list(
        [0, 0, [None], [[None]]],
        ignored_elements=(0, None,)) is None
def test_flatten_list():
    """Tests flatten_list: one level of nesting is removed."""
    cases = [
        ([[]], []),
        ([[0, 1, 2, 3]], [0, 1, 2, 3]),
        ([[0], [1], [2], [3]], [0, 1, 2, 3]),
        ([[0, 1], [2, 3]], [0, 1, 2, 3]),
    ]
    for nested, expected in cases:
        assert flatten_list(nested) == expected
def test_reorder_columns():
    """Tests reorder_columns: columns are sorted by their weight in order_dict."""
    frame = pd.DataFrame(np.random.randn(3, 4), columns=list("abcd"))
    # No order requested: the frame is returned unchanged.
    assert_equal(frame, reorder_columns(frame, order_dict=None))
    # Columns come back sorted by ascending weight: b(-1), d(2), a(3), c(5).
    weights = {
        "a": 3,
        "b": -1,
        "c": 5,
        "d": 2}
    assert_equal(frame[["b", "d", "a", "c"]], reorder_columns(frame, order_dict=weights))
def test_apply_func_to_columns():
    """Tests apply_func_to_columns with Series, dict, and list rows."""
    residual = ElementwiseEvaluationMetricEnum.Residual.get_metric_func()
    # cols can be index values of a pandas Series
    row = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
    fn = apply_func_to_columns(row_func=residual, cols=["a", "b"])
    assert fn(row) == row["a"] - row["b"]
    fn = apply_func_to_columns(row_func=residual, cols=["c", "b"])
    assert fn(row) == row["c"] - row["b"]
    # cols can be dict keys (same fn, different row container)
    row = {"a": 1.0, "b": 2.0, "c": 3.0}
    assert fn(row) == row["c"] - row["b"]
    # cols can be list indices
    row = [4.0, 8.0, 5.0]
    fn = apply_func_to_columns(row_func=residual, cols=[2, 1])
    assert fn(row) == row[2] - row[1]
def test_get_integer():
    """Tests get_integer function.

    ``get_integer(value, name, min_value=..., default_value=...)`` coerces
    ``value`` to an integer, substituting ``default_value`` when ``value`` is
    None, and enforcing the ``min_value`` lower bound.
    """
    with pytest.warns(Warning) as record:
        # None falls back to default_value.
        assert get_integer(None, "val", min_value=10, default_value=20) == 20
        # A valid integer is returned unchanged.
        assert get_integer(11, "val", min_value=10, default_value=20) == 11
        # A float is converted to an integer (10.5 -> 10) with a warning.
        assert get_integer(10.5, "val", min_value=10, default_value=20) == 10
        assert "val converted to integer 10 from 10.5" in record[0].message.args[0]
    # Non-numeric input raises.
    with pytest.raises(ValueError, match="val must be an integer"):
        get_integer("q", "val")
    # Values below min_value raise ...
    with pytest.raises(ValueError, match="val must be >= 1"):
        get_integer(0, "val", min_value=1)
    # ... including a default_value below min_value.
    with pytest.raises(ValueError, match="val must be >= 1"):
        get_integer(None, "val", min_value=1, default_value=0)
def test_mutable_field():
    """Tests mutable_field: each instance receives its own copy of the default."""
    @dataclass
    class D:
        x: List = mutable_field([1, 2, 3])
    # Distinct objects per instance (no shared mutable default) ...
    assert D().x is not D().x
    # ... but with equal contents.
    assert D().x == [1, 2, 3]
def test_ignore_warnings():
    """Tests the ``ignore_warnings`` decorator.

    ``ignore_warnings(category)`` must suppress warnings of the given
    category raised inside the decorated function, while warnings of other
    categories still propagate to the caller.
    """
    # Warnings of the ignored category are suppressed.
    @ignore_warnings(FutureWarning)
    def func(a, b, c=1):
        warnings.warn("warning message", FutureWarning)
        return f"{a} {b} {c}"
    # FIX: `pytest.warns(None)` is deprecated since pytest 7 and an error in
    # pytest 8; escalating warnings to errors both replaces it and actually
    # asserts that no warning escapes the decorator.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        assert func(a=1, b=2) == "1 2 1"
    # Warnings of a different category are not suppressed.
    @ignore_warnings(ImportWarning)
    def func2(a, b, c=1):
        warnings.warn("warning message", FutureWarning)
        return f"{a} {b} {c}"
    with pytest.warns(FutureWarning) as record:
        assert func2(a=1, b=2, c=3) == "1 2 3"
    assert "warning message" in record[0].message.args[0]
def test_group_strs_with_regex_patterns():
"""Tests ``group_strs_with_regex_patterns``."""
# Example 1
strings = ["sd", "sd1", "rr", "urr", "sd2", "uu"]
regex_patterns = | |
import os, re, traceback;
from mBugId import cBugId;
from mConsole import oConsole;
from mFileSystemItem import cFileSystemItem;
import mGlobals;
from mBugId.mCP437 import fsCP437FromBytesString;
try: # mDebugOutput use is Optional
import mDebugOutput as m0DebugOutput;
except ModuleNotFoundError as oException:
if oException.args[0] != "No module named 'mDebugOutput'":
raise;
m0DebugOutput = None;
# Console color attributes passed to oConsole.fOutput throughout this file.
# NOTE(review): values look like DOS-style text attributes (high byte/nibble
# background, low nibble foreground) — confirm against mConsole documentation.
NORMAL = 0x0F07;
HILITE = 0x0F0F;
ERROR = 0x0F0C;
WARN = 0x0F06;
WARN_INFO = 0x0F0E;
# Module-level flag so license warnings are printed at most once per run
# (see fLicenseWarningsCallback below).
mGlobals.bLicenseWarningsShown = False;
def fOutputStack(oStack):
  # Print every frame of the given stack to the console. Hidden frames are
  # rendered in the dim color and annotated with the reason they are hidden.
  oConsole.fOutput(HILITE, " Stack:");
  for oFrame in oStack.aoFrames:
    uAddressColor = NORMAL if oFrame.bHidden else HILITE;
    uCdbColor = NORMAL if oFrame.sb0UniqueAddress else HILITE;
    axHiddenNote = [" => ", oFrame.s0IsHiddenBecause] if oFrame.s0IsHiddenBecause else [];
    oConsole.fOutput(
      NORMAL, " \u2022 ",
      uAddressColor, fsCP437FromBytesString(oFrame.sb0UniqueAddress or b"---"),
      NORMAL, " (cdb:", uCdbColor, fsCP437FromBytesString(oFrame.sbCdbSymbolOrAddress), NORMAL, ")",
      axHiddenNote,
    );
# Exit code reported when an internal exception terminates the test run
# (used by fInternalExceptionCallback below).
guExitCodeInternalError = 1; # Use standard value;
def fRunASingleTest(
sISA,
axCommandLineArguments,
a0sExpectedBugIdAndLocations,
sExpectedFailedToDebugApplicationErrorMessage = None,
bRunInShell = False,
s0ApplicationBinaryPath = None,
bASan = False,
uMaximumNumberOfBugs = 2,
bExcessiveCPUUsageChecks = False
):
asApplicationArguments = axCommandLineArguments and [
isinstance(x, str) and x
or x < 10 and ("%d" % x)
or ("0x%X" % x)
for x in axCommandLineArguments
] or [];
assert s0ApplicationBinaryPath is None or not bASan, \
"Setting bASan when supplying an application binary path makes no sense";
sApplicationBinaryPath = (
s0ApplicationBinaryPath if s0ApplicationBinaryPath is not None else
mGlobals.dsASanTestsBinaries_by_sISA[sISA] if bASan else
mGlobals.dsTestsBinaries_by_sISA[sISA]
);
asCommandLine = [sApplicationBinaryPath] + asApplicationArguments;
sFailedToDebugApplicationErrorMessage = None;
if sExpectedFailedToDebugApplicationErrorMessage:
sTestDescription = "%s => %s" % (
"Running %s" % sApplicationBinaryPath,
repr(sExpectedFailedToDebugApplicationErrorMessage)
);
else:
sTestDescription = "%s%s %s%s => %s" % (
sISA,
" ASan" if bASan else "",
" ".join(asApplicationArguments), \
bRunInShell and " (in child process)" or "",
a0sExpectedBugIdAndLocations and " => ".join(a0sExpectedBugIdAndLocations) or "no bugs"
);
sTestBinaryName = os.path.basename(sApplicationBinaryPath).lower();
if bRunInShell:
asApplicationArguments = ["/C", sApplicationBinaryPath] + asApplicationArguments;
sApplicationBinaryPath = mGlobals.dsComSpec_by_sISA[sISA];
oConsole.fSetTitle(sTestDescription);
if mGlobals.bDebugStartFinish:
oConsole.fOutput("→ Started %s" % sTestDescription);
else:
oConsole.fStatus("► %s" % sTestDescription);
asLog = [];
def fCdbStdInInputCallback(oBugId, sbInput):
sInput = fsCP437FromBytesString(sbInput);
if mGlobals.bShowCdbIO: oConsole.fOutput(" stdin<%s" % sInput);
asLog.append("stdin<%s" % sInput);
def fCdbStdOutOutputCallback(oBugId, sbOutput):
sOutput = fsCP437FromBytesString(sbOutput);
if mGlobals.bShowCdbIO: oConsole.fOutput(" stdout>%s" % sOutput);
asLog.append("stdout>%s" % sOutput);
def fCdbStdErrOutputCallback(oBugId, sbOutput):
sOutput = fsCP437FromBytesString(sbOutput);
if mGlobals.bShowCdbIO: oConsole.fOutput(" stderr>%s" % sOutput);
asLog.append("stderr>%s" % sOutput);
# asLog.append("log>%s%s" % (sMessage, sData and " (%s)" % sData or ""));
def fApplicationDebugOutputCallback(oBugId, oProcess, bIsMainProcess, asOutput):
bFirstLine = True;
for sOutput in asOutput:
sLogLine = "%s process 0x%X (%s): %s>%s" % (
bIsMainProcess and "Main" or "Sub", \
oProcess.uId,
oProcess.sCommandLine,
bFirstLine and "debug" or " ",
sOutput
);
if mGlobals.bShowApplicationIO: oConsole.fOutput(sLogLine);
asLog.append(sLogLine);
bFirstLine = False;
def fApplicationStdErrOutputCallback(oBugId, oConsoleProcess, bIsMainProcess, sOutput):
# This is always a main process
sLogLine = "%s process 0x%X (%s): stderr> %s" % (
bIsMainProcess and "Main" or "Sub",
oConsoleProcess.uId,
oConsoleProcess.sCommandLine,
sOutput
);
if mGlobals.bShowApplicationIO: oConsole.fOutput(sLogLine);
asLog.append(sLogLine);
def fApplicationStdOutOutputCallback(oBugId, oConsoleProcess, bIsMainProcess, sOutput):
# This is always a main process
sLogLine = "%s process 0x%X (%s): stdout> %s" % (
bIsMainProcess and "Main" or "Sub",
oConsoleProcess.uId,
oConsoleProcess.sCommandLine,
sOutput
)
if mGlobals.bShowApplicationIO: oConsole.fOutput(sLogLine);
asLog.append(sLogLine);
def fApplicationSuspendedCallback(oBugId, sReason):
asLog.append("Application suspended (%s)" % sReason);
def fApplicationResumedCallback(oBugId):
asLog.append("Application resumed");
def fApplicationRunningCallback(oBugId):
asLog.append("Application running");
def fFailedToDebugApplicationCallback(oBugId, sErrorMessage):
if sExpectedFailedToDebugApplicationErrorMessage == sErrorMessage:
return;
if not mGlobals.bShowCdbIO:
for sLine in asLog:
oConsole.fOutput(sLine);
oConsole.fOutput(ERROR, "- Failed test: %s" % sTestDescription);
if sExpectedFailedToDebugApplicationErrorMessage:
oConsole.fOutput(ERROR, " Expected: %s" % repr(sExpectedFailedToDebugApplicationErrorMessage));
else:
oConsole.fOutput(ERROR, " BugId unexpectedly failed to debug the application");
oConsole.fOutput(ERROR, " Error: %s" % repr(sErrorMessage));
oBugId.fStop();
raise AssertionError(sErrorMessage);
def fFailedToApplyMemoryLimitsCallback(oBugId, oProcess):
if not mGlobals.bShowCdbIO:
for sLine in asLog:
oConsole.fOutput(ERROR, sLine);
oConsole.fOutput(ERROR, "- Failed to apply memory limits to process 0x%X (%s) for test: %s" % (
oProcess.uId,
oProcess.sCommandLine,
sTestDescription
));
oBugId.fStop();
raise AssertionError("Failed to apply memory limits to process");
def fFinishedCallback(oBugId):
if mGlobals.bShowCdbIO: oConsole.fOutput(" Finished");
asLog.append("Finished");
def fLicenseWarningsCallback(oBugId, asLicenseWarnings):
if not mGlobals.bLicenseWarningsShown:
oConsole.fOutput(WARN, "\u2554\u2550\u2550[ ", WARN_INFO, "License warning", WARN, " ]", sPadding = "\u2550");
for sLicenseWarning in asLicenseWarnings:
oConsole.fOutput(WARN, "\u2551 ", WARN_INFO, sLicenseWarning);
oConsole.fOutput(WARN, "\u255A", sPadding = "\u2550");
mGlobals.bLicenseWarningsShown = True;
def fLicenseErrorsCallback(oBugId, asLicenseErrors):
oConsole.fOutput(ERROR, "\u2554\u2550\u2550[ ", ERROR_INFO, "License warning", ERROR, " ]", sPadding = "\u2550");
for sLicenseError in asLicenseErrors:
oConsole.fOutput(ERROR, "\u2551 ", ERROR_INFO, sLicenseError);
oConsole.fOutput(ERROR, "\u255A", sPadding = "\u2550");
os._exit(1);
def fInternalExceptionCallback(oBugId, oThread, oException, oTraceBack):
if not mGlobals.bShowCdbIO:
for sLine in asLog:
oConsole.fOutput(sLine);
oBugId.fStop();
if m0DebugOutput:
m0DebugOutput.fTerminateWithException(oException, guExitCodeInternalError, bShowStacksForAllThread = True);
raise oException;
def fPageHeapNotEnabledCallback(oBugId, oProcess, bIsMainProcess, bPreventable):
assert oProcess.sBinaryName == "cmd.exe", \
"It appears you have not enabled page heap for %s, which is required to run tests." % oProcess.sBinaryName;
def fProcessAttachedCallback(oBugId, oProcess, bIsMainProcess):
asLog.append("%s process 0x%X (%s): attached." % (
bIsMainProcess and "Main" or "Sub",
oProcess.uId,
oProcess.sCommandLine
));
def fProcessStartedCallback(oBugId, oConsoleProcess, bIsMainProcess):
# This is always a main process
asLog.append("%s process 0x%X (%s): started." % (
bIsMainProcess and "Main" or "Sub",
oConsoleProcess.uId,
oConsoleProcess.sCommandLine
));
def fProcessTerminatedCallback(oBugId, oProcess, bIsMainProcess):
asLog.append("%s process 0x%X (%s): terminated." % (
bIsMainProcess and "Main" or "Sub",
oProcess.uId,
oProcess.sCommandLine
));
def fLogMessageCallback(oBugId, sMessage, dsData = None):
sData = dsData and ", ".join(["%s: %s" % (sName, sValue) for (sName, sValue) in dsData.items()]);
sLogLine = "log>%s%s" % (sMessage, sData and " (%s)" % sData or "");
if mGlobals.bShowCdbIO: oConsole.fOutput(sLogLine);
asLog.append(sLogLine);
aoBugReports = [];
def fBugReportCallback(oBugId, oBugReport):
aoBugReports.append(oBugReport);
if mGlobals.bShowCdbIO:
oConsole.fOutput();
oConsole.fOutput("=" * 80);
oConsole.fOutput("%s %s" % (sApplicationBinaryPath, " ".join(asApplicationArguments)));
if a0sExpectedBugIdAndLocations:
for sExpectedBugIdAndLocation in a0sExpectedBugIdAndLocations:
oConsole.fOutput(" => %s" % sExpectedBugIdAndLocation);
oConsole.fOutput("-" * 80);
bBugIdStarted = False;
bBugIdStopped = False;
try:
oBugId = cBugId(
sCdbISA = sISA, # Debug with a cdb.exe for an ISA that matches the target process.
s0ApplicationBinaryPath = sApplicationBinaryPath,
asApplicationArguments = asApplicationArguments,
azsSymbolServerURLs = ["http://msdl.microsoft.com/download/symbols"], # Will be ignore if symbols are disabled.
bGenerateReportHTML = mGlobals.bGenerateReportHTML,
u0TotalMaxMemoryUse = mGlobals.uTotalMaxMemoryUse,
u0MaximumNumberOfBugs = uMaximumNumberOfBugs,
);
oBugId.fAddCallback("Application resumed", fApplicationResumedCallback);
oBugId.fAddCallback("Application running", fApplicationRunningCallback);
oBugId.fAddCallback("Application debug output", fApplicationDebugOutputCallback);
oBugId.fAddCallback("Application stderr output", fApplicationStdErrOutputCallback);
oBugId.fAddCallback("Application stdout output", fApplicationStdOutOutputCallback);
oBugId.fAddCallback("Application suspended", fApplicationSuspendedCallback);
oBugId.fAddCallback("Bug report", fBugReportCallback);
oBugId.fAddCallback("Cdb stderr output", fCdbStdErrOutputCallback);
oBugId.fAddCallback("Cdb stdin input", fCdbStdInInputCallback);
oBugId.fAddCallback("Cdb stdout output", fCdbStdOutOutputCallback);
oBugId.fAddCallback("Failed to apply application memory limits", fFailedToApplyMemoryLimitsCallback);
oBugId.fAddCallback("Failed to apply process memory limits", fFailedToApplyMemoryLimitsCallback);
oBugId.fAddCallback("Failed to debug application", fFailedToDebugApplicationCallback);
oBugId.fAddCallback("Finished", fFinishedCallback);
oBugId.fAddCallback("Internal exception", fInternalExceptionCallback);
oBugId.fAddCallback("License warnings", fLicenseWarningsCallback);
oBugId.fAddCallback("License errors", fLicenseErrorsCallback);
oBugId.fAddCallback("Page heap not enabled", fPageHeapNotEnabledCallback);
oBugId.fAddCallback("Process attached", fProcessAttachedCallback);
oBugId.fAddCallback("Process terminated", fProcessTerminatedCallback);
oBugId.fAddCallback("Process started", fProcessStartedCallback);
oBugId.fAddCallback("Log message", fLogMessageCallback);
if bExcessiveCPUUsageChecks:
def fExcessiveCPUUsageDetectedCallback(oBugId, bExcessiveCPUUsageDetected):
if not bExcessiveCPUUsageDetected:
oBugId.fCheckForExcessiveCPUUsage(fExcessiveCPUUsageDetectedCallback);
oBugId.foSetTimeout(
sDescription = "Start check for excessive CPU usage",
nTimeoutInSeconds = mGlobals.nExcessiveCPUUsageCheckInitialTimeoutInSeconds,
f0Callback = lambda oBugId: fExcessiveCPUUsageDetectedCallback(oBugId, False),
);
oBugId.fStart();
bBugIdStarted = True;
oBugId.fWait();
bBugIdStopped = True;
if mGlobals.bShowCdbIO: oConsole.fOutput("= Finished ".ljust(80, "="));
def fDumpExpectedAndReported():
uCounter = 0;
while 1:
s0ExpectedBugIdAndLocation = a0sExpectedBugIdAndLocations[uCounter] if uCounter < len(a0sExpectedBugIdAndLocations) else None;
o0BugReport = aoBugReports[uCounter] if uCounter < len(aoBugReports) else None;
if not s0ExpectedBugIdAndLocation and not o0BugReport:
break;
uCounter += 1;
s0DetectedBugIdAndLocation = (
"%s @ %s" % (o0BugReport.sId, o0BugReport.s0BugLocation or "(unknown)") if o0BugReport is not None else
None
);
oConsole.fOutput(" Bug #%d %s:" % (
uCounter,
(
"is as expected" if s0DetectedBugIdAndLocation == s0ExpectedBugIdAndLocation else
"was not detected" if s0DetectedBugIdAndLocation is None else
"was not expected" if s0ExpectedBugIdAndLocation is None else
"has an unexpected bug id/location"
),
));
if s0ExpectedBugIdAndLocation:
oConsole.fOutput(" Expected: %s" % (repr(s0ExpectedBugIdAndLocation)));
else:
oConsole.fOutput(" Expected: no bug.");
if o0BugReport:
oConsole.fOutput(" Detected: %ss" % repr(s0DetectedBugIdAndLocation));
oConsole.fOutput(" (Description: %s)" % repr(o0BugReport.s0BugDescription));
else:
oConsole.fOutput(" Detected: no bug.");
if sExpectedFailedToDebugApplicationErrorMessage:
pass;
elif a0sExpectedBugIdAndLocations is None:
uCounter = 0;
oConsole.fOutput("→ Test results for: %s" % sTestDescription);
for oBugReport in aoBugReports:
uCounter += 1;
sBugIdAndLocation = "%s @ %s" % (oBugReport.sId, oBugReport.s0BugLocation or "(unknown)");
oConsole.fOutput(" Test bug #%d: %s." % (uCounter, repr(sBugIdAndLocation)));
if oBugReport.o0Stack:
fOutputStack(oBugReport.o0Stack);
else:
if len(aoBugReports) != len(a0sExpectedBugIdAndLocations):
if not mGlobals.bShowCdbIO:
for sLine in asLog:
oConsole.fOutput(sLine);
oConsole.fOutput(ERROR, "× Failed test: %s" % sTestDescription);
oConsole.fOutput(ERROR, " Test reported %d instead of %d bugs in the application." % (len(aoBugReports), len(a0sExpectedBugIdAndLocations)));
fDumpExpectedAndReported();
raise AssertionError("Test reported different number of bugs than was expected");
else:
uCounter = 0;
for uCounter in range(len(a0sExpectedBugIdAndLocations)):
sExpectedBugIdAndLocation = a0sExpectedBugIdAndLocations[uCounter];
rExpectedBugIdAndLocation = re.compile("^(%s)$" % sExpectedBugIdAndLocation.replace("<binary>", re.escape(sTestBinaryName)));
oBugReport = aoBugReports[uCounter];
s0DetectedBugIdAndLocation = "%s @ %s" % (oBugReport.sId, oBugReport.s0BugLocation or "(unknown)");
if not rExpectedBugIdAndLocation.match(s0DetectedBugIdAndLocation):
if not mGlobals.bShowCdbIO:
for sLine in asLog:
oConsole.fOutput(ERROR, sLine);
oConsole.fOutput(ERROR, "× Failed test: %s" % sTestDescription);
oConsole.fOutput(ERROR, " Test bug #%d does not match %s." % (uCounter, repr(sExpectedBugIdAndLocation)));
fDumpExpectedAndReported()
if oBugReport.o0Stack:
| |
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from visdom import Visdom
import argparse
import numpy as np
import math
import os.path
import time
import tempfile
from six.moves import urllib
def run_demo(viz):
global input
assert viz.check_connection(timeout_seconds=3), \
'No connection could be formed quickly'
textwindow = viz.text('Hello World!')
updatetextwindow = viz.text('Hello World! More text should be here')
assert updatetextwindow is not None, 'Window was none'
viz.text('And here it is', win=updatetextwindow, append=True)
# text window with Callbacks
txt = 'This is a write demo notepad. Type below. Delete clears text:<br>'
callback_text_window = viz.text(txt)
def type_callback(event):
if event['event_type'] == 'KeyPress':
curr_txt = event['pane_data']['content']
if event['key'] == 'Enter':
curr_txt += '<br>'
elif event['key'] == 'Backspace':
curr_txt = curr_txt[:-1]
elif event['key'] == 'Delete':
curr_txt = txt
elif len(event['key']) == 1:
curr_txt += event['key']
viz.text(curr_txt, win=callback_text_window)
viz.register_event_handler(type_callback, callback_text_window)
# matplotlib demo:
try:
import matplotlib.pyplot as plt
plt.plot([1, 23, 2, 4])
plt.ylabel('some numbers')
viz.matplot(plt)
except BaseException as err:
print('Skipped matplotlib example')
print('Error message: ', err)
# video demo:
try:
video = np.empty([256, 250, 250, 3], dtype=np.uint8)
for n in range(256):
video[n, :, :, :].fill(n)
viz.video(tensor=video)
except BaseException:
print('Skipped video tensor example')
try:
# video demo:
# download video from http://media.w3.org/2010/05/sintel/trailer.ogv
video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
videofile = os.path.join(tempfile.gettempdir(), 'trailer.ogv')
urllib.request.urlretrieve(video_url, videofile)
if os.path.isfile(videofile):
viz.video(videofile=videofile, opts={'width': 864, 'height': 480})
except BaseException:
print('Skipped video file example')
# image demo
viz.image(
np.random.rand(3, 512, 256),
opts=dict(title='Random!', caption='How random.'),
)
# image demo save as jpg
viz.image(
np.random.rand(3, 512, 256),
opts=dict(title='Random image as jpg!', caption='How random as jpg.', jpgquality=50),
)
# image history demo
viz.image(
np.random.rand(3, 512, 256),
win='image_history',
opts=dict(caption='First random', store_history=True, title='Pick your random!'),
)
viz.image(
np.random.rand(3, 512, 256),
win='image_history',
opts=dict(caption='Second random!', store_history=True),
)
# grid of images
viz.images(
np.random.randn(20, 3, 64, 64),
opts=dict(title='Random images', caption='How random.')
)
# scatter plots
Y = np.random.rand(100)
old_scatter = viz.scatter(
X=np.random.rand(100, 2),
Y=(Y[Y > 0] + 1.5).astype(int),
opts=dict(
legend=['Didnt', 'Update'],
xtickmin=-50,
xtickmax=50,
xtickstep=0.5,
ytickmin=-50,
ytickmax=50,
ytickstep=0.5,
markersymbol='cross-thin-open',
),
)
viz.update_window_opts(
win=old_scatter,
opts=dict(
legend=['Apples', 'Pears'],
xtickmin=0,
xtickmax=1,
xtickstep=0.5,
ytickmin=0,
ytickmax=1,
ytickstep=0.5,
markersymbol='cross-thin-open',
),
)
# 3d scatterplot with custom labels and ranges
viz.scatter(
X=np.random.rand(100, 3),
Y=(Y + 1.5).astype(int),
opts=dict(
legend=['Men', 'Women'],
markersize=5,
xtickmin=0,
xtickmax=2,
xlabel='Arbitrary',
xtickvals=[0, 0.75, 1.6, 2],
ytickmin=0,
ytickmax=2,
ytickstep=0.5,
ztickmin=0,
ztickmax=1,
ztickstep=0.5,
)
)
# 2D scatterplot with custom intensities (red channel)
viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.rand(255) + 1.5).astype(int),
opts=dict(
markersize=10,
markercolor=np.random.randint(0, 255, (2, 3,)),
),
)
# 2D scatter plot with custom colors per label:
viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.randn(255) > 0) + 1,
opts=dict(
markersize=10,
markercolor=np.floor(np.random.random((2, 3)) * 255),
),
)
win = viz.scatter(
X=np.random.rand(255, 2),
opts=dict(
markersize=10,
markercolor=np.random.randint(0, 255, (255, 3,)),
),
)
# assert that the window exists
assert viz.win_exists(win), 'Created window marked as not existing'
# add new trace to scatter plot (update='new' appends a fresh named trace to an existing window)
viz.scatter(
    X=np.random.rand(255),
    Y=np.random.rand(255),
    win=win,
    name='new_trace',
    update='new'
)
# 2D scatter plot with text labels:
viz.scatter(
    X=np.random.rand(10, 2),
    opts=dict(
        textlabels=['Label %d' % (i + 1) for i in range(10)]
    )
)
# 2D scatter with per-point class labels (Y) plus a legend and text labels
viz.scatter(
    X=np.random.rand(10, 2),
    Y=[1] * 5 + [2] * 3 + [3] * 2,
    opts=dict(
        legend=['A', 'B', 'C'],
        textlabels=['Label %d' % (i + 1) for i in range(10)]
    )
)
# bar plots
viz.bar(X=np.random.rand(20))
viz.bar(
    X=np.abs(np.random.rand(5, 3)),
    opts=dict(
        stacked=True,
        legend=['Facebook', 'Google', 'Twitter'],
        rownames=['2012', '2013', '2014', '2015', '2016']
    )
)
viz.bar(
    X=np.random.rand(20, 3),
    opts=dict(
        stacked=False,
        legend=['The Netherlands', 'France', 'United States']
    )
)
# histogram
viz.histogram(X=np.random.rand(10000), opts=dict(numbins=20))
# heatmap
viz.heatmap(
    X=np.outer(np.arange(1, 6), np.arange(1, 11)),
    opts=dict(
        columnnames=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
        rownames=['y1', 'y2', 'y3', 'y4', 'y5'],
        colormap='Electric',
    )
)
# contour: a 2D Gaussian bump centred at (50, 50)
x = np.tile(np.arange(1, 101), (100, 1))
y = x.transpose()
X = np.exp((((x - 50) ** 2) + ((y - 50) ** 2)) / -(20.0 ** 2))
viz.contour(X=X, opts=dict(colormap='Viridis'))
# surface
viz.surf(X=X, opts=dict(colormap='Hot'))
# line plots
viz.line(Y=np.random.rand(10), opts=dict(showlegend=True))
Y = np.linspace(-5, 5, 100)
viz.line(
    Y=np.column_stack((Y * Y, np.sqrt(Y + 5))),
    X=np.column_stack((Y, Y)),
    opts=dict(markers=False),
)
# line using WebGL
webgl_num_points = 200000
webgl_x = np.linspace(-1, 0, webgl_num_points)
webgl_y = webgl_x**3
viz.line(X=webgl_x, Y=webgl_y,
         opts=dict(title='{} points using WebGL'.format(webgl_num_points), webgl=True),
         win="WebGL demo")
# line updates: 'append' extends existing traces, 'insert' adds a new named
# trace, 'remove' deletes the trace matching `name` (X/Y must be None).
win = viz.line(
    X=np.column_stack((np.arange(0, 10), np.arange(0, 10))),
    Y=np.column_stack((np.linspace(5, 10, 10),
                       np.linspace(5, 10, 10) + 5)),
)
viz.line(
    X=np.column_stack((np.arange(10, 20), np.arange(10, 20))),
    Y=np.column_stack((np.linspace(5, 10, 10),
                       np.linspace(5, 10, 10) + 5)),
    win=win,
    update='append'
)
viz.line(
    X=np.arange(21, 30),
    Y=np.arange(1, 10),
    win=win,
    name='2',
    update='append'
)
viz.line(
    X=np.arange(1, 10),
    Y=np.arange(11, 20),
    win=win,
    name='delete this',
    update='append'
)
viz.line(
    X=np.arange(1, 10),
    Y=np.arange(11, 20),
    win=win,
    name='4',
    update='insert'
)
viz.line(X=None, Y=None, win=win, name='delete this', update='remove')
# append more points to the WebGL window created above
viz.line(
    X=webgl_x+1.,
    Y=(webgl_x+1.)**3,
    win="WebGL demo",
    update='append',
    opts=dict(title='{} points using WebGL'.format(webgl_num_points*2), webgl=True)
)
# per-trace dash style and line colour
win = viz.line(
    X=np.column_stack((
        np.arange(0, 10),
        np.arange(0, 10),
        np.arange(0, 10),
    )),
    Y=np.column_stack((
        np.linspace(5, 10, 10),
        np.linspace(5, 10, 10) + 5,
        np.linspace(5, 10, 10) + 10,
    )),
    opts={
        'dash': np.array(['solid', 'dash', 'dashdot']),
        'linecolor': np.array([
            [0, 191, 255],
            [0, 191, 255],
            [255, 0, 0],
        ]),
        'title': 'Different line dash types'
    }
)
viz.line(
    X=np.arange(0, 10),
    Y=np.linspace(5, 10, 10) + 15,
    win=win,
    name='4',
    update='insert',
    opts={
        'linecolor': np.array([
            [255, 0, 0],
        ]),
        'dash': np.array(['dot']),
    }
)
# stacked area plot with custom sizing, margins and a log y-axis
Y = np.linspace(0, 4, 200)
win = viz.line(
    Y=np.column_stack((np.sqrt(Y), np.sqrt(Y) + 2)),
    X=np.column_stack((Y, Y)),
    opts=dict(
        fillarea=True,
        showlegend=False,
        width=800,
        height=800,
        xlabel='Time',
        ylabel='Volume',
        ytype='log',
        title='Stacked area plot',
        marginleft=30,
        marginright=30,
        marginbottom=80,
        margintop=30,
    ),
)
# Assure that the stacked area plot isn't giant
viz.update_window_opts(
    win=win,
    opts=dict(
        width=300,
        height=300,
    ),
)
# boxplot
X = np.random.rand(100, 2)
X[:, 1] += 2
viz.boxplot(
    X=X,
    opts=dict(legend=['Men', 'Women'])
)
# stemplot
Y = np.linspace(0, 2 * math.pi, 70)
X = np.column_stack((np.sin(Y), np.cos(Y)))
viz.stem(
    X=X,
    Y=Y,
    opts=dict(legend=['Sine', 'Cosine'])
)
# quiver plot: X/Y are the arrow components on a regular grid
X = np.arange(0, 2.1, .2)
Y = np.arange(0, 2.1, .2)
X = np.broadcast_to(np.expand_dims(X, axis=1), (len(X), len(X)))
Y = np.broadcast_to(np.expand_dims(Y, axis=0), (len(Y), len(Y)))
U = np.multiply(np.cos(X), Y)
V = np.multiply(np.sin(X), Y)
viz.quiver(
    X=U,
    Y=V,
    opts=dict(normalize=0.9),
)
# pie chart
X = np.asarray([19, 26, 55])
viz.pie(
    X=X,
    opts=dict(legend=['Residential', 'Non-Residential', 'Utility'])
)
# scatter plot example with various type of updates
colors = np.random.randint(0, 255, (2, 3,))
win = viz.scatter(
    X=np.random.rand(255, 2),
    Y=(np.random.rand(255) + 1.5).astype(int),
    opts=dict(
        markersize=10,
        markercolor=colors,
        legend=['1', '2']
    ),
)
# append points to a single named trace (1D X/Y pair)
viz.scatter(
    X=np.random.rand(255),
    Y=np.random.rand(255),
    opts=dict(
        markersize=10,
        markercolor=colors[0].reshape(-1, 3),
    ),
    name='1',
    update='append',
    win=win)
# append labelled points to both traces at once
viz.scatter(
    X=np.random.rand(255, 2),
    Y=(np.random.rand(255) + 1.5).astype(int),
    opts=dict(
        markersize=10,
        markercolor=colors,
    ),
    update='append',
    win=win)
# mesh plot: a unit cube from 8 vertices (X) and 12 triangle faces (Y)
x = [0, 0, 1, 1, 0, 0, 1, 1]
y = [0, 1, 1, 0, 0, 1, 1, 0]
z = [0, 0, 0, 0, 1, 1, 1, 1]
X = np.c_[x, y, z]
i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2]
j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3]
k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6]
Y = np.c_[i, j, k]
viz.mesh(X=X, Y=Y, opts=dict(opacity=0.5))
# SVG plotting
svgstr = """
<svg height="300" width="300">
  <ellipse cx="80" cy="80" rx="50" ry="30"
   style="fill:red;stroke:purple;stroke-width:2" />
  Sorry, your browser does not support inline SVG.
</svg>
"""
viz.svg(
    svgstr=svgstr,
    opts=dict(title='Example of SVG Rendering')
)
# close text window:
viz.close(win=textwindow)
# assert that the closed window doesn't exist
assert not viz.win_exists(textwindow), 'Closed window still exists'
# Arbitrary visdom content: send a raw plotly trace/layout pair directly
trace = dict(x=[1, 2, 3], y=[4, 5, 6], mode="markers+lines", type='custom',
             marker={'color': 'red', 'symbol': 104, 'size': "10"},
             text=["one", "two", "three"], name='1st Trace')
layout = dict(title="First Plot", xaxis={'title': 'x1'},
              yaxis={'title': 'x2'})
viz._send({'data': [trace], 'layout': layout, 'win': 'mywin'})
# PyTorch tensor
try:
    import torch
    viz.line(Y=torch.Tensor([[0., 0.], [1., 1.]]))
except ImportError:
    print('Skipped PyTorch example')
# audio demo:
tensor = np.random.uniform(-1, 1, 441000)
viz.audio(tensor=tensor, opts={'sample_frequency': 441000})
# audio demo:
# download from http://www.externalharddrive.com/waves/animal/dolphin.wav
try:
    audio_url = 'http://www.externalharddrive.com/waves/animal/dolphin.wav'
    audiofile = os.path.join(tempfile.gettempdir(), 'dolphin.wav')
    urllib.request.urlretrieve(audio_url, audiofile)
    if os.path.isfile(audiofile):
        viz.audio(audiofile=audiofile)
except BaseException:
    # best-effort: network may be unavailable
    print('Skipped audio example')
# get/set state: round-trip the server-side window state through JSON
import json
window = viz.text('test one')
data = json.loads(viz.get_window_data())
data[window]['content'] = 'test two'
viz.set_window_data(json.dumps(data))
try:
    input = raw_input  # for Python 2 compatibility
except NameError:
    pass
input('Waiting for callbacks, press enter to quit.')
if __name__ == '__main__':
DEFAULT_PORT = 8097
DEFAULT_HOSTNAME = "http://localhost"
| |
number of subsets
value=0
if (options["-numsubsets"].value==None or options["-numsubsets"].value > numberpredicted):
value=numberpredicted
else:
value=options["-numsubsets"].value
#update the progress bar
if totalforbar.value==value:
update_progress(1)
print "\n"
else:
modify()
if totalforbar.value<value:
update_progress(float(totalforbar.value)/value)
#close the datafile when the loop is done
datafile.close()
'''
calculations_multiprocessing
This function creates C pointers to pass to calculations, and creates X processes to calculate all
possible permutations for this (X specified by user, or default uses all cores on computer). It
also calculates the jackknife value from all these permutations, writes these things to a file, and
creates png files of the contact matrices. Function does not return anything.
group: number of simulations to be excluded
options["-TM1l"].value: length of TM1
options["-TM2l"].value: length of TM2
TM1reslist: list of residues in TM1
TM2reslist: list of residues in TM2
allxangle: overall xangle array
thetanCMall: overall contact matrix for total N simulations
thetanCMlh: left-handed contact matrix for total N simulations
thetanCMrh: right-handed contact matrix for total N simulations
thetanlh: left-handed probability for total N simulations
thetanrh: right-handed probability for total N simulations
thetanlhmean: left-handed mean of the crossing angle for N simulations
thetanrhmean: right-handed mean of the crossing angle for N simulations
thetanlhmode: left-handed mode of the crossing angle for N simulations
thetanrhmode: right-handed mode of the crossing angle for N simulations
allcontactmatricesnumdir: array of the overall contact matrices for N simulations
lhcontactmatricesnumdir: array of the left-handed contact matrices for N simulations
rhcontactmatricesnumdir: array of the right-handed contact matrices for N simulations
arraynumframesconsideredall: array of the number of overall frames considered in each simulation
arraynumframesconsideredlh: array of the number of left-handed frames considered in each simulation
arraynumframesconsideredrh: array of the number of right-handed frames considered in each simulation
xangles: array of xangles from each of the N simulations
TM1CAs: array of CAs from TM1
TM2CAs: array of CAs from TM2
quartilefileLHprob: file containing data for the boxplots for the LH probability
quartilefileRHprob: file containing data for the boxplots for the RH probability
quartilefileLHmean: file containing data for the boxplots for the LH mean
quartilefileRHmean: file containing data for the boxplots for the RH mean
quartilefileLHmode: file containing data for the boxplots for the LH mode
quartilefileRHmode: file containing data for the boxplots for the RH mode
'''
def calculations_multiprocessing(group,TM1reslist,TM2reslist,allxangle,thetanCMall,thetanCMlh,thetanCMrh,thetanlh,thetanrh,thetanlhmean,thetanrhmean,thetanlhmode1,thetanrhmode1,thetanlhmode2,thetanrhmode2,allcontactmatricesnumdir,lhcontactmatricesnumdir,rhcontactmatricesnumdir,arraynumframesconsideredall,arraynumframesconsideredlh,arraynumframesconsideredrh,xangles,TM1CAs,TM2CAs,quartilefileLHprob,quartilefileRHprob,quartilefileLHmean,quartilefileRHmean,quartilefileLHmode1,quartilefileRHmode1,quartilefileLHmode2,quartilefileRHmode2):
    """Run all jackknife permutations for one group size across -numcores processes.

    See the comment block above this function for the meaning of each argument.
    Side effects: creates/changes into a '<numdirs>choose<subsetsize>' directory,
    writes box-plot quartile lines, a top-contacts file and gnuplot contact-matrix
    plots there, then chdirs back up. Returns nothing.
    """
    #predict number of groups in chosen set: C(numdirs, group); exact under Python 2 integer division
    numberpredicted=factorial(options["-numdirs"].value)/(factorial(options["-numdirs"].value-group)*factorial(group))
    #print information about permutation
    print "groupsize = "+str(options["-numdirs"].value-group)
    print "predicted permutations = "+str(numberpredicted)
    if (options["-numsubsets"].value==None):
        print "number selected permutations = "+str(numberpredicted)
    elif (options["-numsubsets"].value>numberpredicted):
        # NOTE(review): this prints the requested -numsubsets value, but the warning
        # below (and the division-of-labour code further down) use numberpredicted
        # in this case - confirm which value should be reported here.
        print "number selected permutations = "+str(options["-numsubsets"].value)
        print
        print "WARNING: the number of selected permutations you have chosen is more than the number of possible unique permutations - will only use number of predicted permutations\n"
    else:
        print "number selected permutations = "+str(options["-numsubsets"].value)
        print
    #commands to make jackknifing subdirectories, and then move all of the files there for less clutter
    if os.path.isdir("%ichoose%i" % (options["-numdirs"].value,options["-numdirs"].value-group)):
        print "The directory %ichoose%i already exists - will rename all old files with the extension .bak\n" % (options["-numdirs"].value,options["-numdirs"].value-group)
        for path,dirs,filenames in os.walk("%ichoose%i" % (options["-numdirs"].value,options["-numdirs"].value-group)):
            os.chdir("%ichoose%i/" % (options["-numdirs"].value,options["-numdirs"].value-group))
            for filename in filenames:
                # stale NFS handle files cannot be renamed; skip them
                if filename[:4]==".nfs":
                    pass
                else:
                    os.system("mv %s %s.bak" % (filename,filename))
    else:
        os.system("mkdir %ichoose%i" % (options["-numdirs"].value,options["-numdirs"].value-group))
        os.chdir("%ichoose%i/" % (options["-numdirs"].value,options["-numdirs"].value-group))
    #initializing arrays to be fed into C
    allcontactmatricesnumdirin=[]
    lhcontactmatricesnumdirin=[]
    rhcontactmatricesnumdirin=[]
    #change all variables to C type variables (raw float pointers into the numpy buffers)
    for i in range(len(allcontactmatricesnumdir)):
        matrix=[array.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) for array in allcontactmatricesnumdir[i]]
        allcontactmatricesnumdirin.append(matrix)
    for i in range(len(lhcontactmatricesnumdir)):
        matrix=[array.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) for array in lhcontactmatricesnumdir[i]]
        lhcontactmatricesnumdirin.append(matrix)
    for i in range(len(rhcontactmatricesnumdir)):
        matrix=[array.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) for array in rhcontactmatricesnumdir[i]]
        rhcontactmatricesnumdirin.append(matrix)
    #create the C pointers for the number of frames arrays
    Carraynumframesconsideredall=(ctypes.c_int * len(arraynumframesconsideredall))(*arraynumframesconsideredall)
    Carraynumframesconsideredlh=(ctypes.c_int * len(arraynumframesconsideredlh))(*arraynumframesconsideredlh)
    Carraynumframesconsideredrh=(ctypes.c_int * len(arraynumframesconsideredrh))(*arraynumframesconsideredrh)
    #create xanglespointer
    xanglespointer=[(ctypes.c_float * len(array)) (*array) for array in xangles]
    #create the subsets object to get all the permutations
    ssobject=subsets.EnumeratedSubsets()
    #make sure the division of labour is correct
    filenames=[]
    starts=[]
    ends=[]
    # dnum = permutations per worker (integer division); remainder goes to the last worker
    if (options["-numsubsets"].value==None or options["-numsubsets"].value>numberpredicted):
        dnum=numberpredicted/options["-numcores"].value
        remainder=numberpredicted%options["-numcores"].value
    else:
        dnum=options["-numsubsets"].value/options["-numcores"].value
        remainder=options["-numsubsets"].value%options["-numcores"].value
    start=0
    end=dnum
    #work out each worker's permutation index range (the processes themselves are started below)
    for i in range(options["-numcores"].value):
        filenames.append("datafile%i.h5" % (i+1))
        starts.append(start)
        ends.append(end)
        start=end+1
        end+=dnum
        if i==options["-numcores"].value-2:
            # extend the range appended on the final iteration with the leftover permutations;
            # NOTE(review): verify the -1 here keeps the total permutation count exact
            end=end+remainder-1
    #import a manager for the processes
    manager=multiprocessing.Manager()
    #run calculations
    jobs=[]
    for i in range(options["-numcores"].value):
        process=multiprocessing.Process(target=calculations,args=(starts[i],ends[i],numberpredicted,ssobject,group,filenames[i],allcontactmatricesnumdirin,lhcontactmatricesnumdirin,rhcontactmatricesnumdirin,Carraynumframesconsideredall,Carraynumframesconsideredlh,Carraynumframesconsideredrh,xanglespointer,totalforbar))
        jobs.append(process)
        process.start()
    #don't want control flow to continue in the parent until all of the children are finished
    for job in jobs:
        job.join()
    #restart the count for the next group
    restart()
    #reset the integerarray for the next group
    reset()
    #create a link to the file containing all of the data (one PyTables file per worker)
    tables=[]
    datafiles=[]
    for i in range(options["-numcores"].value):
        datafile=openFile(filenames[i],mode="r")
        datafiles.append(datafile)
        tables.append(datafile.root.detector.readout)
    #retrieve all of the data
    # -99.00 is the "no data" sentinel for each statistic.
    # NOTE(review): lhprobthetas/rhprobthetas/lhmeanthetas/rhmeanthetas are only
    # created when the matching theta is not -99.00, yet boxplotstats() is called
    # on them unconditionally below - a NameError is possible if the sentinel can
    # reach this function; confirm with the callers.
    if thetanlh!=-99.00:
        lhprobthetas=[]
        for table in tables:
            for x in table.iterrows():
                lhprobthetas.append(x['lhprob'])
    if thetanrh!=-99.00:
        rhprobthetas=[]
        for table in tables:
            for x in table.iterrows():
                rhprobthetas.append(x['rhprob'])
    if thetanlhmean!=-99.00:
        lhmeanthetas=[]
        for table in tables:
            for x in table.iterrows():
                lhmeanthetas.append(x['lhmean'])
    if thetanrhmean!=-99.00:
        rhmeanthetas=[]
        for table in tables:
            for x in table.iterrows():
                rhmeanthetas.append(x['rhmean'])
    if thetanlhmode1!=-99.00:
        lhmode1thetas=[]
        for table in tables:
            for x in table.iterrows():
                lhmode1thetas.append(x['lhmode1'])
    if thetanrhmode1!=-99.00:
        rhmode1thetas=[]
        for table in tables:
            for x in table.iterrows():
                rhmode1thetas.append(x['rhmode1'])
    if thetanlhmode2!=-99.00:
        lhmode2thetas=[]
        for table in tables:
            for x in table.iterrows():
                lhmode2thetas.append(x['lhmode2'])
    if thetanrhmode2!=-99.00:
        rhmode2thetas=[]
        for table in tables:
            for x in table.iterrows():
                rhmode2thetas.append(x['rhmode2'])
    #get the quartiles and write to their respective file (format: subsetsize min q1 median q3 max)
    minLHprob,firstquartileLHprob,medianLHprob,thirdquartileLHprob,maxLHprob=boxplotstats(lhprobthetas)
    quartilefileLHprob.write(str(options["-numdirs"].value-group)+" "+str(minLHprob)+" "+str(firstquartileLHprob)+" "+str(medianLHprob)+" "+str(thirdquartileLHprob)+" "+str(maxLHprob)+"\n")
    minRHprob,firstquartileRHprob,medianRHprob,thirdquartileRHprob,maxRHprob=boxplotstats(rhprobthetas)
    quartilefileRHprob.write(str(options["-numdirs"].value-group)+" "+str(minRHprob)+" "+str(firstquartileRHprob)+" "+str(medianRHprob)+" "+str(thirdquartileRHprob)+" "+str(maxRHprob)+"\n")
    minLHmean,firstquartileLHmean,medianLHmean,thirdquartileLHmean,maxLHmean=boxplotstats(lhmeanthetas)
    quartilefileLHmean.write(str(options["-numdirs"].value-group)+" "+str(minLHmean)+" "+str(firstquartileLHmean)+" "+str(medianLHmean)+" "+str(thirdquartileLHmean)+" "+str(maxLHmean)+"\n")
    minRHmean,firstquartileRHmean,medianRHmean,thirdquartileRHmean,maxRHmean=boxplotstats(rhmeanthetas)
    quartilefileRHmean.write(str(options["-numdirs"].value-group)+" "+str(minRHmean)+" "+str(firstquartileRHmean)+" "+str(medianRHmean)+" "+str(thirdquartileRHmean)+" "+str(maxRHmean)+"\n")
    if thetanlhmode1!=-99.00:
        minLHmode1,firstquartileLHmode1,medianLHmode1,thirdquartileLHmode1,maxLHmode1=boxplotstats(lhmode1thetas)
        quartilefileLHmode1.write(str(options["-numdirs"].value-group)+" "+str(minLHmode1)+" "+str(firstquartileLHmode1)+" "+str(medianLHmode1)+" "+str(thirdquartileLHmode1)+" "+str(maxLHmode1)+"\n")
    if thetanrhmode1!=-99.00:
        minRHmode1,firstquartileRHmode1,medianRHmode1,thirdquartileRHmode1,maxRHmode1=boxplotstats(rhmode1thetas)
        quartilefileRHmode1.write(str(options["-numdirs"].value-group)+" "+str(minRHmode1)+" "+str(firstquartileRHmode1)+" "+str(medianRHmode1)+" "+str(thirdquartileRHmode1)+" "+str(maxRHmode1)+"\n")
    if thetanlhmode2!=-99.00:
        minLHmode2,firstquartileLHmode2,medianLHmode2,thirdquartileLHmode2,maxLHmode2=boxplotstats(lhmode2thetas)
        quartilefileLHmode2.write(str(options["-numdirs"].value-group)+" "+str(minLHmode2)+" "+str(firstquartileLHmode2)+" "+str(medianLHmode2)+" "+str(thirdquartileLHmode2)+" "+str(maxLHmode2)+"\n")
    if thetanrhmode2!=-99.00:
        minRHmode2,firstquartileRHmode2,medianRHmode2,thirdquartileRHmode2,maxRHmode2=boxplotstats(rhmode2thetas)
        quartilefileRHmode2.write(str(options["-numdirs"].value-group)+" "+str(minRHmode2)+" "+str(firstquartileRHmode2)+" "+str(medianRHmode2)+" "+str(thirdquartileRHmode2)+" "+str(maxRHmode2)+"\n")
    #retrieve all of the contact matrices
    allcontactmatrices=[]
    for table in tables:
        for x in table.iterrows():
            allcontactmatrices.append(x['CMall'])
    lhcontactmatrices=[]
    for table in tables:
        for x in table.iterrows():
            lhcontactmatrices.append(x['CMlh'])
    rhcontactmatrices=[]
    for table in tables:
        for x in table.iterrows():
            rhcontactmatrices.append(x['CMrh'])
    #write the contact matrices and get closest contacts
    closestcontactsoverall,closestcontactslh,closestcontactsrh=contact_matrix_closest_contacts(allcontactmatrices,lhcontactmatrices,rhcontactmatrices,TM1CAs,TM2CAs)
    #print contacts to a file
    contactsfile=open(options["-outtopres"].value,"w")
    contactsfile.write("Top contacts for %i simulations\n\n" % (options["-numdirs"].value))
    contactsfile.write("Top overall contacts:\n\n")
    for i in range(options["-topres"].value):
        contactsfile.write("\t%s-%s\n" % (closestcontactsoverall[i][0],closestcontactsoverall[i][1]))
    contactsfile.write("\nTop left-handed contacts:\n\n")
    for i in range(options["-topres"].value):
        contactsfile.write("\t%s-%s\n" % (closestcontactslh[i][0],closestcontactslh[i][1]))
    contactsfile.write("\nTop right-handed contacts:\n\n")
    for i in range(options["-topres"].value):
        contactsfile.write("\t%s-%s\n" % (closestcontactsrh[i][0],closestcontactsrh[i][1]))
    contactsfile.close()
    #set the pallete for the contact matrix
    # colour breakpoints at 10/20/30% of the permutation count (integer division in Python 2)
    if (options["-numsubsets"].value==None):
        var1=10*(numberpredicted/100)
        var2=20*(numberpredicted/100)
        var3=30*(numberpredicted/100)
    else:
        var1=10*(options["-numsubsets"].value/100)
        var2=20*(options["-numsubsets"].value/100)
        var3=30*(options["-numsubsets"].value/100)
    palette="0 \\\"yellow\\\", "+str(var1)+" \\\"purple\\\", "+str(var2)+" \\\"red\\\", "+str(var3)+" \\\"black\\\""
    if (options["-numsubsets"].value==None):
        tempcbrange=50*(float(numberpredicted)/float(100))
    else:
        tempcbrange=50*(float(options["-numsubsets"].value)/float(100))
    cbrange="0:"+str(tempcbrange)
    #make the gnuplot script, then run gnuplot on all of the contact matrix files to make all of the plots
    os.system("cp /sansom/sb8/bioc1030/scripts/make_contact_matrix.gnu tmp.gnu")
    os.system("sed -e \"s|XTICS|"+str(TM1reslist)+"|g;s|YTICS|"+str(TM2reslist)+"|g;s|PALETTE|"+str(palette)+"|g;s|CBRANGE|"+str(cbrange)+"|g\" tmp.gnu > make_contact_matrix.gnu")
    os.system("rm tmp.gnu")
    subsetsize=options["-numdirs"].value-group
    if (options["-numsubsets"].value==None):
        os.system("/usr/bin/gnuplot -e \"f1='"+str(options["-CMall"].value)+"';f2='"+str(options["-CMlh"].value)+"';f3='"+str(options["-CMrh"].value)+"';numdirs='"+str(options["-numdirs"].value)+"';subset='"+str(subsetsize)+"';subsets='All';max='"+str(options["-TM1l"].value+1)+"'\" make_contact_matrix.gnu")
    else:
        os.system("/usr/bin/gnuplot -e \"f1='"+str(options["-CMall"].value)+"';f2='"+str(options["-CMlh"].value)+"';f3='"+str(options["-CMrh"].value)+"';numdirs='"+str(options["-numdirs"].value)+"';subset='"+str(subsetsize)+"';subsets='"+str(options["-numsubsets"].value)+"';max='"+str(options["-TM1l"].value+1)+"'\" make_contact_matrix.gnu")
    #close the data file, remove it, and change into another directory, in case there is another group
    for datafile in datafiles:
        datafile.close()
    os.system("rm *.h5")
    os.chdir("../")
'''
Option class
borrowed from martinize.py - this gives each Option object a type (the conversion function,
e.g. boolean or string), a number (the number of arguments expected for the option), the value
of the option (can be a default, or specified by the user), and the description of the
option (what the option is for, how to use it, when is it best used, etc.)
'''
class Option:
    """A single command-line option: a conversion function, an expected
    argument count, a current value and a help description."""
    def __init__(self, func=str, num=1, default=None, description=""):
        self.func = func                  # callable used to convert raw strings
        self.num = num                    # number of arguments the option takes
        self.value = default              # current value (default until set)
        self.description = description    # help text shown in usage output
    def __nonzero__(self):
        # Python 2 truth value: boolean options are true unless explicitly False.
        return self.value != False if self.func == bool else bool(self.value)
    def __str__(self):
        return str(self.value) if self.value else ""
    def setvalue(self, v):
        # One argument gives a scalar; several give a converted list.
        self.value = self.func(v[0]) if len(v) == 1 else [self.func(i) for i in v]
'''
options
Structure borrowed from martinize.py - list of all the options names, as well as their respective
option objects (with type, number of arguments, default values, and explanations).
'''
options=[
("-g", Option(str, 1,None, "Input file (gro)")),
("-x", Option(str, 1,None, "Input file (xtc)")),
("-s", Option(int, 1,1, "Number of frames to skip (default=1)")),
("-ts", Option(int, 1,200, "Timestep used in the simulation (default=200)")),
("-res", Option(int, 4,None, "Selections for helices - takes 4 arguments:\n\t\t\t\t arg1: first residue of first helix\n\t\t\t\t arg2: last residue of first helix\n\t\t\t\t arg3: first residue last helix\n\t\t\t\t arg4: last residue of second helix")),
("-bounds", Option(float,2,None, "Lower and upper bound for crossing angle \n\t\t\t\t takes 2 arguments:\n\t\t\t\t arg1: lower bound xangle\n\t\t\t\t arg2: upper bound xangle")),
("-binsize", Option(float,1,1, "Number of bins for the crossing angle \n\t\t\t\t probability (default=1)")),
("-numdirs", Option(int, 1,5, "Number of directories you | |
uk_153
+ 14400 * uk_154
+ 5359375 * uk_155
+ 6890625 * uk_156
+ 918750 * uk_157
+ 8859375 * uk_158
+ 1181250 * uk_159
+ 1521570 * uk_16
+ 157500 * uk_160
+ 11390625 * uk_161
+ 1518750 * uk_162
+ 202500 * uk_163
+ 27000 * uk_164
+ 3025 * uk_17
+ 7920 * uk_18
+ 1650 * uk_19
+ 55 * uk_2
+ 880 * uk_20
+ 9625 * uk_21
+ 12375 * uk_22
+ 1650 * uk_23
+ 20736 * uk_24
+ 4320 * uk_25
+ 2304 * uk_26
+ 25200 * uk_27
+ 32400 * uk_28
+ 4320 * uk_29
+ 144 * uk_3
+ 900 * uk_30
+ 480 * uk_31
+ 5250 * uk_32
+ 6750 * uk_33
+ 900 * uk_34
+ 256 * uk_35
+ 2800 * uk_36
+ 3600 * uk_37
+ 480 * uk_38
+ 30625 * uk_39
+ 30 * uk_4
+ 39375 * uk_40
+ 5250 * uk_41
+ 50625 * uk_42
+ 6750 * uk_43
+ 900 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 370428042384 * uk_47
+ 77172508830 * uk_48
+ 41158671376 * uk_49
+ 16 * uk_5
+ 450172968175 * uk_50
+ 578793816225 * uk_51
+ 77172508830 * uk_52
+ 153424975 * uk_53
+ 401694480 * uk_54
+ 83686350 * uk_55
+ 44632720 * uk_56
+ 488170375 * uk_57
+ 627647625 * uk_58
+ 83686350 * uk_59
+ 175 * uk_6
+ 1051709184 * uk_60
+ 219106080 * uk_61
+ 116856576 * uk_62
+ 1278118800 * uk_63
+ 1643295600 * uk_64
+ 219106080 * uk_65
+ 45647100 * uk_66
+ 24345120 * uk_67
+ 266274750 * uk_68
+ 342353250 * uk_69
+ 225 * uk_7
+ 45647100 * uk_70
+ 12984064 * uk_71
+ 142013200 * uk_72
+ 182588400 * uk_73
+ 24345120 * uk_74
+ 1553269375 * uk_75
+ 1997060625 * uk_76
+ 266274750 * uk_77
+ 2567649375 * uk_78
+ 342353250 * uk_79
+ 30 * uk_8
+ 45647100 * uk_80
+ 166375 * uk_81
+ 435600 * uk_82
+ 90750 * uk_83
+ 48400 * uk_84
+ 529375 * uk_85
+ 680625 * uk_86
+ 90750 * uk_87
+ 1140480 * uk_88
+ 237600 * uk_89
+ 2572416961 * uk_9
+ 126720 * uk_90
+ 1386000 * uk_91
+ 1782000 * uk_92
+ 237600 * uk_93
+ 49500 * uk_94
+ 26400 * uk_95
+ 288750 * uk_96
+ 371250 * uk_97
+ 49500 * uk_98
+ 14080 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 116820 * uk_100
+ 148500 * uk_101
+ 95040 * uk_102
+ 1723095 * uk_103
+ 2190375 * uk_104
+ 1401840 * uk_105
+ 2784375 * uk_106
+ 1782000 * uk_107
+ 1140480 * uk_108
+ 912673 * uk_109
+ 4919743 * uk_11
+ 1354896 * uk_110
+ 112908 * uk_111
+ 1665393 * uk_112
+ 2117025 * uk_113
+ 1354896 * uk_114
+ 2011392 * uk_115
+ 167616 * uk_116
+ 2472336 * uk_117
+ 3142800 * uk_118
+ 2011392 * uk_119
+ 7303536 * uk_12
+ 13968 * uk_120
+ 206028 * uk_121
+ 261900 * uk_122
+ 167616 * uk_123
+ 3038913 * uk_124
+ 3863025 * uk_125
+ 2472336 * uk_126
+ 4910625 * uk_127
+ 3142800 * uk_128
+ 2011392 * uk_129
+ 608628 * uk_13
+ 2985984 * uk_130
+ 248832 * uk_131
+ 3670272 * uk_132
+ 4665600 * uk_133
+ 2985984 * uk_134
+ 20736 * uk_135
+ 305856 * uk_136
+ 388800 * uk_137
+ 248832 * uk_138
+ 4511376 * uk_139
+ 8977263 * uk_14
+ 5734800 * uk_140
+ 3670272 * uk_141
+ 7290000 * uk_142
+ 4665600 * uk_143
+ 2985984 * uk_144
+ 1728 * uk_145
+ 25488 * uk_146
+ 32400 * uk_147
+ 20736 * uk_148
+ 375948 * uk_149
+ 11411775 * uk_15
+ 477900 * uk_150
+ 305856 * uk_151
+ 607500 * uk_152
+ 388800 * uk_153
+ 248832 * uk_154
+ 5545233 * uk_155
+ 7049025 * uk_156
+ 4511376 * uk_157
+ 8960625 * uk_158
+ 5734800 * uk_159
+ 7303536 * uk_16
+ 3670272 * uk_160
+ 11390625 * uk_161
+ 7290000 * uk_162
+ 4665600 * uk_163
+ 2985984 * uk_164
+ 3025 * uk_17
+ 5335 * uk_18
+ 7920 * uk_19
+ 55 * uk_2
+ 660 * uk_20
+ 9735 * uk_21
+ 12375 * uk_22
+ 7920 * uk_23
+ 9409 * uk_24
+ 13968 * uk_25
+ 1164 * uk_26
+ 17169 * uk_27
+ 21825 * uk_28
+ 13968 * uk_29
+ 97 * uk_3
+ 20736 * uk_30
+ 1728 * uk_31
+ 25488 * uk_32
+ 32400 * uk_33
+ 20736 * uk_34
+ 144 * uk_35
+ 2124 * uk_36
+ 2700 * uk_37
+ 1728 * uk_38
+ 31329 * uk_39
+ 144 * uk_4
+ 39825 * uk_40
+ 25488 * uk_41
+ 50625 * uk_42
+ 32400 * uk_43
+ 20736 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 249524445217 * uk_47
+ 370428042384 * uk_48
+ 30869003532 * uk_49
+ 12 * uk_5
+ 455317802097 * uk_50
+ 578793816225 * uk_51
+ 370428042384 * uk_52
+ 153424975 * uk_53
+ 270585865 * uk_54
+ 401694480 * uk_55
+ 33474540 * uk_56
+ 493749465 * uk_57
+ 627647625 * uk_58
+ 401694480 * uk_59
+ 177 * uk_6
+ 477215071 * uk_60
+ 708442992 * uk_61
+ 59036916 * uk_62
+ 870794511 * uk_63
+ 1106942175 * uk_64
+ 708442992 * uk_65
+ 1051709184 * uk_66
+ 87642432 * uk_67
+ 1292725872 * uk_68
+ 1643295600 * uk_69
+ 225 * uk_7
+ 1051709184 * uk_70
+ 7303536 * uk_71
+ 107727156 * uk_72
+ 136941300 * uk_73
+ 87642432 * uk_74
+ 1588975551 * uk_75
+ 2019884175 * uk_76
+ 1292725872 * uk_77
+ 2567649375 * uk_78
+ 1643295600 * uk_79
+ 144 * uk_8
+ 1051709184 * uk_80
+ 166375 * uk_81
+ 293425 * uk_82
+ 435600 * uk_83
+ 36300 * uk_84
+ 535425 * uk_85
+ 680625 * uk_86
+ 435600 * uk_87
+ 517495 * uk_88
+ 768240 * uk_89
+ 2572416961 * uk_9
+ 64020 * uk_90
+ 944295 * uk_91
+ 1200375 * uk_92
+ 768240 * uk_93
+ 1140480 * uk_94
+ 95040 * uk_95
+ 1401840 * uk_96
+ 1782000 * uk_97
+ 1140480 * uk_98
+ 7920 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 118140 * uk_100
+ 148500 * uk_101
+ 64020 * uk_102
+ 1762255 * uk_103
+ 2215125 * uk_104
+ 954965 * uk_105
+ 2784375 * uk_106
+ 1200375 * uk_107
+ 517495 * uk_108
+ 238328 * uk_109
+ 3144578 * uk_11
+ 372868 * uk_110
+ 46128 * uk_111
+ 688076 * uk_112
+ 864900 * uk_113
+ 372868 * uk_114
+ 583358 * uk_115
+ 72168 * uk_116
+ 1076506 * uk_117
+ 1353150 * uk_118
+ 583358 * uk_119
+ 4919743 * uk_12
+ 8928 * uk_120
+ 133176 * uk_121
+ 167400 * uk_122
+ 72168 * uk_123
+ 1986542 * uk_124
+ 2497050 * uk_125
+ 1076506 * uk_126
+ 3138750 * uk_127
+ 1353150 * uk_128
+ 583358 * uk_129
+ 608628 * uk_13
+ 912673 * uk_130
+ 112908 * uk_131
+ 1684211 * uk_132
+ 2117025 * uk_133
+ 912673 * uk_134
+ 13968 * uk_135
+ 208356 * uk_136
+ 261900 * uk_137
+ 112908 * uk_138
+ 3107977 * uk_139
+ 9078701 * uk_14
+ 3906675 * uk_140
+ 1684211 * uk_141
+ 4910625 * uk_142
+ 2117025 * uk_143
+ 912673 * uk_144
+ 1728 * uk_145
+ | |
self.send_chat_msg('No indexes provided.')
else:
indexes = None
by_range = False
try:
if ':' in to_delete:
range_indexes = map(int, to_delete.split(':'))
temp_indexes = range(range_indexes[0], range_indexes[1] + 1)
if len(temp_indexes) > 1:
by_range = True
else:
temp_indexes = map(int, to_delete.split(','))
except ValueError as ve:
log.error('wrong format: %s' % ve)
else:
indexes = []
for i in temp_indexes:
if i < len(self.playlist.track_list) and i not in indexes:
indexes.append(i)
if indexes is not None and len(indexes) > 0:
result = self.playlist.delete(indexes, by_range)
if result is not None:
if by_range:
self.send_chat_msg('Deleted from index: %s to index: %s' %
(result['from'], result['to']))
elif result['deleted_indexes_len'] is 1:
self.send_chat_msg('Deleted %s' % result['track_title'])
else:
self.send_chat_msg('Deleted tracks at index: %s' %
', '.join(result['deleted_indexes']))
else:
self.send_chat_msg('Nothing was deleted.')
def do_media_replay(self):
""" Replay the currently playing track. """
if self.is_client_mod:
if self.playlist.track is not None:
self.cancel_timer()
track = self.playlist.replay()
self.send_yut_play(track.id, track.time, track.title)
self.timer(track.time)
    def do_play_media(self):
        """ Resume playback of a paused track. """
        if self.is_client_mod:
            if self.playlist.track is not None:
                # Cancel any running countdown before changing playback state.
                if self.playlist.has_active_track:
                    self.cancel_timer()
                if self.playlist.is_paused:
                    # Resume from the elapsed position; playlist state is re-read
                    # after play() since it may have been updated by the call.
                    self.playlist.play(self.playlist.elapsed)
                    self.send_yut_play(self.playlist.track.id, self.playlist.track.time,
                                       self.playlist.track.title, self.playlist.elapsed)
                    self.timer(self.playlist.remaining)
def do_media_pause(self):
""" Pause a track. """
if self.is_client_mod:
track = self.playlist.track
if track is not None:
if self.playlist.has_active_track:
self.cancel_timer()
self.playlist.pause()
self.send_yut_pause(track.id, track.time, self.playlist.elapsed)
def do_close_media(self):
""" Close a track playing. """
if self.is_client_mod:
if self.playlist.has_active_track:
self.cancel_timer()
self.playlist.stop()
self.send_yut_stop(self.playlist.track.id, self.playlist.track.time, self.playlist.elapsed)
    def do_seek_media(self, time_point):
        """
        Time search a track.

        :param time_point: The time point in which to search to.
        :type time_point: str
        """
        if self.is_client_mod:
            # Require an explicit unit (e.g. '1h2m3s') before attempting to parse.
            if ('h' in time_point) or ('m' in time_point) or ('s' in time_point):
                offset = pinylib.string_util.convert_to_seconds(time_point)
                if offset == 0:
                    self.send_chat_msg('Invalid seek time.')
                else:
                    track = self.playlist.track
                    if track is not None:
                        # Only seek to points strictly inside the track's duration.
                        if 0 < offset < track.time:
                            if self.playlist.has_active_track:
                                self.cancel_timer()
                            if self.playlist.is_paused:
                                self.playlist.pause(offset=offset)  # stay paused at the new offset
                                self.send_yut_pause(track.id, track.time, offset)
                            else:
                                self.playlist.play(offset)
                                self.send_yut_play(track.id, track.time, track.title, offset)
                            # Restart the countdown with the time remaining after the seek.
                            self.timer(self.playlist.remaining)
def do_clear_playlist(self):
""" Clear the playlist for items."""
if self.is_client_mod:
if len(self.playlist.track_list) > 0:
pl_length = str(len(self.playlist.track_list))
self.playlist.clear()
self.send_chat_msg('Deleted %s items in the playlist.' % pl_length)
else:
self.send_chat_msg('The playlist is empty, nothing to delete.')
def do_playlist_info(self): # TODO: this needs more work !
""" Shows the next tracks in the playlist. """
if self.is_client_mod:
if len(self.playlist.track_list) > 0:
tracks = self.playlist.get_tracks()
if len(tracks) > 0:
# If i is 0 then mark that as the next track
_ = '\n'.join('(%s) - %s %s' % (track[0], track[1].title, self.format_time(track[1].time))
for i, track in enumerate(tracks))
self.send_chat_msg(_)
def do_youtube_search(self, search_str):
"""
Search youtube for a list of matching candidates.
:param search_str: The search term to search for.
:type search_str: str
"""
if self.is_client_mod:
if len(search_str) == 0:
self.send_chat_msg('Missing search string.')
else:
self.search_list = youtube.search_list(search_str, results=5)
if len(self.search_list) > 0:
self.is_search_list_yt_playlist = False
_ = '\n'.join('(%s) %s %s' % (i, d['video_title'], self.format_time(d['video_time']))
for i, d in enumerate(self.search_list)) #
self.send_chat_msg(_)
else:
self.send_chat_msg('Could not find anything matching: %s' % search_str)
    def do_play_youtube_search(self, int_choice):
        """
        Play a track from a previous youtube search list.

        :param int_choice: The index of the track in the search.
        :type int_choice: str | int
        """
        if self.is_client_mod:
            # The search list may instead hold youtube playlist id's (see the flag
            # set by the search commands); those cannot be played from here.
            if not self.is_search_list_yt_playlist:
                if len(self.search_list) > 0:
                    try:
                        int_choice = int(int_choice)
                    except ValueError:
                        self.send_chat_msg('Only numbers allowed.')
                    else:
                        if 0 <= int_choice <= len(self.search_list) - 1:
                            # Queue the chosen track if something is already playing,
                            # otherwise start playing it immediately.
                            if self.playlist.has_active_track:
                                track = self.playlist.add(self.active_user.nick, self.search_list[int_choice])
                                self.send_chat_msg('Added (%s) %s %s' %
                                                   (self.playlist.last_index,
                                                    track.title, self.format_time(track.time)))
                            else:
                                track = self.playlist.start(self.active_user.nick, self.search_list[int_choice])
                                self.send_yut_play(track.id, track.time, track.title)
                                self.timer(track.time)
                        else:
                            self.send_chat_msg('Please make a choice between 0-%s' % str(len(self.search_list) - 1))
                else:
                    self.send_chat_msg('No youtube track id\'s in the search list.')
            else:
                self.send_chat_msg('The search list only contains youtube playlist id\'s.')
def do_clear(self):
""" Clears the chat box. """
self.send_chat_msg('_\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'
'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n_')
def do_nick(self, new_nick):
"""
Set a new nick for the bot.
:param new_nick: The new nick name.
:type new_nick: str
"""
if len(new_nick) is 0:
self.nickname = pinylib.string_util.create_random_string(5, 25)
self.set_nick()
else:
self.nickname = new_nick
self.set_nick()
def do_kick(self, user_name):
"""
Kick a user out of the room.
:param user_name: The username to kick.
:type user_name: str
"""
if self.is_client_mod:
if len(user_name) is 0:
self.send_chat_msg('Missing username.')
elif user_name == self.nickname:
self.send_chat_msg('Action not allowed.')
else:
if user_name.startswith('*'):
user_name = user_name.replace('*', '')
_users = self.users.search_containing(user_name)
if len(_users) > 0:
for i, user in enumerate(_users):
if user.nick != self.nickname and user.user_level > self.active_user.user_level:
if i <= pinylib.CONFIG.B_MAX_MATCH_BANS - 1:
self.send_kick_msg(user.id)
else:
_user = self.users.search_by_nick(user_name)
if _user is None:
self.send_chat_msg('No user named: %s' % user_name)
elif _user.user_level < self.active_user.user_level:
self.send_chat_msg('Not allowed.')
else:
self.send_kick_msg(_user.id)
def do_ban(self, user_name):
"""
Ban a user from the room.
:param user_name: The username to ban.
:type user_name: str
"""
if self.is_client_mod:
if len(user_name) is 0:
self.send_chat_msg('Missing username.')
elif user_name == self.nickname:
self.send_chat_msg('Action not allowed.')
else:
if user_name.startswith('*'):
user_name = user_name.replace('*', '')
_users = self.users.search_containing(user_name)
if len(_users) > 0:
for i, user in enumerate(_users):
if user.nick != self.nickname and user.user_level > self.active_user.user_level:
if i <= pinylib.CONFIG.B_MAX_MATCH_BANS - 1:
self.send_ban_msg(user.id)
else:
_user = self.users.search_by_nick(user_name)
if _user is None:
self.send_chat_msg('No user named: %s' % user_name)
elif _user.user_level < self.active_user.user_level:
self.send_chat_msg('Not allowed.')
else:
self.send_ban_msg(_user.id)
def do_bad_nick(self, bad_nick):
"""
Adds a username to the nick bans file.
:param bad_nick: The bad nick to write to the nick bans file.
:type bad_nick: str
"""
if self.is_client_mod:
if len(bad_nick) is 0:
self.send_chat_msg('Missing username.')
elif bad_nick in pinylib.CONFIG.B_NICK_BANS:
self.send_chat_msg('%s is already in list.' % bad_nick)
else:
pinylib.file_handler.file_writer(self.config_path,
pinylib.CONFIG.B_NICK_BANS_FILE_NAME, bad_nick)
self.send_chat_msg('%s was added to file.' % bad_nick)
self.load_list(nicks=True)
def do_remove_bad_nick(self, bad_nick):
"""
Removes nick from the nick bans file.
:param bad_nick: The bad nick to remove from the nick bans file.
:type bad_nick: str
"""
if self.is_client_mod:
if len(bad_nick) is 0:
self.send_chat_msg('Missing username')
else:
if bad_nick in pinylib.CONFIG.B_NICK_BANS:
rem = pinylib.file_handler.remove_from_file(self.config_path,
pinylib.CONFIG.B_NICK_BANS_FILE_NAME,
bad_nick)
if rem:
self.send_chat_msg('%s was removed.' % bad_nick)
self.load_list(nicks=True)
def do_bad_string(self, bad_string):
"""
Adds a string to the string bans file.
:param bad_string: The bad string to add to the string bans file.
:type bad_string: str
"""
if self.is_client_mod:
if len(bad_string) is 0:
self.send_chat_msg('Ban string can\'t be blank.')
elif len(bad_string) < 3:
self.send_chat_msg('Ban string to short: ' + str(len(bad_string)))
elif bad_string in pinylib.CONFIG.B_STRING_BANS:
self.send_chat_msg('%s is already in list.' % bad_string)
else:
pinylib.file_handler.file_writer(self.config_path,
pinylib.CONFIG.B_STRING_BANS_FILE_NAME, bad_string)
self.send_chat_msg('%s was added to file.' % bad_string)
self.load_list(strings=True)
def do_remove_bad_string(self, bad_string):
"""
Removes a string from the string bans file.
:param bad_string: The bad string to remove from the string bans file.
:type bad_string: str
"""
if self.is_client_mod:
if len(bad_string) is 0:
self.send_chat_msg('Missing word string.')
else:
if bad_string in pinylib.CONFIG.B_STRING_BANS:
rem = pinylib.file_handler.remove_from_file(self.config_path,
pinylib.CONFIG.B_STRING_BANS_FILE_NAME,
bad_string)
if rem:
self.send_chat_msg('%s was removed.' % bad_string)
self.load_list(strings=True)
def do_bad_account(self, bad_account_name):
"""
Adds an account name to the account bans file.
:param bad_account_name: The bad account name to add to the account bans file.
:type bad_account_name: str
"""
if self.is_client_mod:
if len(bad_account_name) is 0:
self.send_chat_msg('Account can\'t be blank.')
elif len(bad_account_name) < 3:
self.send_chat_msg('Account to short: ' + str(len(bad_account_name)))
elif bad_account_name in pinylib.CONFIG.B_ACCOUNT_BANS:
self.send_chat_msg('%s is already in list.' % bad_account_name)
else:
pinylib.file_handler.file_writer(self.config_path,
pinylib.CONFIG.B_ACCOUNT_BANS_FILE_NAME,
bad_account_name)
self.send_chat_msg('%s was added to file.' % bad_account_name)
self.load_list(accounts=True)
def do_remove_bad_account(self, bad_account):
"""
Removes an account from the account bans file.
:param bad_account: The badd account name to remove from account bans file.
:type bad_account: str
"""
if self.is_client_mod:
if len(bad_account) is 0:
self.send_chat_msg('Missing account.')
else:
if bad_account in pinylib.CONFIG.B_ACCOUNT_BANS:
rem = pinylib.file_handler.remove_from_file(self.config_path,
pinylib.CONFIG.B_ACCOUNT_BANS_FILE_NAME,
bad_account)
if rem:
self.send_chat_msg('%s was removed.' % bad_account)
self.load_list(accounts=True)
def do_list_info(self, list_type):
"""
Shows info of different lists/files.
:param list_type: The type of list to find info for.
:type list_type: str
"""
if self.is_client_mod:
if len(list_type) is 0:
self.send_chat_msg('Missing list type.')
else:
if list_type.lower() == 'bn':
if len(pinylib.CONFIG.B_NICK_BANS) is 0:
self.send_chat_msg('No items in this list.')
else:
self.send_chat_msg('%s nicks bans in list.' % len(pinylib.CONFIG.B_NICK_BANS))
elif list_type.lower() == 'bs':
if len(pinylib.CONFIG.B_STRING_BANS) is 0:
self.send_chat_msg('No items in this list.')
else:
self.send_chat_msg('%s string bans in list.' % pinylib.CONFIG.B_STRING_BANS)
elif list_type.lower() == 'ba':
if len(pinylib.CONFIG.B_ACCOUNT_BANS) is 0:
self.send_chat_msg('No items in this list.')
else:
self.send_chat_msg('%s account bans in list.' % pinylib.CONFIG.B_ACCOUNT_BANS)
elif list_type.lower() == 'bl':
if len(self.users.banned_users) == 0:
self.send_chat_msg('The banlist is empty.')
else:
_ban_list = '\n'.join('(%s) %s:%s [%s]' %
(i, banned_user.nick, banned_user.account, banned_user.ban_id)
for i, banned_user in enumerate(self.users.banned_users))
if len(_ban_list) > | |
# -*- coding: utf-8 -*-
#
"""
Solve the linearized Ginzburg--Landau problem.
"""
from scipy.sparse.linalg import LinearOperator
import time
import numpy as np
import cmath
import matplotlib.pyplot as pp
# from matplotlib import rc
# rc( 'text', usetex = True )
# rc( 'font', family = 'serif' )
import meshplex
import pynosh.modelevaluator_nls
import pynosh.modelevaluator_bordering_constant
# import pynosh.preconditioners
import pynosh.numerical_methods as nm
def _main():
    """Driver: read the mesh and magnetic vector potential, build the model
    evaluator, and solve the linearized problem for every (timestep, mu) pair
    given on the command line.
    """
    args = _parse_input_arguments()
    # Default the magnetic-vector-potential file to the state file.
    if args.file_mvp is None:
        args.file_mvp = args.filename
    # Read the magnetic vector potential.
    mesh, point_data, field_data = meshplex.read(args.file_mvp)
    # Build the model evaluator.
    # BUG FIX: print((...,)) was a 2to3 artifact that printed tuple reprs.
    print("Creating model evaluator...")
    start = time.time()
    num_coords = len(mesh.node_coords)
    nls_modeleval = pynosh.modelevaluator_nls.NlsModelEvaluator(
        mesh,
        V=-np.ones(num_coords),
        A=point_data["A"],
        preconditioner_type=args.preconditioner_type,
        num_amg_cycles=args.num_amg_cycles,
    )
    if args.bordering:
        # BUG FIX: the module is imported as
        # pynosh.modelevaluator_bordering_constant; the previous reference to
        # pynosh.bordered_modelevaluator raised AttributeError at runtime.
        modeleval = pynosh.modelevaluator_bordering_constant.BorderedModelEvaluator(
            nls_modeleval
        )
    else:
        modeleval = nls_modeleval
    end = time.time()
    print("done. (%gs)" % (end - start))
    # Run through all time steps.
    assert len(args.timesteps) == len(args.mu), (
        "There must be as many time steps as mus (%d != %d)."
        % (len(args.timesteps), len(args.mu))
    )
    for timestep, mu in zip(args.timesteps, args.mu):
        relresvec = _solve_system(modeleval, args.filename, timestep, mu, args)
        print("relresvec:")
        print(relresvec)
        print("num iters: %d" % (len(relresvec) - 1))
        if args.show_relres:
            pp.semilogy(relresvec, "k")
            pp.show()
    return
def _solve_system(modeleval, filename, timestep, mu, args):
    """Assemble and solve the linearized Ginzburg--Landau system for one
    (timestep, mu) pair.

    Reads the state psi from `filename`, builds the Jacobian and the
    preconditioner at (a slightly perturbed) psi, optionally deflates the
    near-null direction i*psi, and runs the chosen Krylov solver.

    Returns the list of relative residuals produced by the solver.
    """
    # Read the mesh.
    # BUG FIX: print((...,)) was a 2to3 artifact that printed tuple reprs.
    print("Reading current psi...")
    start = time.time()
    mesh, point_data, field_data = meshplex.read(filename, timestep=timestep)
    total = time.time() - start
    print("done (%gs)." % total)
    num_coords = len(mesh.node_coords)

    # Set psi at which to create the Jacobian.
    print("Creating initial guess and right-hand side...")
    start = time.time()
    current_psi = (point_data["psi"][:, 0] + 1j * point_data["psi"][:, 1]).reshape(
        -1, 1
    )
    # Perturb a bit so we do not linearize exactly at a solution.
    eps = 1.0e-10
    perturbation = eps * (np.random.rand(num_coords) + 1j * np.random.rand(num_coords))
    current_psi += perturbation.reshape(current_psi.shape)
    if args.bordering:
        # Bordered system: one extra scalar unknown.
        phi0 = np.zeros((num_coords + 1, 1), dtype=complex)
        x = np.empty((num_coords + 1, 1), dtype=complex)
        x[0:num_coords] = current_psi
        x[num_coords] = 0.0
    else:
        # Create right-hand side and initial guess.
        phi0 = np.zeros((num_coords, 1), dtype=complex)
        x = current_psi
    rhs = modeleval.compute_f(x=x, mu=mu, g=1.0)
    end = time.time()
    print("done. (%gs)" % (end - start))
    print("||rhs|| = %g" % np.sqrt(modeleval.inner_product(rhs, rhs)))

    # Create the linear operator.
    print("Getting Jacobian...")
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
    start_time = time.perf_counter()
    jacobian = modeleval.get_jacobian(x=x, mu=mu, g=1.0)
    end_time = time.perf_counter()
    print("done. (%gs)" % (end_time - start_time))

    # Create the preconditioner object.
    print("Getting preconditioner...")
    start_time = time.perf_counter()
    prec = modeleval.get_preconditioner_inverse(x=x, mu=mu, g=1.0)
    end_time = time.perf_counter()
    print("done. (%gs)" % (end_time - start_time))

    if args.use_deflation:
        # Deflate the (near-)null direction i*x out of the operator.
        W = 1j * x
        AW = jacobian * W
        P, x0new = nm.get_projection(
            W, AW, rhs, phi0, inner_product=modeleval.inner_product
        )
    else:
        P = None
        x0new = phi0

    # Pick the Krylov solver.
    if args.krylov_method == "cg":
        lin_solve = nm.cg
    elif args.krylov_method == "minres":
        lin_solve = nm.minres
    elif args.krylov_method == "gmres":
        lin_solve = nm.gmres
    else:
        raise ValueError("Unknown Krylov solver %s." % args.krylov_method)

    print(
        "Solving the system (len(x) = %d, bordering: %r)..."
        % (len(x), args.bordering)
    )
    start_time = time.perf_counter()
    timer = False
    out = lin_solve(
        jacobian,
        rhs,
        x0new,
        tol=args.tolerance,
        Mr=P,
        M=prec,
        maxiter=500,
        inner_product=modeleval.inner_product,
        explicit_residual=True,
    )
    end_time = time.perf_counter()
    print("done. (%gs)" % (end_time - start_time))
    print("(%d,%d)" % (2 * num_coords, len(out["relresvec"]) - 1))

    if timer:
        # Pretty-print timings (only populated when the solver supports a
        # `timer` argument and it is enabled).
        print(
            " " * 22
            + "sum".rjust(14)
            + "mean".rjust(14)
            + "min".rjust(14)
            + "std dev".rjust(14)
        )
        for key, item in list(out["times"].items()):
            print(
                "'%s': %12g %12g %12g %12g"
                % (key.ljust(20), item.sum(), item.mean(), item.min(), item.std())
            )
    # NOTE: 'modeleval.num_cycles' would contain the number of AMG V-cycles
    # for all AMG calls; with explicit_residual=True the preconditioner is
    # applied twice per iteration, which has to be accounted for when
    # counting cycles per iteration.
    return out["relresvec"]
def _run_one_mu(modeleval, precs, jacobian, rhs, psi0, test_preconditioners):
    """Run all test preconditioners for a single, fixed mu and plot/export
    the relative residual histories.
    """
    # Build the kinetic energy operator.
    print("Building the KEO...")
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
    start_time = time.perf_counter()
    modeleval._assemble_kinetic_energy_operator()
    end_time = time.perf_counter()
    print("done. (%gs)" % (end_time - start_time))
    # Run the preconditioners and gather the relative residuals.
    relresvecs = _run_preconditioners(jacobian, rhs, psi0, test_preconditioners)
    # Plot the relative residuals.
    _plot_relresvecs(test_preconditioners, relresvecs)
    # NOTE(review): matplotlib2tikz is not imported at the top of this file;
    # this call raises NameError as-is. Add `import matplotlib2tikz` if this
    # code path is used.
    matplotlib2tikz.save(
        "one-mu.tikz", figurewidth="\\figurewidth", figureheight="\\figureheight"
    )
    pp.show()
    return
def _run_along_top(modeleval, precs, jacobian, rhs, psi0, test_preconditioners):
    """Sweep mu over [0, 5] and record/plot the iteration count of every test
    preconditioner at each mu.
    """
    # Prepare the range of mus.
    mu_min = 0.0
    mu_max = 5.0
    num_steps = 1001
    mus = np.linspace(mu_min, mu_max, num=num_steps)
    num_iterations = {}
    for prec in test_preconditioners:
        num_iterations[prec["name"]] = []
    # Run over the mus and solve the equation systems.
    for mu in mus:
        print()
        print(" mu = %g" % mu)
        # Build the kinetic energy operator.
        modeleval.set_parameter(mu)
        precs.set_parameter(mu)
        print("Building the KEO...")
        # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
        start_time = time.perf_counter()
        modeleval._assemble_kinetic_energy_operator()
        end_time = time.perf_counter()
        print("done. (%gs)." % (end_time - start_time))
        # Run the preconditioners and gather the relative residuals.
        relresvecs = _run_preconditioners(jacobian, rhs, psi0, test_preconditioners)
        # Append the number of iterations to the data.
        for prec in test_preconditioners:
            num_iterations[prec["name"]].append(len(relresvecs[prec["name"]]) - 1)
    # Plot them all.
    for name, num_iteration in num_iterations.items():
        pp.plot(mus, num_iteration, label=name)
    # Add title and so forth.
    pp.title("CG convergence for $J$")
    # Raw string avoids the invalid '\m' escape sequence warning.
    pp.xlabel(r"$\mu$")
    pp.ylabel("Number of iterations till $<10^{-10}$")
    pp.legend()
    # NOTE(review): matplotlib2tikz is not imported in this file; this call
    # raises NameError as-is. Add `import matplotlib2tikz` if this code path
    # is used.
    matplotlib2tikz.save(
        "toprun.tikz", figurewidth="\\figurewidth", figureheight="\\figureheight"
    )
    pp.show()
    return
def _run_different_meshes(modeleval, precs):
mesh_files = [
"states/rectangle10.vtu",
"states/rectangle20.vtu",
"states/rectangle30.vtu",
"states/rectangle40.vtu",
"states/rectangle50.vtu",
"states/rectangle60.vtu",
"states/rectangle70.vtu",
"states/rectangle80.vtu",
"states/rectangle90.vtu",
"states/rectangle100.vtu",
# 'states/rectangle110.vtu',
# 'states/rectangle120.vtu',
# 'states/rectangle130.vtu',
# 'states/rectangle140.vtu',
# 'states/rectangle150.vtu',
# 'states/rectangle160.vtu',
# 'states/rectangle170.vtu',
# 'states/rectangle180.vtu',
# 'states/rectangle190.vtu',
# 'states/rectangle200.vtu'
]
mu = 1.0e-0
modeleval.set_parameter(mu)
precs.set_parameter(mu)
# loop over the meshes and compute
nums_unknowns = []
num_iterations = {}
for mesh_file in mesh_files:
# read and set the mesh
print()
print("Reading the mesh...")
try:
mesh = vtkio.read_mesh(mesh_file)
except AttributeError:
raise IOError("Could not read from file ", mesh_file, ".")
print(" done.")
modeleval.set_mesh(mesh)
precs.set_mesh(mesh)
# recreate all the objects necessary to perform the precondictioner run
num_coords = len(mesh.nodes)
nums_unknowns.append(num_coords)
# create the linear operator
jacobian = LinearOperator(
(num_coords, num_coords), matvec=modeleval.compute_jacobian, dtype=complex
)
# set psi at which to create the Jacobian
# generate random numbers within the unit circle
radius = np.random.rand(num_coords)
arg = np.random.rand(num_coords)
current_psi = np.zeros(num_coords, dtype=complex)
for k in range(num_coords):
current_psi[k] = cmath.rect(radius[k], arg[k])
modeleval.set_current_psi(current_psi)
# create right hand side and initial guess
rhs = np.random.rand(num_coords) + 1j * np.random.rand(num_coords)
# initial guess for all operations
psi0 = np.zeros(num_coords, dtype=complex)
test_preconditioners = _create_preconditioner_list(precs, num_coords)
# build the kinetic energy operator
print("Building the KEO...")
start_time = time.clock()
modeleval._assemble_kinetic_energy_operator()
end_time = time.clock()
print(("done. (", end_time - start_time, "s)."))
# Run the preconditioners and gather the relative residuals.
relresvecs = _run_preconditioners(jacobian, rhs, psi0, test_preconditioners)
# append the number of iterations to the data
for prec in test_preconditioners:
if prec["name"] not in list(num_iterations.keys()):
num_iterations[prec["name"]] = []
num_iterations[prec["name"]].append(len(relresvecs[prec["name"]]) - 1)
print(num_iterations)
# plot them all
plot_handles = []
for prec in test_preconditioners:
pp.semilogy(
| |
<reponame>fderyckere/pgmpy
#!/usr/bin/env python3
import itertools
import networkx as nx
from pgmpy.base import UndirectedGraph
from pgmpy.independencies import Independencies
class DAG(nx.DiGraph):
"""
Base class for all Directed Graphical Models.
Each node in the graph can represent either a random variable, `Factor`,
or a cluster of random variables. Edges in the graph represent the
dependencies between these.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is
created. The data can be an edge list or any Networkx graph object.
Examples
--------
Create an empty DAG with no nodes and no edges
>>> from pgmpy.base import DAG
>>> G = DAG()
G can be grown in several ways:
**Nodes:**
Add one node at a time:
>>> G.add_node(node='a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(nodes=['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(u='a', v='b')
a list of edges,
>>> G.add_edges_from(ebunch=[('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
Many common graph features allow python syntax for speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
super(DAG, self).__init__(ebunch)
cycles = []
try:
cycles = list(nx.find_cycle(self))
except nx.NetworkXNoCycle:
pass
else:
out_str = "Cycles are not allowed in a DAG."
out_str += "\nEdges indicating the path taken for a loop: "
out_str += "".join(["({0},{1}) ".format(u, v) for (u, v) in cycles])
raise ValueError(out_str)
def add_node(self, node, weight=None):
"""
Adds a single node to the Graph.
Parameters
----------
node: str, int, or any hashable python object.
The node to add to the graph.
weight: int, float
The weight of the node.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_node(node='A')
>>> sorted(G.nodes())
['A']
Adding a node with some weight.
>>> G.add_node(node='B', weight=0.3)
The weight of these nodes can be accessed as:
>>> G.node['B']
{'weight': 0.3}
>>> G.node['A']
{'weight': None}
"""
# Check for networkx 2.0 syntax
if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], dict):
node, attrs = node
if attrs.get("weight", None) is not None:
attrs["weight"] = weight
else:
attrs = {"weight": weight}
super(DAG, self).add_node(node, weight=weight)
def add_nodes_from(self, nodes, weights=None):
"""
Add multiple nodes to the Graph.
**The behviour of adding weights is different than in networkx.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, or any hashable python
object).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the variable at index i.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']
Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
"""
nodes = list(nodes)
if weights:
if len(nodes) != len(weights):
raise ValueError(
"The number of elements in nodes and weights" "should be equal."
)
for index in range(len(nodes)):
self.add_node(node=nodes[index], weight=weights[index])
else:
for node in nodes:
self.add_node(node=node)
def add_edge(self, u, v, weight=None):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be any hashable Python object.
weight: int, float (default=None)
The weight of the edge
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
>>> G.add_edge(u='Alice', v='Bob')
>>> G.nodes()
['Alice', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob')]
When the node is not already present in the graph:
>>> G.add_edge(u='Alice', v='Ankur')
>>> G.nodes()
['Alice', 'Ankur', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob'), ('Alice', 'Ankur')]
Adding edges with weight:
>>> G.add_edge('Ankur', 'Maria', weight=0.1)
>>> G.edge['Ankur']['Maria']
{'weight': 0.1}
"""
super(DAG, self).add_edge(u, v, weight=weight)
def add_edges_from(self, ebunch, weights=None):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python
object.
**The behavior of adding weights is different than networkx.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the graph.
The edges must be given as 2-tuples (u, v).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the edge at index i.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
>>> G.add_edges_from(ebunch=[('Alice', 'Bob'), ('Bob', 'Charles')])
>>> G.nodes()
['Alice', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob'), ('Bob', 'Charles')]
When the node is not already in the model:
>>> G.add_edges_from(ebunch=[('Alice', 'Ankur')])
>>> G.nodes()
['Alice', 'Bob', 'Charles', 'Ankur']
>>> G.edges()
[('Alice', 'Bob'), ('Bob', 'Charles'), ('Alice', 'Ankur')]
Adding edges with weights:
>>> G.add_edges_from([('Ankur', 'Maria'), ('Maria', 'Mason')],
... weights=[0.3, 0.5])
>>> G.edge['Ankur']['Maria']
{'weight': 0.3}
>>> G.edge['Maria']['Mason']
{'weight': 0.5}
"""
ebunch = list(ebunch)
if weights:
if len(ebunch) != len(weights):
raise ValueError(
"The number of elements in ebunch and weights" "should be equal"
)
for index in range(len(ebunch)):
self.add_edge(ebunch[index][0], ebunch[index][1], weight=weights[index])
else:
for edge in ebunch:
self.add_edge(edge[0], edge[1])
def get_parents(self, node):
"""
Returns a list of parents of node.
Throws an error if the node is not present in the graph.
Parameters
----------
node: string, int or any hashable python object.
The node whose parents would be returned.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG(ebunch=[('diff', 'grade'), ('intel', 'grade')])
>>> G.get_parents(node='grade')
['diff', 'intel']
"""
return list(self.predecessors(node))
def moralize(self):
"""
Removes all the immoralities in the DAG and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG(ebunch=[('diff', 'grade'), ('intel', 'grade')])
>>> moral_graph = G.moralize()
>>> moral_graph.edges()
[('intel', 'grade'), ('intel', 'diff'), ('grade', 'diff')]
"""
moral_graph = UndirectedGraph(self.to_undirected().edges())
for node in self.nodes():
moral_graph.add_edges_from(
itertools.combinations(self.get_parents(node), 2)
)
return moral_graph
def get_leaves(self):
"""
Returns a list of leaves of the graph.
Examples
--------
>>> from pgmpy.base import DAG
>>> graph = DAG([('A', 'B'), ('B', 'C'), ('B', 'D')])
>>> graph.get_leaves()
['C', 'D']
"""
return [node for node, out_degree in self.out_degree_iter() if out_degree == 0]
    def out_degree_iter(self, nbunch=None, weight=None):
        """Yield ``(node, out_degree)`` pairs, smoothing over the networkx
        1.x / 2.x API difference (``out_degree_iter`` was removed in 2.x)."""
        if nx.__version__.startswith("1"):
            return super(DAG, self).out_degree_iter(nbunch, weight)
        else:
            # networkx >= 2: out_degree() returns an iterable view of pairs.
            return iter(self.out_degree(nbunch, weight))
    def in_degree_iter(self, nbunch=None, weight=None):
        """Yield ``(node, in_degree)`` pairs, smoothing over the networkx
        1.x / 2.x API difference (``in_degree_iter`` was removed in 2.x)."""
        if nx.__version__.startswith("1"):
            return super(DAG, self).in_degree_iter(nbunch, weight)
        else:
            # networkx >= 2: in_degree() returns an iterable view of pairs.
            return iter(self.in_degree(nbunch, weight))
def get_roots(self):
"""
Returns a list of roots of the graph.
Examples
--------
>>> from pgmpy.base import DAG
>>> graph = DAG([('A', 'B'), ('B', 'C'), ('B', 'D'), ('E', 'B')])
>>> graph.get_roots()
['A', 'E']
"""
return [
node for node, in_degree in dict(self.in_degree()).items() if in_degree == 0
]
def get_children(self, node):
"""
Returns a list of children of node.
Throws an error if the node is not present in the graph.
Parameters
----------
node: string, int or any hashable python object.
The node whose children would be returned.
Examples
--------
>>> from pgmpy.base import DAG
>>> g = DAG(ebunch=[('A', 'B'), ('C', 'B'), ('B', 'D'),
('B', 'E'), ('B', 'F'), ('E', 'G')])
>>> g.get_children(node='B')
['D', 'E', 'F']
"""
return list(self.successors(node))
def get_independencies(self, latex=False):
"""
Computes independencies in the DAG, by checking d-seperation.
Parameters
----------
latex: boolean
If latex=True then latex string of the independence assertion
would be created.
Examples
--------
>>> from pgmpy.base import DAG
>>> chain = DAG([('X', 'Y'), ('Y', 'Z')])
>>> chain.get_independencies()
(X _|_ Z | Y)
(Z _|_ X | Y)
"""
independencies = Independencies()
for start in self.nodes():
rest = set(self.nodes()) - {start}
for r in range(len(rest)):
for observed in itertools.combinations(rest, r):
d_seperated_variables = (
rest
- set(observed)
- set(self.active_trail_nodes(start, observed=observed)[start])
)
if d_seperated_variables:
independencies.add_assertions(
[start, d_seperated_variables, observed]
)
independencies.reduce()
if not latex:
return independencies
else:
return independencies.latex_string()
def local_independencies(self, variables):
"""
Returns | |
from __future__ import print_function, division
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from torchdiffeq import odeint_adjoint as odeint
import matplotlib.pyplot as plt
import json
class GeneralModeDataset(Dataset):
    """
    A PyTorch Dataset to handle general mode AFM data.

    Attributes
    ----------
    t : 1D numpy array
        Time stamps corresponding to the ODE solution x(t).
    d_array : 1D numpy array
        Mean tip-sample distances, one per trajectory.
    z_array : 2D numpy array
        Deflection trajectories, one row per entry of ``d_array``.
    ode_params : dict
        ODE parameters, e.g. {'A0': float, 'Q': float, 'Om': float, 'k': float}.
    x0_array : 2D numpy array
        Initial conditions [y0, z0] per trajectory; y0 = 0 and z0 = d.
    """

    def __init__(self, t, d_array, z_array, ode_params):
        """
        Parameters
        ----------
        t : array-like
            Time stamps corresponding to the ODE solution x(t).
        d_array : array-like
            Mean tip-sample distances.
        z_array : array-like
            Deflection data, one row per distance in ``d_array``.
        ode_params : dict
            Dictionary containing the necessary parameters for the ODE.
            Required form is
            ode_params = {'A0' : float, 'Q' : float, 'Om' : float, 'k' : float}
        """
        self.t = np.array(t)
        self.d_array = np.array(d_array)
        self.z_array = np.array(z_array)
        self.ode_params = ode_params
        # Initial conditions x0 = [y0, z0]: velocity 0, position d.
        self.x0_array = np.zeros((self.d_array.size, 2))
        self.x0_array[:, 1] = self.d_array

    def __len__(self):
        return len(self.d_array)

    def __getitem__(self, idx):
        sample = {'time': self.t,
                  'd': self.d_array[idx],
                  'x0': self.x0_array[idx][:],
                  'z': self.z_array[idx][:]}
        return sample

    def __eq__(self, other):
        """Two datasets compare equal when they share the same ODE parameters."""
        return self.ode_params == other.ode_params

    def PlotData(self, idx, figsize=(7, 5), fontsize=14):
        """Plot trajectory ``idx`` and return the (figure, axes) pair."""
        data = self.__getitem__(idx)
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        ax.plot(data['time'], data['z'], '.k')
        ax.grid(ls='--')
        ax.set_xlabel('Normalized Time', fontsize=fontsize)
        ax.set_ylabel('z (nm)', fontsize=fontsize)
        return fig, ax

    def save(self, savepath):
        """
        Saves the given dataset in json format.

        BUG FIX: the previous implementation dumped ``self.__dict__``, which
        (a) contains numpy arrays that json cannot serialize (TypeError) and
        (b) contains ``x0_array``, which is not an ``__init__`` parameter and
        therefore broke ``load`` (``cls(**data_dict)`` raised TypeError).
        Only the constructor arguments are stored, converted to plain lists.

        Parameters
        ----------
        savepath : path
            Path to save the dataset at.
        """
        state = {'t': self.t.tolist(),
                 'd_array': self.d_array.tolist(),
                 'z_array': self.z_array.tolist(),
                 'ode_params': self.ode_params}
        with open(savepath, 'w') as savefile:
            json.dump(state, savefile)
        print('Saved data to: {}'.format(savepath))

    @classmethod
    def load(cls, loadpath):
        """
        Loads the dataset from a json file produced by :meth:`save`.

        Parameters
        ----------
        loadpath : path
            Path to the json file to be loaded.

        Returns
        -------
        dataset : An instance of the class GeneralModeDataset
            Loaded dataset.
        """
        with open(loadpath) as loadfile:
            data_dict = json.load(loadfile)
        return cls(**data_dict)
class F_cons(nn.Module):
    """
    A small MLP modeling the conservative force experienced by the atomic
    force microscope probe as a function of the instantaneous distance z.

    Attributes
    ----------
    hidden_nodes : list of int
        Number of nodes in each hidden layer of the model.
    layers : torch.nn.ModuleList
        The fully connected layers: 1 -> hidden_nodes[0] -> ... -> 1.
    elu : torch.nn.ELU
        ELU activation layer (constructed for compatibility; not used in
        forward()).
    tanh : torch.nn.Tanh
        Tanh activation applied between consecutive layers in forward().
    """

    def __init__(self, hidden_nodes=[4]):
        """
        Parameters
        ----------
        hidden_nodes : list of int
            List of the nodes in each of the hidden layers of the model.
        """
        super(F_cons, self).__init__()
        self.hidden_nodes = list(hidden_nodes)
        self.layers = nn.ModuleList()
        # Build the chain 1 -> h0 -> h1 -> ... -> h_last.
        layer_sizes = [1] + self.hidden_nodes
        for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:]):
            self.layers.append(nn.Linear(in_dim, out_dim))
        # Final projection back to a scalar force value.
        self.layers.append(nn.Linear(self.hidden_nodes[-1], 1))
        self.elu = nn.ELU()
        self.tanh = nn.Tanh()
        # Start close to the zero function: tiny weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, mean=0, std=1.0e-4)
                nn.init.constant_(module.bias, val=0)

    def forward(self, z):
        """
        Returns the neural network output as a function of input z.

        Parameters
        ----------
        z : tensor with dimensions [batch_size, 1].
            Neural network input. Represents the instantaneous tip-sample
            distance.

        Returns
        -------
        F : tensor with dimensions [batch_size, 1].
            Neural network output. Represents the modeled tip-sample force.
        """
        out = self.layers[0](z)
        for layer in self.layers[1:]:
            out = layer(self.tanh(out))
        return out
class AFM_NeuralODE(nn.Module):
    """A PyTorch module for a NeuralODE modeling the AFM tip-sample dynamics.

    All length scales are expressed with 1 [nm] = 1 and all timescales are
    scaled so that w0 * 1 [s] = 1.

    Attributes
    ----------
    Fc : F_cons
        MLP modeling the conservative tip-sample force.
    nfe : int
        Number of forward evaluations; incremented on every forward() call.
    d : torch.Tensor of shape [batch_size, 1] or None
        Batched mean tip-sample distance in [nm]. Must be assigned by the
        caller before forward() is used (initialized to None).
    A0 : float
        Free amplitude of the tip at resonance in [nm]
        (definition per M. Lee et al., Phys. Rev. Lett. 97, 036104 (2006)).
    Om : float
        Ratio of driving to resonance frequency, Om = f/f0.
    Q : float
        Q-factor of the cantilever/QTF.
    k : float
        Spring constant of the cantilever/QTF in [N/m].
    """

    def __init__(self, A0, Om, Q, k, hidden_nodes=(4,)):
        """
        Parameters
        ----------
        A0, Om, Q, k : float
            Physical parameters described on the class.
        hidden_nodes : sequence of int
            Hidden-layer widths forwarded to F_cons. Immutable default to
            avoid the mutable-default-argument pitfall.
        """
        super(AFM_NeuralODE, self).__init__()
        self.Fc = F_cons(hidden_nodes)
        self.nfe = 0
        self.d = None
        self.A0 = A0
        self.Om = Om
        self.Q = Q
        self.k = k
        # Register the constant matrices directly as buffers so that
        # .to(device)/.cuda() and state_dict handle them. The previous code
        # pinned them to CUDA (breaking CPU runs) and registered duplicate
        # buffers under space-containing names ('Constant 1'), which are not
        # accessible as attributes. Note: the module must now be moved to the
        # GPU explicitly (model.to('cuda')) instead of relying on hard-coding.
        # C1 has shape (1, 2, 2); C2 has shape (1, 2, 1) for broadcasting.
        self.register_buffer('C1', torch.tensor([[-1. / self.Q, -1.], [1., 0.]]).unsqueeze(0))
        self.register_buffer('C2', torch.tensor([[1.], [0.]]).unsqueeze(0))

    def forward(self, t, x):
        """Return the right-hand side of dx/dt = f(t, x).

        Parameters
        ----------
        t : torch.Tensor (0-d) or float
            Time. NOTE(review): torch.cos requires a tensor argument in
            current PyTorch; the ODE solver is assumed to supply t as a
            tensor — confirm against the training loop.
        x : torch.Tensor of shape [batch_size, 2]
            State [y, z] per row, where y = dz/dt.

        Returns
        -------
        torch.Tensor of shape [batch_size, 2]
            dx/dt = [dy/dt, dz/dt] per row. Requires self.d to be set.
        """
        self.nfe += 1
        # Force is a function of z only (second state component).
        F = self.Fc(x[:, 1:])
        # Broadcasted matmuls: (1, 2, 2) @ (b, 2, 1) -> (b, 2, 1)
        # and (1, 2, 1) @ (b, 1, 1) -> (b, 2, 1).
        ode = torch.matmul(self.C1, x.unsqueeze(-1)) + torch.matmul(
            self.C2,
            (self.d + (self.A0 / self.Q) * torch.cos(self.Om * t) + F / self.k).unsqueeze(-1))
        # Drop the trailing singleton dim to return shape (b, 2).
        return ode.squeeze(-1)
class LightningTrainer(pl.LightningModule):
"""
A PyTorch-Lightning LightningModule for training the NeuralODE created by the class AFM_NeuralODE.
...
Attributes
----------
ODE : An instance of class AFM_NeuralODE
A NeuralODE model to represent the dynamics between the tip and the sample.
hparams : An argparse.Namespace object with fields 'train_dataset', 'hidden_nodes', 'batch_size', 'lr', and 'solver'.
Hyperparameters of the model.
'train_dataset' : An instance of the class GeneralModeDataset
A PyTorch dataset of a given general mode approach data to train the NeuralODE with.
'hidden_nodes' : list or array of integers
List of number of nodes in the hidden layers of the model.
'batch_size' : integer
Batch_size of the model
'lr' : float
Learning rate
'solver' : string, must be compatible with TorchDiffEq.odeint_adjoint
Type of ODE solver to be used to solve the NeuralODE
"""
#def __init__(self, train_dataset, lr = 0.05, hidden_nodes = [10, | |
signature
"""
pass
def addAttribute(*args, **kwargs):
    """
    addAttribute(MObject node, MObject attribute) -> self

    Adds an operation to the modifier to add a new dynamic attribute to the
    given dependency node. If the attribute is a compound its children will
    be added as well, so only the parent needs to be added using this method.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def addExtensionAttribute(*args, **kwargs):
    """
    addExtensionAttribute(MNodeClass nodeClass, MObject attribute) -> self

    Adds an operation to the modifier to add a new extension attribute to
    the given node class. If the attribute is a compound its children will be
    added as well, so only the parent needs to be added using this method.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def commandToExecute(*args, **kwargs):
    """
    commandToExecute(command) -> self

    Adds an operation to the modifier to execute a MEL command. The specified
    command must be undoable otherwise unexpected results may occur. It is
    best to use multiple commandToExecute() calls rather than batching
    multiple commands into one call to commandToExecute(). They will still
    be undone together, as a single undo action by the user, but Maya will
    better be able to recover if one of the commands fails.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def connect(*args, **kwargs):
    """
    connect(MPlug source, MPlug dest) -> self
    connect(MObject sourceNode, MObject sourceAttr,
            MObject destNode, MObject destAttr) -> self

    Adds an operation to the modifier that connects two plugs in the
    dependency graph. It is the user's responsibility to ensure that the
    source and destination attributes are of compatible types. For instance,
    if the source attribute is a nurbs surface then the destination must
    also be a nurbs surface.
    Plugs can either be specified with node and attribute MObjects or with
    MPlugs.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def createNode(*args, **kwargs):
    """
    createNode(typeName) -> MObject
    createNode(MTypeId typeId) -> MObject

    Adds an operation to the modifier to create a node of the given type.
    The new node is created and returned but will not be added to the
    Dependency Graph until the modifier's doIt() method is called. Raises
    TypeError if the named node type does not exist or if it is a DAG node
    type.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def deleteNode(*args, **kwargs):
    """
    deleteNode(MObject node) -> self

    Adds an operation to the modifier which deletes the specified node from
    the Dependency Graph. If the modifier already contains other operations
    on the same node (e.g. a disconnect) then they should be committed by
    calling the modifier's doIt() before the deleteNode operation is added.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def disconnect(*args, **kwargs):
    """
    disconnect(MPlug source, MPlug dest) -> self
    disconnect(MObject sourceNode, MObject sourceAttr,
               MObject destNode, MObject destAttr) -> self

    Adds an operation to the modifier that breaks a connection between two
    plugs in the dependency graph.
    Plugs can either be specified with node and attribute MObjects or with
    MPlugs.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def doIt(*args, **kwargs):
    """
    doIt() -> self

    Executes the modifier's operations. If doIt() is called multiple times
    in a row, without any intervening calls to undoIt(), then only the
    operations which were added since the previous doIt() call will be
    executed. If undoIt() has been called then the next call to doIt() will
    do all operations.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def linkExtensionAttributeToPlugin(*args, **kwargs):
    """
    linkExtensionAttributeToPlugin(MObject plugin, MObject attribute) -> self

    The plugin can call this method to indicate that the extension attribute
    defines part of the plugin, regardless of the node type to which it
    attaches itself. This requirement is used when the plugin is checked to
    see if it is in use or if is able to be unloaded or if it is required as
    part of a stored file. For compound attributes only the topmost parent
    attribute may be passed in and all of its children will be included,
    recursively. Thus it's not possible to link a child attribute to a
    plugin by itself. Note that the link is established immediately and is
    not affected by the modifier's doIt() or undoIt() methods.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValue(*args, **kwargs):
    """
    newPlugValue(MPlug plug, MObject value) -> self

    Adds an operation to the modifier to set the value of a plug, where
    value is an MObject data wrapper, such as created by the various
    MFn*Data classes.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueBool(*args, **kwargs):
    """
    newPlugValueBool(MPlug plug, bool value) -> self

    Adds an operation to the modifier to set a value onto a bool plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueChar(*args, **kwargs):
    """
    newPlugValueChar(MPlug plug, int value) -> self

    Adds an operation to the modifier to set a value onto a char (single
    byte signed integer) plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueDouble(*args, **kwargs):
    """
    newPlugValueDouble(MPlug plug, float value) -> self

    Adds an operation to the modifier to set a value onto a double-precision
    float plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueFloat(*args, **kwargs):
    """
    newPlugValueFloat(MPlug plug, float value) -> self

    Adds an operation to the modifier to set a value onto a single-precision
    float plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueInt(*args, **kwargs):
    """
    newPlugValueInt(MPlug plug, int value) -> self

    Adds an operation to the modifier to set a value onto an int plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueMAngle(*args, **kwargs):
    """
    newPlugValueMAngle(MPlug plug, MAngle value) -> self

    Adds an operation to the modifier to set a value onto an angle plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueMDistance(*args, **kwargs):
    """
    newPlugValueMDistance(MPlug plug, MDistance value) -> self

    Adds an operation to the modifier to set a value onto a distance plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueMTime(*args, **kwargs):
    """
    newPlugValueMTime(MPlug plug, MTime value) -> self

    Adds an operation to the modifier to set a value onto a time plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueShort(*args, **kwargs):
    """
    newPlugValueShort(MPlug plug, int value) -> self

    Adds an operation to the modifier to set a value onto a short
    integer plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def newPlugValueString(*args, **kwargs):
    """
    newPlugValueString(MPlug plug, string value) -> self

    Adds an operation to the modifier to set a value onto a string plug.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def removeAttribute(*args, **kwargs):
    """
    removeAttribute(MObject node, MObject attribute) -> self

    Adds an operation to the modifier to remove a dynamic attribute from the
    given dependency node. If the attribute is a compound its children will
    be removed as well, so only the parent needs to be removed using this
    method. The attribute MObject passed in will be set to kNullObj. There
    should be no function sets attached to the attribute at the time of the
    call as their behaviour may become unpredictable.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def removeExtensionAttribute(*args, **kwargs):
    """
    removeExtensionAttribute(MNodeClass nodeClass, MObject attribute) -> self

    Adds an operation to the modifier to remove an extension attribute from
    the given node class. If the attribute is a compound its children will
    be removed as well, so only the parent needs to be removed using this
    method. The attribute MObject passed in will be set to kNullObj. There
    should be no function sets attached to the attribute at the time of the
    call as their behaviour may become unpredictable.
    """
    # Stub: the real implementation is provided by Maya's native API at runtime.
    pass
def removeExtensionAttributeIfUnset(*args, **kwargs):
"""
removeExtensionAttributeIfUnset(MNodeClass nodeClass,
MObject attribute) -> self
Adds an operation to the modifier to remove an extension attribute from
the given node class, but only if there are no nodes in the graph with
non-default values for this attribute. If the attribute is a compound
its children will be removed as well, so only the parent needs to be
removed using this method. The attribute MObject passed in will be set
to kNullObj. There should be no function sets attached | |
Err(de::Error::missing_field(".tag"))')
with self.block(u'match tag'):
for subtype in struct.get_enumerated_subtypes():
variant_name = self.enum_variant_name(subtype)
if isinstance(subtype.data_type, ir.Void):
self.emit(u'"{}" => Ok({}::{}),'
.format(subtype.name, type_name, variant_name))
elif isinstance(ir.unwrap_aliases(subtype.data_type)[0], ir.Struct) \
and not subtype.data_type.has_enumerated_subtypes():
self.emit(u'"{}" => Ok({}::{}({}::internal_deserialize(map)?)),'
.format(subtype.name,
type_name,
variant_name,
self._rust_type(subtype.data_type)))
else:
with self.block(u'"{}" =>'.format(subtype.name)):
with self.block(u'if map.next_key()? != Some("{}")'
.format(subtype.name)):
self.emit(u'Err(de::Error::missing_field("{}"));'
.format(subtype.name))
self.emit(u'Ok({}::{}(map.next_value()?))'
.format(type_name, variant_name))
if struct.is_catch_all():
with self.block(u'_ =>'):
# TODO(wfraser): it'd be cool to grab any fields in the parent,
# which are common to all variants, and stick them in the
# '_Unknown' enum variant.
# For now, just consume them and return a nullary variant.
self.emit(u'crate::eat_json_fields(&mut map)?;')
self.emit(u'Ok({}::_Unknown)'.format(type_name))
else:
self.emit(u'_ => Err(de::Error::unknown_variant(tag, VARIANTS))')
self.generate_multiline_list(
list(u'"{}"'.format(subtype.name)
for field in struct.get_enumerated_subtypes()),
before='const VARIANTS: &[&str] = &',
after=';',
delim=(u'[', u']'))
self.emit(u'deserializer.deserialize_struct("{}", VARIANTS, EnumVisitor)'.format(
struct.name))
self.emit()
with self._impl_serialize(type_name):
self.emit(u'// polymorphic struct serializer')
self.emit(u'use serde::ser::SerializeStruct;')
with self.block(u'match *self'):
for subtype in struct.get_enumerated_subtypes():
variant_name = self.enum_variant_name(subtype)
with self.block(u'{}::{}(ref x) =>'.format(type_name, variant_name)):
self.emit(u'let mut s = serializer.serialize_struct("{}", {})?;'
.format(type_name, len(subtype.data_type.all_fields) + 1))
self.emit(u's.serialize_field(".tag", "{}")?;'.format(subtype.name))
for field in subtype.data_type.all_fields:
self.emit(u's.serialize_field("{}", &x.{})?;'
.format(field.name,
self.field_name(field)))
self.emit(u's.end()')
if struct.is_catch_all():
self.emit(u'{}::_Unknown => Err(::serde::ser::Error::custom("cannot serialize '
u'unknown variant"))'.format(
type_name))
self.emit()
def _impl_serde_for_union(self, union):
    """Emit Rust serde `Deserialize` and `Serialize` impls for a Stone union.

    The wire format is a JSON map carrying a ".tag" key that names the
    variant, with the variant's payload either nested under a key equal to
    the tag or flattened into the same map (for struct payloads). Open
    (non-closed) unions get a catch-all `Other` variant that consumes and
    discards unknown fields on deserialize and refuses to serialize.
    """
    type_name = self.enum_name(union)
    # ---- Deserialize impl: a MapAccess visitor dispatching on ".tag" ----
    with self._impl_deserialize(type_name):
        self.emit(u'// union deserializer')
        self.emit(u'use serde::de::{self, MapAccess, Visitor};')
        self.emit(u'struct EnumVisitor;')
        with self.block(u'impl<\'de> Visitor<\'de> for EnumVisitor'):
            self.emit(u'type Value = {};'.format(type_name))
            with self.emit_rust_function_def(
                    u'expecting',
                    [u'&self', u'f: &mut ::std::fmt::Formatter<\'_>'],
                    u'::std::fmt::Result'):
                self.emit(u'f.write_str("a {} structure")'.format(union.name))
            with self.emit_rust_function_def(
                    u'visit_map<V: MapAccess<\'de>>',
                    [u'self', u'mut map: V'],
                    u'Result<Self::Value, V::Error>'):
                # The ".tag" key must come first; it names the variant.
                with self.block(u'let tag: &str = match map.next_key()?', after=';'):
                    self.emit(u'Some(".tag") => map.next_value()?,')
                    self.emit(u'_ => return Err(de::Error::missing_field(".tag"))')
                if len(union.all_fields) == 1 and union.all_fields[0].catch_all:
                    self.emit(u'// open enum with no defined variants')
                    self.emit(u'let _ = tag;') # hax
                    self.emit(u'crate::eat_json_fields(&mut map)?;')
                    self.emit(u'Ok({}::Other)'.format(type_name))
                else:
                    with self.block(u'match tag'):
                        for field in union.all_fields:
                            if field.catch_all:
                                # Handle the 'Other' variant at the end.
                                continue
                            variant_name = self.enum_variant_name(field)
                            ultimate_type = ir.unwrap(field.data_type)[0]
                            if isinstance(field.data_type, ir.Void):
                                # Void payload: tag only; discard any extra fields.
                                with self.block(u'"{}" =>'.format(field.name)):
                                    self.emit(u'crate::eat_json_fields(&mut map)?;')
                                    self.emit(u'Ok({}::{})'.format(type_name, variant_name))
                            elif isinstance(ultimate_type, ir.Struct) \
                                    and not ultimate_type.has_enumerated_subtypes():
                                # Struct payload: its fields are flattened into this map.
                                if isinstance(ir.unwrap_aliases(field.data_type)[0], ir.Nullable):
                                    # A nullable here means we might have more fields that can be
                                    # deserialized into the inner type, or we might have nothing,
                                    # meaning None.
                                    if not ultimate_type.all_required_fields:
                                        raise RuntimeError('{}.{}: an optional struct with no'
                                                           ' required fields is ambiguous'
                                                           .format(union.name, field.name))
                                    self.emit(u'"{}" => Ok({}::{}({}::internal_deserialize_opt('
                                              u'map, true)?)),'
                                              .format(field.name,
                                                      type_name,
                                                      variant_name,
                                                      self._rust_type(ultimate_type)))
                                else:
                                    self.emit(u'"{}" => Ok({}::{}({}::internal_deserialize(map)?)),'
                                              .format(field.name,
                                                      type_name,
                                                      variant_name,
                                                      self._rust_type(field.data_type)))
                            else:
                                # Other payloads are nested under a key equal to the tag.
                                with self.block(u'"{}" =>'.format(field.name)):
                                    with self.block(u'match map.next_key()?'):
                                        self.emit(u'Some("{}") => Ok({}::{}(map.next_value()?)),'
                                                  .format(field.name,
                                                          type_name,
                                                          variant_name))
                                        if isinstance(ir.unwrap_aliases(field.data_type)[0],
                                                      ir.Nullable):
                                            # if it's null, the field can be omitted entirely
                                            self.emit(u'None => Ok({}::{}(None)),'
                                                      .format(type_name, variant_name))
                                        else:
                                            self.emit(u'None => Err('
                                                      u'de::Error::missing_field("{}")),'
                                                      .format(field.name))
                                        self.emit(u'_ => Err(de::Error::unknown_field('
                                                  u'tag, VARIANTS))')
                        if not union.closed:
                            with self.block(u'_ =>'):
                                self.emit(u'crate::eat_json_fields(&mut map)?;')
                                self.emit(u'Ok({}::Other)'.format(type_name))
                        else:
                            self.emit(u'_ => Err(de::Error::unknown_variant(tag, VARIANTS))')
        self.generate_multiline_list(
            list(u'"{}"'.format(field.name) for field in union.all_fields),
            before='const VARIANTS: &[&str] = &',
            after=';',
            delim=(u'[', u']'),)
        self.emit(u'deserializer.deserialize_struct("{}", VARIANTS, EnumVisitor)'.format(
            union.name))
    self.emit()
    # ---- Serialize impl: one match arm per variant ----
    with self._impl_serialize(type_name):
        self.emit(u'// union serializer')
        if len(union.all_fields) == 1 and union.all_fields[0].catch_all:
            # special case: an open union with no variants defined.
            self.emit(u'#![allow(unused_variables)]')
            self.emit(u'Err(::serde::ser::Error::custom("cannot serialize an open union with '
                      u'no defined variants"))')
        else:
            self.emit(u'use serde::ser::SerializeStruct;')
            with self.block(u'match *self'):
                for field in union.all_fields:
                    if field.catch_all:
                        # Handle the 'Other' variant at the end.
                        continue
                    variant_name = self.enum_variant_name(field)
                    if isinstance(field.data_type, ir.Void):
                        with self.block(u'{}::{} =>'.format(type_name, variant_name)):
                            self.emit(u'// unit')
                            self.emit(u'let mut s = serializer.serialize_struct("{}", 1)?;'
                                      .format(union.name))
                            self.emit(u's.serialize_field(".tag", "{}")?;'.format(field.name))
                            self.emit(u's.end()')
                    else:
                        ultimate_type = ir.unwrap(field.data_type)[0]
                        # Only bind `x` in the pattern when there is a payload to use;
                        # an empty struct payload is matched with `_` to avoid warnings.
                        needs_x = not (isinstance(field.data_type, ir.Struct)
                                       and not field.data_type.all_fields)
                        ref_x = 'ref x' if needs_x else '_'
                        with self.block(u'{}::{}({}) =>'.format(
                                type_name, variant_name, ref_x)):
                            if self.is_enum_type(ultimate_type):
                                # Inner type is a union or polymorphic struct; need to always
                                # emit another nesting level.
                                self.emit(u'// union or polymporphic struct')
                                self.emit(u'let mut s = serializer.serialize_struct("{}", 2)?;'
                                          .format(union.name))
                                self.emit(u's.serialize_field(".tag", "{}")?;'
                                          .format(field.name))
                                self.emit(u's.serialize_field("{}", x)?;'.format(field.name))
                                self.emit(u's.end()')
                            elif isinstance(ir.unwrap_aliases(field.data_type)[0], ir.Nullable):
                                self.emit(u'// nullable (struct or primitive)')
                                # If it's nullable and the value is None, just emit the tag and
                                # nothing else, otherwise emit the fields directly at the same
                                # level.
                                num_fields = 1 if ir.is_primitive_type(ultimate_type) \
                                    else len(ultimate_type.all_fields) + 1
                                self.emit(u'let n = if x.is_some() {{ {} }} else {{ 1 }};'
                                          .format(num_fields + 1))
                                self.emit(u'let mut s = serializer.serialize_struct("{}", n)?;'
                                          .format(union.name))
                                self.emit(u's.serialize_field(".tag", "{}")?;'
                                          .format(field.name))
                                with self.block(u'if let Some(ref x) = x'):
                                    if ir.is_primitive_type(ultimate_type):
                                        self.emit(u's.serialize_field("{}", &x)?;'
                                                  .format(field.name))
                                    else:
                                        self.emit(u'x.internal_serialize::<S>(&mut s)?;')
                                self.emit(u's.end()')
                            elif isinstance(ultimate_type, ir.Struct):
                                self.emit(u'// struct')
                                self.emit(u'let mut s = serializer.serialize_struct("{}", {})?;'
                                          .format(union.name,
                                                  len(ultimate_type.all_fields) + 1))
                                self.emit(u's.serialize_field(".tag", "{}")?;'
                                          .format(field.name))
                                if ultimate_type.all_fields:
                                    self.emit(u'x.internal_serialize::<S>(&mut s)?;')
                                self.emit(u's.end()')
                            else:
                                self.emit(u'// primitive')
                                self.emit(u'let mut s = serializer.serialize_struct("{}", 2)?;'
                                          .format(union.name))
                                self.emit(u's.serialize_field(".tag", "{}")?;'
                                          .format(field.name))
                                self.emit(u's.serialize_field("{}", x)?;'.format(field.name))
                                self.emit(u's.end()')
                if not union.closed:
                    self.emit(u'{}::Other => Err(::serde::ser::Error::custom('
                              u'"cannot serialize \'Other\' variant"))'
                              .format(type_name))
    self.emit()
# Helpers
def _emit_doc(self, doc_string, prefix=u'///'):
    """Emit a Rust doc comment for the given Stone doc string, if any.

    Paragraphs (separated by blank lines in the source doc) are separated
    by a bare comment-prefix line, and each paragraph is wrapped to 100
    columns after Stone doc-markup tags are resolved via self._docf.
    """
    if doc_string is None:
        return
    for i, paragraph in enumerate(doc_string.split(u'\n\n')):
        if i:
            self.emit(prefix)
        rendered = self.process_doc(paragraph, self._docf)
        self.emit_wrapped_text(rendered, prefix=prefix + u' ', width=100)
def _docf(self, tag, val):
    """Render one Stone doc-markup tag as Rustdoc markdown.

    Handles the tags 'route', 'field', 'type', 'link', and 'val'; anything
    else is printed as a warning and rendered as inline code. Returns the
    markdown replacement text for the tag.
    """
    if tag == 'route':
        # Route refs may carry a version suffix, e.g. "ns.route:2".
        if ':' in val:
            val, version = val.split(':')
            version = int(version)
        else:
            version = 1
        if '.' in val:
            # Qualified with a namespace; link relative to the parent module.
            ns, route = val.split('.')
            rust_fn = self.route_name_raw(route, version)
            label = ns + '::' + rust_fn
            target = 'super::' + label
        else:
            target = self.route_name_raw(val, version)
            label = target
        return '[`{}()`]({})'.format(label, target)
    elif tag == 'field':
        if '.' in val:
            cls_name, field = val.rsplit('.', 1)
            assert('.' not in cls_name) # dunno if this is even allowed, but we don't handle it
            typ = self._all_types[self._current_namespace][cls_name]
            type_name = self._rust_type(typ)
            if self.is_enum_type(typ):
                # NOTE: the generator expression below deliberately reuses the
                # name `field` as its loop variable; in Python 3 the genexp has
                # its own scope, so the outer `field` being tested is unaffected.
                if isinstance(typ, ir.Struct) and typ.has_enumerated_subtypes() \
                        and field in (field.name for field in typ.fields):
                    # This is actually a link to a field in a polymorphic struct, not an enum
                    # variant. Because Rust doesn't have polymorphism, we make the fields be
                    # present on all enum variants, so this is a link to a field in the current
                    # type. Rustdoc doesn't let you link to a field, just the type, but we're
                    # already at that page, so don't bother with emitting an actual link.
                    # Hopefully we're documenting one of the variants right now, or else this
                    # is going to look weird.
                    field = self.field_name_raw(field)
                    return '`{}`'.format(field)
                field = self.enum_variant_name_raw(field)
                return '[`{}::{}`]({}::{})'.format(type_name, field, type_name, field)
            else:
                field = self.field_name_raw(field)
                # we can't link to the field itself, so just link to the struct
                return '[`{}::{}`]({})'.format(type_name, field, type_name)
        else:
            # link is relative to the current type
            type_name = self._rust_type(self._current_type)
            if self.is_enum_type(self._current_type):
                variant_name = self.enum_variant_name_raw(val)
                return '[`{}`]({}::{})'.format(
                    variant_name, type_name, variant_name)
            else:
                field_name = self.field_name_raw(val)
                # we could, but don't bother linking to the struct because we're already there.
                # return '[`{}`]({})'.format(field_name, current_rust_type)
                return '`{}`'.format(field_name)
    elif tag == 'type':
        if '.' in val:
            # Cross-namespace type reference.
            ns, typ_name = val.split('.')
            typ = self._all_types[ns][typ_name]
            rust_name = self._rust_type(typ, no_qualify=True)
            full_rust_name = self._rust_type(typ)
            return '[`{}::{}`]({})'.format(
                ns, rust_name, full_rust_name)
        else:
            typ = self._all_types[self._current_namespace][val]
            rust_name = self._rust_type(typ)
            return '[`{}`]({})'.format(rust_name, rust_name)
    elif tag == 'link':
        # Value is "<title words> <url>"; the url is the last token.
        title, url = val.rsplit(' ', 1)
        return '[{}]({})'.format(title, url)
    elif tag == 'val':
        # Literal default values; Stone 'null' maps to Rust's None.
        if val == 'null':
            return '`None`'
        else:
            return '`{}`'.format(val)
    else:
        print("WARNING: unrecognized link tag '{}'".format(tag))
        return '`{}`'.format(val)
@contextmanager
def _impl_deserialize(self, type_name):
    """Context manager that opens an `impl Deserialize for <type_name>` block
    with its `deserialize` function; the caller emits the function body."""
    impl_header = u'impl<\'de> ::serde::de::Deserialize<\'de> for {}'.format(type_name)
    with self.block(impl_header):
        with self.emit_rust_function_def(
                u'deserialize<D: ::serde::de::Deserializer<\'de>>',
                [u'deserializer: D'],
                u'Result<Self, D::Error>'):
            yield
@contextmanager
def _impl_serialize(self, type_name):
    """Context manager that opens an `impl Serialize for <type_name>` block
    with its `serialize` function; the caller emits the function body."""
    impl_header = u'impl ::serde::ser::Serialize for {}'.format(type_name)
    with self.block(impl_header):
        with self.emit_rust_function_def(
                u'serialize<S: ::serde::ser::Serializer>',
                [u'&self', u'serializer: S'],
                u'Result<S::Ok, S::Error>'):
            yield
def _impl_default_for_struct(self, struct):
    """Emit `impl Default` for a struct, defaulting every field."""
    name = self.struct_name(struct)
    with self.block(u'impl Default for {}'.format(name)), \
            self.emit_rust_function_def(u'default', [], u'Self'), \
            self.block(name):
        for member in struct.all_fields:
            line = u'{}: {},'.format(self.field_name(member),
                                     self._default_value(member))
            self.emit(line)
def _impl_struct(self, struct):
    """Return a block context manager for an inherent `impl` of the struct."""
    header = u'impl {}'.format(self.struct_name(struct))
    return self.block(header)
def _emit_new_for_struct(self, struct):
struct_name = self.struct_name(struct)
first = True
if struct.all_required_fields:
with self.emit_rust_function_def(
u'new',
[u'{}: {}'.format(self.field_name(field), self._rust_type(field.data_type))
for field in struct.all_required_fields],
u'Self',
access=u'pub'):
with self.block(struct_name):
for field in struct.all_required_fields:
# shorthand assignment
self.emit(u'{},'.format(self.field_name(field)))
for field in struct.all_optional_fields:
self.emit(u'{}: {},'.format(
self.field_name(field),
self._default_value(field)))
first = False
for field in struct.all_optional_fields:
if first:
first = False
else:
self.emit()
field_name = self.field_name(field)
if isinstance(field.data_type, ir.Nullable):
# If it's a nullable type, the default is always None. Change the argument type to
# the inner type, because if the user is using builder methods it means they don't
# want the default, so making them type 'Some(...)' is redundant.
field_type = field.data_type.data_type
value | |
'Joy_13': {'Type': 'Digital', 'x': 1104, 'y': 1500, 'width': 572}, # TGL 1 down
'Joy_14': {'Type': 'Digital', 'x': 1164, 'y': 1304, 'width': 572}, # TGL 2 up
'Joy_15': {'Type': 'Digital', 'x': 1164, 'y': 1360, 'width': 572}, # TGL 2 down
'Joy_16': {'Type': 'Digital', 'x': 1224, 'y': 1168, 'width': 572}, # TGL 3 up
'Joy_17': {'Type': 'Digital', 'x': 1224, 'y': 1224, 'width': 572}, # TGL 3 down
'Joy_18': {'Type': 'Digital', 'x': 1224, 'y': 1026, 'width': 572}, # TGL 4 up
'Joy_19': {'Type': 'Digital', 'x': 1224, 'y': 1082, 'width': 572}, # TGL 4 down
'Joy_20': {'Type': 'Digital', 'x': 1154, 'y': 534, 'width': 772}, # H3 up
'Joy_21': {'Type': 'Digital', 'x': 1154, 'y': 590, 'width': 772}, # H3 right
'Joy_22': {'Type': 'Digital', 'x': 1154, 'y': 646, 'width': 772}, # H3 down
'Joy_23': {'Type': 'Digital', 'x': 1154, 'y': 702, 'width': 772}, # H3 left
'Joy_24': {'Type': 'Digital', 'x': 1364, 'y': 1924, 'width': 772}, # H4 up
'Joy_25': {'Type': 'Digital', 'x': 1364, 'y': 1980, 'width': 772}, # H4 right
'Joy_26': {'Type': 'Digital', 'x': 1364, 'y': 2036, 'width': 772}, # H4 down
'Joy_27': {'Type': 'Digital', 'x': 1364, 'y': 2092, 'width': 772}, # H4 left
'Joy_28': {'Type': 'Digital', 'x': 3124, 'y': 2034, 'width': 692}, # Pinky rocker up
'Joy_29': {'Type': 'Digital', 'x': 3124, 'y': 2090, 'width': 692}, # Pinky rocker down
'Joy_30': {'Type': 'Digital', 'x': 3134, 'y': 1884, 'width': 692}, # Pinky spin up
'Joy_31': {'Type': 'Digital', 'x': 3134, 'y': 1940, 'width': 692}, # Pinky spin down
'Joy_32': {'Type': 'Digital', 'x': 1224, 'y': 894, 'width': 642}, # Stick press
'Joy_XAxis': {'Type': 'Analogue', 'x': 84, 'y': 784, 'width': 572}, # Throttle (left/both)
'Joy_YAxis': {'Type': 'Analogue', 'x': 124, 'y': 694, 'width': 572}, # Throttle (right)
'Joy_ZAxis': {'Type': 'Analogue', 'x': 1874, 'y': 1604, 'width': 672}, # F spin
'Joy_RXAxis': {'Type': 'Analogue', 'x': 1224, 'y': 838, 'width': 642}, # Stick x
'Joy_RYAxis': {'Type': 'Analogue', 'x': 1224, 'y': 782, 'width': 642}, # Stick y
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1534, 'y': 1754, 'width': 672}, # G spin
'Joy_UAxis': {'Type': 'Analogue', 'x': 1014, 'y': 1588, 'width': 692}, # RTY3 axis
'Joy_VAxis': {'Type': 'Analogue', 'x': 854, 'y': 1668, 'width': 692}, # RTY4 axis
},
'CHCombatStick': {
'Joy_1': {'Type': 'Digital', 'x': 1974, 'y': 854, 'width': 592}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 2984, 'y': 944, 'width': 832}, # Thumb button
'Joy_3': {'Type': 'Digital', 'x': 2964, 'y': 654, 'width': 832}, # Top button
'Joy_4': {'Type': 'Digital', 'x': 2084, 'y': 1159, 'width': 592}, # Pinky trigger
'Joy_5': {'Type': 'Digital', 'x': 1754, 'y': 234, 'width': 892}, # Side trigger
'Joy_6': {'Type': 'Digital', 'x': 1664, 'y': 334, 'width': 892}, # Top trigger
'Joy_7': {'Type': 'Digital', 'x': 1689, 'y': 464, 'width': 832}, # PoV hat up
'Joy_8': {'Type': 'Digital', 'x': 1689, 'y': 520, 'width': 832}, # PoV hat right
'Joy_9': {'Type': 'Digital', 'x': 1689, 'y': 576, 'width': 832}, # PoV hat down
'Joy_10': {'Type': 'Digital', 'x': 1689, 'y': 632, 'width': 832}, # PoV hat left
'Joy_POV1Up': {'Type': 'Digital', 'x': 2964, 'y': 212, 'width': 832}, # Right hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 2964, 'y': 268, 'width': 832}, # Right hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 2964, 'y': 324, 'width': 832}, # Right hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 2964, 'y': 380, 'width': 832}, # Right hat left
'Joy_XAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1168, 'width': 692}, # Stick X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1112, 'width': 692}, # Stick Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 2254, 'y': 1894, 'width': 692}, # Wheel on left of base
},
'CHFighterStick': {
'Joy_1': {'Type': 'Digital', 'x': 1974, 'y': 854, 'width': 592}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 1664, 'y': 334, 'width': 892}, # Top trigger
'Joy_3': {'Type': 'Digital', 'x': 1754, 'y': 234, 'width': 892}, # Side trigger
'Joy_4': {'Type': 'Digital', 'x': 2084, 'y': 1159, 'width': 592}, # Pinky trigger
'Joy_5': {'Type': 'Digital', 'x': 1689, 'y': 464, 'width': 832}, # Left hat up
'Joy_6': {'Type': 'Digital', 'x': 1689, 'y': 520, 'width': 832}, # Left hat right
'Joy_7': {'Type': 'Digital', 'x': 1689, 'y': 576, 'width': 832}, # Left hat down
'Joy_8': {'Type': 'Digital', 'x': 1689, 'y': 632, 'width': 832}, # Left hat left
'Joy_9': {'Type': 'Digital', 'x': 2994, 'y': 494, 'width': 832}, # Castle hat up
'Joy_10': {'Type': 'Digital', 'x': 2994, 'y': 550, 'width': 832}, # Castle hat right
'Joy_11': {'Type': 'Digital', 'x': 2994, 'y': 606, 'width': 832}, # Castle hat down
'Joy_12': {'Type': 'Digital', 'x': 2994, 'y': 662, 'width': 832}, # Castle hat left
'Joy_13': {'Type': 'Digital', 'x': 3058, 'y': 810, 'width': 772}, # Thumb hat up
'Joy_14': {'Type': 'Digital', 'x': 3058, 'y': 866, 'width': 772}, # Thumb hat right
'Joy_15': {'Type': 'Digital', 'x': 3058, 'y': 922, 'width': 772}, # Thumb hat down
'Joy_16': {'Type': 'Digital', 'x': 3058, 'y': 978, 'width': 772}, # Thumb hat left
'Joy_POV1Up': {'Type': 'Digital', 'x': 2964, 'y': 212, 'width': 832}, # POV hat up (looks like witch hat)
'Joy_POV1Right': {'Type': 'Digital', 'x': 2964, 'y': 268, 'width': 832}, # POV hat right (looks like witch hat)
'Joy_POV1Down': {'Type': 'Digital', 'x': 2964, 'y': 324, 'width': 832}, # POV hat down (looks like witch hat)
'Joy_POV1Left': {'Type': 'Digital', 'x': 2964, 'y': 380, 'width': 832}, # POV hat left (looks like witch hat)
'Joy_XAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1112, 'width': 692}, # Stick X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1168, 'width': 692}, # Stick Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 2254, 'y': 1894, 'width': 692}, # Wheel on left of base
},
'068EC0F3': { # CH FighterStick with CH manager
'displayName': 'CH FighterStick with CH manager',
'Joy_1': {'Type': 'Digital', 'x': 1974, 'y': 854, 'width': 592}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 1664, 'y': 334, 'width': 892}, # Top trigger
'Joy_3': {'Type': 'Digital', 'x': 1754, 'y': 234, 'width': 892}, # Side trigger
'Joy_4': {'Type': 'Digital', 'x': 2084, 'y': 1159, 'width': 592}, # Pinky trigger
'Joy_5': {'Type': 'Digital', 'x': 1689, 'y': 464, 'width': 832}, # Left hat up
'Joy_6': {'Type': 'Digital', 'x': 1689, 'y': 520, 'width': 832}, # Left hat right
'Joy_7': {'Type': 'Digital', 'x': 1689, 'y': 576, 'width': 832}, # Left hat down
'Joy_8': {'Type': 'Digital', 'x': 1689, 'y': 632, 'width': 832}, # Left hat left
'Joy_9': {'Type': 'Digital', 'x': 2994, 'y': 494, 'width': 832}, # Castle hat up
'Joy_10': {'Type': 'Digital', 'x': 2994, 'y': 550, 'width': 832}, # Castle hat right
'Joy_11': {'Type': 'Digital', 'x': 2994, 'y': 606, 'width': 832}, # Castle hat down
'Joy_12': {'Type': 'Digital', 'x': 2994, 'y': 662, 'width': 832}, # Castle hat left
'Joy_13': {'Type': 'Digital', 'x': 3058, 'y': 810, 'width': 772}, # Thumb hat up
'Joy_14': {'Type': 'Digital', 'x': 3058, 'y': 866, 'width': 772}, # Thumb hat right
'Joy_15': {'Type': 'Digital', 'x': 3058, 'y': 922, 'width': 772}, # Thumb hat down
'Joy_16': {'Type': 'Digital', 'x': 3058, 'y': 978, 'width': 772}, # Thumb hat left
'Joy_POV1Up': {'Type': 'Digital', 'x': 2964, 'y': 212, 'width': 832}, # POV hat up (looks like witch hat)
'Joy_POV1Right': {'Type': 'Digital', 'x': 2964, 'y': 268, 'width': 832}, # POV hat right (looks like witch hat)
'Joy_POV1Down': {'Type': 'Digital', 'x': 2964, 'y': 324, 'width': 832}, # POV hat down (looks like witch hat)
'Joy_POV1Left': {'Type': 'Digital', 'x': 2964, 'y': 380, 'width': 832}, # POV hat left (looks like witch hat)
'Joy_XAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1112, 'width': 692}, # Stick X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 3124, 'y': 1168, 'width': 692}, # Stick Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 2254, 'y': 1894, 'width': 692}, # Wheel on left of base
},
'CHProThrottle1': {
'Joy_1': {'Type': 'Digital', 'x': 1114, 'y': 966, 'width': 832}, # Castle hat right
'Joy_2': {'Type': 'Digital', 'x': 1114, 'y': 1022, 'width': 832}, # Castle hat down
'Joy_3': {'Type': 'Digital', 'x': 1114, 'y': 1078, 'width': 832}, # Castle hat left
'Joy_4': {'Type': 'Digital', 'x': 1114, 'y': 910, 'width': 832}, # Castle hat up
'Joy_5': {'Type': 'Digital', 'x': 1189, 'y': 1696, 'width': 1032}, # Bottom hat right
'Joy_6': |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.