gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from contextlib import contextmanager
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.i18n import _LE, _LI, _LW
from neutron.extensions import portbindings
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import config as config
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import exceptions as cexc
LOG = logging.getLogger(__name__)
class CiscoUcsmDriver(object):

    """UCS Manager Driver Main Class.

    Maintains a mapping of UCS Servers to the UCS Manager that controls
    them, and creates/deletes VLAN Profiles, Port Profiles and Service
    Profile updates on the UCS Manager on behalf of the ML2 mechanism
    driver.
    """

    def __init__(self):
        LOG.debug("UCS Manager Network driver found")
        # The UcsSdk module is imported lazily (see _import_ucsmsdk).
        self.ucsmsdk = None
        self.supported_sriov_vnic_types = [portbindings.VNIC_DIRECT,
                                           portbindings.VNIC_MACVTAP]
        self.supported_pci_devs = config.parse_pci_vendor_config()
        self.ucsm_conf = config.UcsmConfig()
        # UCS Server name -> IP of the UCS Manager that manages it.
        self.ucsm_host_dict = {}
        # (UCS Manager IP, UCS Server name) -> Service Profile DN.
        self.ucsm_sp_dict = {}
        self._create_ucsm_host_to_service_profile_mapping()

    def check_vnic_type_and_vendor_info(self, vnic_type, profile):
        """Checks if this vnic_type and vendor device info are supported.

        Returns True if:
        1. the port vnic_type is direct or macvtap and
        2. the vendor_id and product_id of the port is supported by
           this MD

        Useful in determining if this MD should bind the current
        port.
        """
        # Check for vnic_type
        if vnic_type not in self.supported_sriov_vnic_types:
            LOG.info(_LI('Non SR-IOV vnic_type: %s.'), vnic_type)
            return False

        if not profile:
            return False

        # Check for vendor_info
        return self._check_for_supported_vendor(profile)

    def _check_for_supported_vendor(self, profile):
        """Checks if the port belongs to a supported vendor.

        Returns True for supported_pci_devs.
        """
        vendor_info = profile.get('pci_vendor_info')
        if not vendor_info:
            return False
        if vendor_info not in self.supported_pci_devs:
            return False
        return True

    def is_vmfex_port(self, profile):
        """Checks if the port is a VMFEX port.

        Returns True only for port that support VM-FEX.
        It is important to distinguish between the two since Port Profiles
        on the UCS Manager are created only for the VM-FEX ports.
        """
        vendor_info = profile.get('pci_vendor_info')
        return vendor_info == const.PCI_INFO_CISCO_VIC_1240

    def _import_ucsmsdk(self):
        """Imports the Ucsm SDK module.

        This module is not installed as part of the normal Neutron
        distributions. It is imported dynamically in this module so that
        the import can be mocked, allowing unit testing without requiring
        the installation of UcsSdk.
        """
        return importutils.import_module('UcsSdk')

    @contextmanager
    def ucsm_connect_disconnect(self, ucsm_ip):
        """Context manager yielding a logged-in UCSM handle.

        Guarantees a Logout even when the body raises.
        """
        handle = self.ucs_manager_connect(ucsm_ip)
        try:
            yield handle
        finally:
            self.ucs_manager_disconnect(handle, ucsm_ip)

    def ucs_manager_connect(self, ucsm_ip):
        """Connects to a UCS Manager.

        :raises UcsmConnectFailed: when the Login call fails.
        """
        if not self.ucsmsdk:
            self.ucsmsdk = self._import_ucsmsdk()
        credentials = self.ucsm_conf.get_credentials_for_ucsm_ip(
            ucsm_ip)

        handle = self.ucsmsdk.UcsHandle()
        try:
            handle.Login(ucsm_ip, credentials[1], credentials[0])
        except Exception as e:
            # Raise a Neutron exception. Include a description of
            # the original exception.
            raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e)

        return handle

    def _get_server_name(self, handle, service_profile_mo, ucsm_ip):
        """Get the contents of the 'Name' field associated with UCS Server.

        When a valid connection handle to UCS Manager is handed in, the Name
        field associated with a UCS Server is returned.

        :raises UcsmConfigReadFailed: when the config query fails.
        """
        try:
            resolved_dest = handle.ConfigResolveDn(service_profile_mo.PnDn)
            server_list = resolved_dest.OutConfig.GetChild()
            if not server_list:
                return ""
            return server_list[0].Name
        except Exception as e:
            # Raise a Neutron exception. Include a description of
            # the original exception.
            raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)

    def _create_ucsm_host_to_service_profile_mapping(self):
        """Reads list of Service profiles and finds associated Server."""
        ucsm_ips = self.ucsm_conf.get_all_ucsm_ips()

        for ucsm_ip in ucsm_ips:
            with self.ucsm_connect_disconnect(ucsm_ip) as handle:
                try:
                    sp_list_temp = handle.ConfigResolveClass('lsServer', None,
                        inHierarchical=False)
                    if sp_list_temp and sp_list_temp.OutConfigs is not None:
                        sp_list = sp_list_temp.OutConfigs.GetChild() or []
                        for sp in sp_list:
                            # Only Service Profiles associated with a
                            # physical server (non-empty PnDn) are useful.
                            if sp.PnDn != "":
                                server_name = self._get_server_name(handle, sp,
                                                                    ucsm_ip)
                                if server_name != "":
                                    key = (ucsm_ip, server_name)
                                    self.ucsm_sp_dict[key] = sp.Dn
                                    self.ucsm_host_dict[server_name] = ucsm_ip
                except Exception as e:
                    # Raise a Neutron exception. Include a description of
                    # the original exception.
                    raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)

    def get_ucsm_ip_for_host(self, host_id):
        """Returns the UCSM IP managing host_id, or None when unknown."""
        return self.ucsm_host_dict.get(host_id)

    def _create_vlanprofile(self, vlan_id, ucsm_ip):
        """Creates VLAN profile to be assosiated with the Port Profile."""
        vlan_name = self.make_vlan_name(vlan_id)
        vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                             vlan_name)
        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                handle.StartTransaction()
                vp1 = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.FabricLanCloud.ClassId(),
                    {self.ucsmsdk.FabricLanCloud.DN: const.VLAN_PATH})
                if not vp1:
                    LOG.warning(_LW('UCS Manager network driver Vlan Profile '
                                    'path at %s missing'), const.VLAN_PATH)
                    return False

                # Create a vlan profile with the given vlan_id
                vp2 = handle.AddManagedObject(
                    vp1,
                    self.ucsmsdk.FabricVlan.ClassId(),
                    {self.ucsmsdk.FabricVlan.COMPRESSION_TYPE:
                     const.VLAN_COMPRESSION_TYPE,
                     self.ucsmsdk.FabricVlan.DN: vlan_profile_dest,
                     self.ucsmsdk.FabricVlan.SHARING: const.NONE,
                     self.ucsmsdk.FabricVlan.PUB_NW_NAME: "",
                     self.ucsmsdk.FabricVlan.ID: str(vlan_id),
                     self.ucsmsdk.FabricVlan.MCAST_POLICY_NAME: "",
                     self.ucsmsdk.FabricVlan.NAME: vlan_name,
                     self.ucsmsdk.FabricVlan.DEFAULT_NET: "no"})
                handle.CompleteTransaction()

                if vp2:
                    LOG.debug('UCS Manager network driver created Vlan '
                              'Profile %s at %s', vlan_name, vlan_profile_dest)
                    return True
            except Exception as e:
                return self._handle_ucsm_exception(e, 'Vlan Profile',
                                                   vlan_name, ucsm_ip)

    def _create_port_profile(self, profile_name, vlan_id, vnic_type, ucsm_ip):
        """Creates a Port Profile on the UCS Manager.

        Significant parameters set in the port profile are:
        1. Port profile name - Should match what was set in vif_details
        2. High performance mode - For VM-FEX to be enabled/configured on
        the port using this port profile, this mode should be enabled.
        3. Vlan id - Vlan id used by traffic to and from the port.
        """
        port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                             profile_name)

        vlan_name = self.make_vlan_name(vlan_id)
        vlan_associate_path = (const.PORT_PROFILESETDN +
                               const.VNIC_PATH_PREFIX + profile_name +
                               const.VLAN_PATH_PREFIX + vlan_name)
        cl_profile_name = const.CLIENT_PROFILE_NAME_PREFIX + str(vlan_id)
        cl_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                           profile_name + const.CLIENT_PROFILE_PATH_PREFIX +
                           cl_profile_name)

        # Check if direct or macvtap mode
        if vnic_type == portbindings.VNIC_DIRECT:
            port_mode = const.HIGH_PERF
        else:
            port_mode = const.NONE

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                handle.StartTransaction()
                port_profile = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.VnicProfileSet.ClassId(),
                    {self.ucsmsdk.VnicProfileSet.DN: const.PORT_PROFILESETDN})

                if not port_profile:
                    LOG.warning(_LW('UCS Manager network driver Port Profile '
                                    'path at %s missing'),
                                const.PORT_PROFILESETDN)
                    return False

                # Create a port profile on the UCS Manager
                p_profile = handle.AddManagedObject(
                    port_profile,
                    self.ucsmsdk.VnicProfile.ClassId(),
                    {self.ucsmsdk.VnicProfile.NAME: profile_name,
                     self.ucsmsdk.VnicProfile.POLICY_OWNER: "local",
                     self.ucsmsdk.VnicProfile.NW_CTRL_POLICY_NAME: "",
                     self.ucsmsdk.VnicProfile.PIN_TO_GROUP_NAME: "",
                     self.ucsmsdk.VnicProfile.DN: port_profile_dest,
                     self.ucsmsdk.VnicProfile.DESCR: const.DESCR,
                     self.ucsmsdk.VnicProfile.QOS_POLICY_NAME: "",
                     self.ucsmsdk.VnicProfile.HOST_NW_IOPERF: port_mode,
                     self.ucsmsdk.VnicProfile.MAX_PORTS: const.MAX_PORTS})
                if not p_profile:
                    LOG.warning(_LW('UCS Manager network driver could not '
                                    'create Port Profile %s.'), profile_name)
                    return False

                LOG.debug('UCS Manager network driver associating Vlan '
                          'Profile with Port Profile at %s',
                          vlan_associate_path)
                # Associate port profile with vlan profile
                mo = handle.AddManagedObject(
                    p_profile,
                    self.ucsmsdk.VnicEtherIf.ClassId(),
                    {self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path,
                     self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
                     self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "yes"}, True)
                if not mo:
                    LOG.warning(_LW('UCS Manager network driver cannot '
                                    'associate Vlan Profile to Port '
                                    'Profile %s'), profile_name)
                    return False
                LOG.debug('UCS Manager network driver created Port Profile %s '
                          'at %s', profile_name, port_profile_dest)

                cl_profile = handle.AddManagedObject(
                    p_profile,
                    self.ucsmsdk.VmVnicProfCl.ClassId(),
                    {self.ucsmsdk.VmVnicProfCl.ORG_PATH: ".*",
                     self.ucsmsdk.VmVnicProfCl.DN: cl_profile_dest,
                     self.ucsmsdk.VmVnicProfCl.NAME: cl_profile_name,
                     self.ucsmsdk.VmVnicProfCl.POLICY_OWNER: "local",
                     self.ucsmsdk.VmVnicProfCl.SW_NAME: ".*",
                     self.ucsmsdk.VmVnicProfCl.DC_NAME: ".*",
                     self.ucsmsdk.VmVnicProfCl.DESCR: const.DESCR})
                handle.CompleteTransaction()

                if not cl_profile:
                    LOG.warning(_LW('UCS Manager network driver could not '
                                    'create Client Profile %s.'),
                                cl_profile_name)
                    return False

                LOG.debug('UCS Manager network driver created Client Profile '
                          '%s at %s', cl_profile_name, cl_profile_dest)
                return True
            except Exception as e:
                return self._handle_ucsm_exception(e, 'Port Profile',
                                                   profile_name, ucsm_ip)

    def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id):
        """Top level method to create Port Profiles on the UCS Manager.

        Calls all the methods responsible for the individual tasks that
        ultimately result in the creation of the Port Profile on the UCS
        Manager.
        """
        ucsm_ip = self.ucsm_host_dict.get(host_id)
        if not ucsm_ip:
            LOG.info(_LI('UCS Manager network driver does not support Host_id '
                         '%s'), str(host_id))
            return False

        # Create Vlan Profile
        if not self._create_vlanprofile(vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), str(vlan_id))
            return False

        # Create Port Profile
        if not self._create_port_profile(profile_name, vlan_id, vnic_type,
                                         ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Port Profile %s'), profile_name)
            return False

        return True

    def _update_service_profile(self, service_profile, vlan_id, ucsm_ip):
        """Updates Service Profile on the UCS Manager.

        Each of the ethernet ports on the Service Profile representing
        the UCS Server, is updated with the VLAN profile corresponding
        to the vlan_id passed in.
        """
        eth0 = str(service_profile) + const.ETH0
        eth1 = str(service_profile) + const.ETH1
        eth_port_paths = [eth0, eth1]
        vlan_name = self.make_vlan_name(vlan_id)

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                obj = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.LsServer.ClassId(),
                    {self.ucsmsdk.LsServer.DN: service_profile})

                if not obj:
                    LOG.debug('UCS Manager network driver could not find '
                              'Service Profile %s', service_profile)
                    return False

                for eth_port_path in eth_port_paths:
                    eth = handle.GetManagedObject(
                        obj, self.ucsmsdk.VnicEther.ClassId(),
                        {self.ucsmsdk.VnicEther.DN: eth_port_path}, True)

                    if eth:
                        vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX +
                                     vlan_name)

                        eth_if = handle.AddManagedObject(eth,
                            self.ucsmsdk.VnicEtherIf.ClassId(),
                            {self.ucsmsdk.VnicEtherIf.DN: vlan_path,
                             self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
                             self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)

                        if not eth_if:
                            LOG.debug('UCS Manager network driver could not '
                                      'update Service Profile %s with vlan %s',
                                      service_profile,
                                      str(vlan_id))
                            return False
                    else:
                        LOG.debug('UCS Manager network driver did not find '
                                  'ethernet port at %s', eth_port_path)
                # NOTE(review): unlike the other mutators in this class,
                # there is no matching StartTransaction() call in this
                # method — confirm whether one is required by UcsSdk.
                handle.CompleteTransaction()
                return True
            except Exception as e:
                return self._handle_ucsm_exception(e, 'Service Profile',
                                                   vlan_name, ucsm_ip)

    def update_serviceprofile(self, host_id, vlan_id):
        """Top level method to update Service Profiles on UCS Manager.

        Calls all the methods responsible for the individual tasks that
        ultimately result in a vlan_id getting programed on a server's
        ethernet ports and the Fabric Interconnect's network ports.
        """
        ucsm_ip = self.ucsm_host_dict.get(host_id)
        # BUG FIX: ucsm_sp_dict is keyed by (ucsm_ip, host_id) tuples.
        # The previous code called .get(ucsm_ip, host_id), which used
        # host_id as the *default* value of dict.get, so an unknown host
        # silently produced a bogus, always-truthy "service profile".
        service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
        if service_profile:
            LOG.debug("UCS Manager network driver Service Profile : %s",
                      service_profile)
        else:
            LOG.info(_LI('UCS Manager network driver does not support Host_id '
                         '%s'), str(host_id))
            return False

        # Create Vlan Profile
        if not self._create_vlanprofile(vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), str(vlan_id))
            return False

        # Update Service Profile
        if not self._update_service_profile(service_profile, vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to update '
                          'Service Profile %s'), service_profile)
            return False

        return True

    def _delete_vlan_profile(self, vlan_id, ucsm_ip):
        """Deletes VLAN Profile from UCS Manager."""
        vlan_name = self.make_vlan_name(vlan_id)
        vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                             vlan_name)

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                handle.StartTransaction()

                obj = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.FabricVlan.ClassId(),
                    {self.ucsmsdk.FabricVlan.DN: vlan_profile_dest})

                if obj:
                    handle.RemoveManagedObject(obj)

                handle.CompleteTransaction()
            except Exception as e:
                # Raise a Neutron exception. Include a description of
                # the original exception.
                raise cexc.UcsmConfigFailed(config=vlan_id,
                                            ucsm_ip=ucsm_ip, exc=e)

    def _delete_port_profile(self, port_profile, ucsm_ip):
        """Deletes Port Profile from UCS Manager."""
        port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                             port_profile)

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                handle.StartTransaction()

                # Find port profile on the UCS Manager
                p_profile = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.VnicProfile.ClassId(),
                    {self.ucsmsdk.VnicProfile.NAME: port_profile,
                     self.ucsmsdk.VnicProfile.DN: port_profile_dest})

                if not p_profile:
                    LOG.warning(_LW('UCS Manager network driver did not find '
                                    'Port Profile %s to delete.'),
                                port_profile)
                    return

                handle.RemoveManagedObject(p_profile)
                handle.CompleteTransaction()
            except Exception as e:
                # Raise a Neutron exception. Include a description of
                # the original exception.
                raise cexc.UcsmConfigFailed(config=port_profile,
                                            ucsm_ip=ucsm_ip, exc=e)

    def _remove_vlan_from_all_service_profiles(self, vlan_id, ucsm_ip):
        """Deletes VLAN Profile config from server's ethernet ports."""
        service_profile_list = []
        # BUG FIX: ucsm_sp_dict keys are (ucsm_ip, host_id) tuples, so
        # iteritems yields ((ucsm_ip, host_id), value) pairs. The previous
        # 3-name unpacking raised ValueError at runtime.
        for (ucsm, host_id), value in six.iteritems(self.ucsm_sp_dict):
            if ucsm == ucsm_ip and value:
                service_profile_list.append(value)

        if not service_profile_list:
            # Nothing to do
            return

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                handle.StartTransaction()
                for service_profile in service_profile_list:
                    eth0 = service_profile + const.ETH0
                    eth1 = service_profile + const.ETH1
                    eth_port_paths = [eth0, eth1]

                    # 1. From the Service Profile config, access the
                    # configuration for its ports.
                    # 2. Check if that Vlan has been configured on each port
                    # 3. If Vlan conifg found, remove it.
                    obj = handle.GetManagedObject(
                        None,
                        self.ucsmsdk.LsServer.ClassId(),
                        {self.ucsmsdk.LsServer.DN: service_profile})

                    if obj:
                        # Check if this vlan_id has been configured on the
                        # ports in this Service profile
                        for eth_port_path in eth_port_paths:
                            eth = handle.GetManagedObject(
                                obj, self.ucsmsdk.VnicEther.ClassId(),
                                {self.ucsmsdk.VnicEther.DN: eth_port_path},
                                True)
                            if eth:
                                vlan_name = self.make_vlan_name(vlan_id)
                                vlan_path = eth_port_path + "/if-" + vlan_name
                                vlan = handle.GetManagedObject(eth,
                                    self.ucsmsdk.VnicEtherIf.ClassId(),
                                    {self.ucsmsdk.VnicEtherIf.DN: vlan_path})
                                if vlan:
                                    # Found vlan config. Now remove it.
                                    handle.RemoveManagedObject(vlan)
                handle.CompleteTransaction()
            except Exception as e:
                # Raise a Neutron exception. Include a description of
                # the original exception.
                raise cexc.UcsmConfigFailed(config=vlan_id,
                                            ucsm_ip=ucsm_ip, exc=e)

    def delete_all_config_for_vlan(self, vlan_id, port_profile):
        """Top level method to delete all config for vlan_id."""
        ucsm_ips = self.ucsm_conf.get_all_ucsm_ips()
        for ucsm_ip in ucsm_ips:
            self._delete_port_profile(port_profile, ucsm_ip)
            self._remove_vlan_from_all_service_profiles(vlan_id, ucsm_ip)
            self._delete_vlan_profile(vlan_id, ucsm_ip)

    def _handle_ucsm_exception(self, exception_type, profile_type,
                               profile_name, ucsm_ip):
        """Treat duplicate-object errors as success; re-raise the rest."""
        if const.DUPLICATE_EXCEPTION in str(exception_type):
            LOG.debug('UCS Manager network driver ignoring duplicate '
                      'create/update of %s with %s',
                      profile_type, profile_name)
            return True
        else:
            # Raise a Neutron exception. Include a description of
            # the original exception.
            raise cexc.UcsmConfigFailed(config=profile_name,
                                        ucsm_ip=ucsm_ip,
                                        exc=exception_type)

    def ucs_manager_disconnect(self, handle, ucsm_ip):
        """Disconnects from the UCS Manager.

        After the disconnect, the handle associated with this connection
        is no longer valid.
        """
        try:
            handle.Logout()
        except Exception as e:
            # Raise a Neutron exception. Include a description of
            # the original exception.
            raise cexc.UcsmDisconnectFailed(ucsm_ip=ucsm_ip, exc=e)

    @staticmethod
    def make_vlan_name(vlan_id):
        """Returns the canonical VLAN Profile name for vlan_id."""
        return const.VLAN_PROFILE_NAME_PREFIX + str(vlan_id)
| |
# Copyright 2012 Managed I.T.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import contextlib
import functools
from oslo.config import cfg
from oslo import messaging
from designate.openstack.common import log as logging
from designate.openstack.common import excutils
from designate.i18n import _LI
from designate.i18n import _LC
from designate import backend
from designate import central
from designate import exceptions
from designate import network_api
from designate import objects
from designate import policy
from designate import quota
from designate import service
from designate import utils
from designate import storage
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def wrap_backend_call():
    """Wrap backend calls so every failure surfaces as a Backend exception.

    Backend exceptions propagate untouched; any other exception is
    converted into a generic Backend error describing the original one.
    """
    try:
        yield
    except Exception as exc:
        if isinstance(exc, exceptions.Backend):
            raise
        raise exceptions.Backend('Unknown backend failure: %r' % exc)
def transaction(f):
    """Decorator running *f* inside a storage transaction.

    Begins a transaction before invoking *f*, commits on success, and
    rolls back (re-raising the original error) on failure.
    """
    # TODO(kiall): Get this a better home :)
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        storage = self.storage
        storage.begin()
        try:
            result = f(self, *args, **kwargs)
        except Exception:
            with excutils.save_and_reraise_exception():
                storage.rollback()
        # Reached only on success: the except branch always re-raises.
        storage.commit()
        return result
    return wrapper
class Service(service.RPCService):
RPC_API_VERSION = '4.0'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, *args, **kwargs):
super(Service, self).__init__(*args, **kwargs)
backend_driver = cfg.CONF['service:central'].backend_driver
self.backend = backend.get_backend(backend_driver, self)
# Get a storage connection
storage_driver = cfg.CONF['service:central'].storage_driver
self.storage = storage.get_storage(storage_driver)
# Get a quota manager instance
self.quota = quota.get_quota()
self.network_api = network_api.get_network_api(cfg.CONF.network_api)
def start(self):
# Check to see if there are any TLDs in the database
tlds = self.storage.find_tlds({})
if tlds:
self.check_for_tlds = True
LOG.info(_LI("Checking for TLDs"))
else:
self.check_for_tlds = False
LOG.info(_LI("NOT checking for TLDs"))
self.backend.start()
super(Service, self).start()
    def stop(self):
        """Stop the service: parent teardown first, then the backend."""
        super(Service, self).stop()
        self.backend.stop()
    @property
    def mdns_api(self):
        # Presumably a client for the mdns service, obtained lazily on
        # each access — confirm against designate.central.get_mdns_api.
        return central.get_mdns_api()
def _is_valid_domain_name(self, context, domain_name):
# Validate domain name length
if len(domain_name) > cfg.CONF['service:central'].max_domain_name_len:
raise exceptions.InvalidDomainName('Name too long')
# Break the domain name up into its component labels
domain_labels = domain_name.strip('.').split('.')
# We need more than 1 label.
if len(domain_labels) <= 1:
raise exceptions.InvalidDomainName('More than one label is '
'required')
# Check the TLD for validity if there are entries in the database
if self.check_for_tlds:
try:
self.storage.find_tld(context, {'name': domain_labels[-1]})
except exceptions.TldNotFound:
raise exceptions.InvalidDomainName('Invalid TLD')
# Now check that the domain name is not the same as a TLD
try:
stripped_domain_name = domain_name.strip('.').lower()
self.storage.find_tld(
context,
{'name': stripped_domain_name})
except exceptions.TldNotFound:
pass
else:
raise exceptions.InvalidDomainName(
'Domain name cannot be the same as a TLD')
# Check domain name blacklist
if self._is_blacklisted_domain_name(context, domain_name):
# Some users are allowed bypass the blacklist.. Is this one?
if not policy.check('use_blacklisted_domain', context,
do_raise=False):
raise exceptions.InvalidDomainName('Blacklisted domain name')
return True
def _is_valid_recordset_name(self, context, domain, recordset_name):
if not recordset_name.endswith('.'):
raise ValueError('Please supply a FQDN')
# Validate record name length
max_len = cfg.CONF['service:central'].max_recordset_name_len
if len(recordset_name) > max_len:
raise exceptions.InvalidRecordSetName('Name too long')
# RecordSets must be contained in the parent zone
if not recordset_name.endswith(domain['name']):
raise exceptions.InvalidRecordSetLocation(
'RecordSet is not contained within it\'s parent domain')
def _is_valid_recordset_placement(self, context, domain, recordset_name,
recordset_type, recordset_id=None):
# CNAME's must not be created at the zone apex.
if recordset_type == 'CNAME' and recordset_name == domain.name:
raise exceptions.InvalidRecordSetLocation(
'CNAME recordsets may not be created at the zone apex')
# CNAME's must not share a name with other recordsets
criterion = {
'domain_id': domain.id,
'name': recordset_name,
}
if recordset_type != 'CNAME':
criterion['type'] = 'CNAME'
recordsets = self.storage.find_recordsets(context, criterion)
if ((len(recordsets) == 1 and recordsets[0].id != recordset_id)
or len(recordsets) > 1):
raise exceptions.InvalidRecordSetLocation(
'CNAME recordsets may not share a name with any other records')
return True
def _is_valid_recordset_placement_subdomain(self, context, domain,
recordset_name,
criterion=None):
"""
Check that the placement of the requested rrset belongs to any of the
domains subdomains..
"""
LOG.debug("Checking if %s belongs in any of %s subdomains" %
(recordset_name, domain.name))
criterion = criterion or {}
context = context.elevated()
context.all_tenants = True
if domain.name == recordset_name:
return
child_domains = self.storage.find_domains(
context, {"parent_domain_id": domain.id})
for child_domain in child_domains:
try:
self._is_valid_recordset_name(
context, child_domain, recordset_name)
except Exception:
continue
else:
msg = 'RecordSet belongs in a child zone: %s' % \
child_domain['name']
raise exceptions.InvalidRecordSetLocation(msg)
def _is_blacklisted_domain_name(self, context, domain_name):
"""
Ensures the provided domain_name is not blacklisted.
"""
blacklists = self.storage.find_blacklists(context)
for blacklist in blacklists:
if bool(re.search(blacklist.pattern, domain_name)):
return True
return False
def _is_subdomain(self, context, domain_name):
"""
Ensures the provided domain_name is the subdomain
of an existing domain (checks across all tenants)
"""
context = context.elevated()
context.all_tenants = True
# Break the name up into it's component labels
labels = domain_name.split(".")
i = 1
# Starting with label #2, search for matching domain's in the database
while (i < len(labels)):
name = '.'.join(labels[i:])
try:
domain = self.storage.find_domain(context, {'name': name})
except exceptions.DomainNotFound:
i += 1
else:
return domain
return False
def _is_superdomain(self, context, domain_name):
"""
Ensures the provided domain_name is the parent domain
of an existing subdomain (checks across all tenants)
"""
context = context.elevated()
context.all_tenants = True
# Create wildcard term to catch all subdomains
search_term = "*%s" % domain_name
try:
criterion = {'name': search_term}
subdomains = self.storage.find_domains(context, criterion)
except exceptions.DomainNotFound:
return False
return subdomains
    def _is_valid_ttl(self, context, ttl):
        # Reject TTLs below the configured minimum unless the caller holds
        # the 'use_low_ttl' policy permission.
        min_ttl = cfg.CONF['service:central'].min_ttl
        # NOTE(review): min_ttl is compared against the literal string
        # "None" — presumably the option is declared as a string option;
        # if it were a real None, int(min_ttl) below would raise. Confirm
        # the option type before changing this comparison.
        if min_ttl != "None" and ttl < int(min_ttl):
            try:
                policy.check('use_low_ttl', context)
            except exceptions.Forbidden:
                raise exceptions.InvalidTTL('TTL is below the minimum: %s'
                                            % min_ttl)
    def _increment_domain_serial(self, context, domain_id):
        """Bump the zone serial, push it to the backend, refresh the SOA."""
        domain = self.storage.get_domain(context, domain_id)
        # Increment the serial number
        domain.serial = utils.increment_serial(domain.serial)
        domain = self.storage.update_domain(context, domain)
        # Any backend failure surfaces as exceptions.Backend.
        with wrap_backend_call():
            self.backend.update_domain(context, domain)
        # Update SOA record
        self._update_soa(context, domain)
        return domain
# Methods to handle priority
def _get_priority(self, recordset):
if recordset.type != "MX" and recordset.type != "SRV":
return recordset
else:
if recordset.records is not None:
for r in recordset.records:
r.data = str(r.priority) + " " + r.data
return recordset
def _set_priority(self, recordset):
if recordset.type != "MX" and recordset.type != "SRV":
return recordset
else:
if recordset.records is not None:
for r in recordset.records:
head, sep, tail = r.data.partition(" ")
if sep:
r.priority = head
r.data = tail
return recordset
# SOA Recordset Methods
def _build_soa_record(self, zone, servers):
return "%s %s. %d %d %d %d %d" % (servers[0]['name'],
zone['email'].replace("@", "."),
zone['serial'],
zone['refresh'],
zone['retry'],
zone['expire'],
zone['minimum'])
def _create_soa(self, context, zone):
# Need elevated context to get the servers
elevated_context = context.elevated()
elevated_context.all_tenants = True
servers = self.find_servers(elevated_context)
soa_values = [self._build_soa_record(zone, servers)]
recordlist = objects.RecordList(objects=[
objects.Record(data=r, managed=True) for r in soa_values])
values = {
'name': zone['name'],
'type': "SOA",
'records': recordlist
}
soa = self.create_recordset(context, zone['id'],
objects.RecordSet(**values), False)
return soa
def _update_soa(self, context, zone):
servers = self.get_domain_servers(context, zone['id'])
soa = self.find_recordset(context,
criterion={'domain_id': zone['id'],
'type': "SOA"})
new_values = [self._build_soa_record(zone, servers)]
recordlist = objects.RecordList(objects=[
objects.Record(data=r) for r in new_values])
soa.records = recordlist
self.update_recordset(context, soa, increment_serial=False)
# NS Recordset Methods
def _create_ns(self, context, zone, servers):
# Create an NS record for each server
ns_values = []
for s in servers:
ns_values.append(s.name)
recordlist = objects.RecordList(objects=[
objects.Record(data=r, managed=True) for r in ns_values])
values = {
'name': zone['name'],
'type': "NS",
'records': recordlist
}
ns = self.create_recordset(context, zone['id'],
objects.RecordSet(**values), False)
return ns
    def _update_ns(self, context, zone, orig_name, new_name):
        """Rewrite NS records matching *orig_name* to *new_name*."""
        # Get the zone's NS recordset
        ns = self.find_recordset(context,
                                 criterion={'domain_id': zone['id'],
                                            'type': "NS"})
        #
        for r in ns.records:
            if r.data == orig_name:
                r.data = new_name
                # NOTE(review): update_recordset fires once per matched
                # record; presumably at most one record matches a given
                # server name — confirm before relying on this.
                self.update_recordset(context, ns)
def _add_ns(self, context, zone, server):
# Get NS recordset
ns = self.find_recordset(context,
criterion={'domain_id': zone['id'],
'type': "NS"})
# Add new record to recordset
ns_record = objects.Record(data=server.name)
new_record = self.create_record(context, zone['id'],
ns['id'], ns_record,
increment_serial=False)
ns.records.append(new_record)
self.update_recordset(context, ns)
def _delete_ns(self, context, zone, server):
ns = self.find_recordset(context,
criterion={'domain_id': zone['id'],
'type': "NS"})
records = ns.records
for r in records:
if r.data == server.name:
ns.records.remove(r)
self.update_recordset(context, ns)
# Quota Enforcement Methods
def _enforce_domain_quota(self, context, tenant_id):
criterion = {'tenant_id': tenant_id}
count = self.storage.count_domains(context, criterion)
self.quota.limit_check(context, tenant_id, domains=count)
    def _enforce_recordset_quota(self, context, domain):
        # Intentionally a no-op until RRSet quotas are implemented.
        # TODO(kiall): Enforce RRSet Quotas
        pass
def _enforce_record_quota(self, context, domain, recordset):
# Ensure the records per domain quota is OK
criterion = {'domain_id': domain['id']}
count = self.storage.count_records(context, criterion)
self.quota.limit_check(context, domain['tenant_id'],
domain_records=count)
# TODO(kiall): Enforce Records per RRSet Quotas
# Misc Methods
    def get_absolute_limits(self, context):
        """Return the caller's absolute limits (currently quota-derived)."""
        # NOTE(Kiall): Currently, we only have quota based limits..
        return self.quota.get_quotas(context, context.tenant)
# Quota Methods
def get_quotas(self, context, tenant_id):
target = {'tenant_id': tenant_id}
policy.check('get_quotas', context, target)
# This allows admins to get quota information correctly for all tenants
context.all_tenants = True
return self.quota.get_quotas(context, tenant_id)
def get_quota(self, context, tenant_id, resource):
target = {'tenant_id': tenant_id, 'resource': resource}
policy.check('get_quota', context, target)
return self.quota.get_quota(context, tenant_id, resource)
@transaction
def set_quota(self, context, tenant_id, resource, hard_limit):
target = {
'tenant_id': tenant_id,
'resource': resource,
'hard_limit': hard_limit,
}
policy.check('set_quota', context, target)
return self.quota.set_quota(context, tenant_id, resource, hard_limit)
@transaction
def reset_quotas(self, context, tenant_id):
target = {'tenant_id': tenant_id}
policy.check('reset_quotas', context, target)
self.quota.reset_quotas(context, tenant_id)
# Server Methods
@transaction
def create_server(self, context, server):
policy.check('create_server', context)
created_server = self.storage.create_server(context, server)
# Update backend with the new server..
with wrap_backend_call():
self.backend.create_server(context, created_server)
self.notifier.info(context, 'dns.server.create', created_server)
# Update NS recordsets for all zones
elevated_context = context.elevated()
elevated_context.all_tenants = True
zones = self.find_domains(elevated_context)
# Create a new NS recordset for for every zone
for z in zones:
self._add_ns(elevated_context, z, server)
return created_server
    def find_servers(self, context, criterion=None, marker=None, limit=None,
                     sort_key=None, sort_dir=None):
        """Paginated, sorted server lookup; delegates to storage."""
        policy.check('find_servers', context)
        return self.storage.find_servers(context, criterion, marker, limit,
                                         sort_key, sort_dir)
    def get_server(self, context, server_id):
        """Fetch a single server by id (policy-checked)."""
        policy.check('get_server', context, {'server_id': server_id})
        return self.storage.get_server(context, server_id)
@transaction
def update_server(self, context, server):
    """Update a DNS server and refresh NS recordsets on every zone.

    The pre-update name is read via ``obj_get_original_value`` so the
    NS records referencing the old name can be rewritten.
    """
    target = {
        'server_id': server.obj_get_original_value('id'),
    }
    policy.check('update_server', context, target)
    orig_server_name = server.obj_get_original_value('name')
    new_server_name = server.name
    server = self.storage.update_server(context, server)
    # Update backend with the new details..
    with wrap_backend_call():
        self.backend.update_server(context, server)
    self.notifier.info(context, 'dns.server.update', server)
    # Update NS recordsets for all zones
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    zones = self.find_domains(elevated_context)
    for z in zones:
        self._update_ns(elevated_context, z, orig_server_name,
                        new_server_name)
    return server
@transaction
def delete_server(self, context, server_id):
    """Delete a DNS server, its NS records on all zones, and backend state.

    :raises LastServerDeleteNotAllowed: when this is the only server.
    """
    policy.check('delete_server', context, {'server_id': server_id})
    # don't delete last of servers
    # NOTE(review): this check races with concurrent deletes - two
    # callers could each see two servers and both delete; confirm the
    # transaction isolation level covers this.
    servers = self.storage.find_servers(context)
    if len(servers) == 1 and server_id == servers[0].id:
        raise exceptions.LastServerDeleteNotAllowed(
            "Not allowed to delete last of servers")
    server = self.storage.delete_server(context, server_id)
    # Update NS recordsets for all zones
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    zones = self.find_domains(elevated_context)
    for z in zones:
        self._delete_ns(elevated_context, z, server)
    # Update backend with the new server..
    with wrap_backend_call():
        self.backend.delete_server(context, server)
    self.notifier.info(context, 'dns.server.delete', server)
# TLD Methods
@transaction
def create_tld(self, context, tld):
    """Create a TLD in storage and enable TLD validation.

    TLDs exist only in central's storage; no backend call is made.
    """
    policy.check('create_tld', context)
    # The TLD is only created on central's storage and not on the backend.
    created_tld = self.storage.create_tld(context, tld)
    self.notifier.info(context, 'dns.tld.create', created_tld)
    # Set check for tlds to be true
    self.check_for_tlds = True
    return created_tld
def find_tlds(self, context, criterion=None, marker=None, limit=None,
              sort_key=None, sort_dir=None):
    """List TLDs matching *criterion* with paging/sorting."""
    policy.check('find_tlds', context)
    return self.storage.find_tlds(context, criterion, marker, limit,
                                  sort_key, sort_dir)
def get_tld(self, context, tld_id):
    """Fetch a single TLD by ID."""
    target = {'tld_id': tld_id}
    policy.check('get_tld', context, target)
    return self.storage.get_tld(context, tld_id)
@transaction
def update_tld(self, context, tld):
    """Update a TLD in storage and emit a notification."""
    target = {
        'tld_id': tld.obj_get_original_value('id'),
    }
    policy.check('update_tld', context, target)
    tld = self.storage.update_tld(context, tld)
    self.notifier.info(context, 'dns.tld.update', tld)
    return tld
@transaction
def delete_tld(self, context, tld_id):
    """Delete a TLD from storage and emit a notification."""
    # Known issue - self.check_for_tld is not reset here. So if the last
    # TLD happens to be deleted, then we would incorrectly do the TLD
    # validations.
    # This decision was influenced by weighing the (ultra low) probability
    # of hitting this issue vs doing the checks for every delete.
    policy.check('delete_tld', context, {'tld_id': tld_id})
    tld = self.storage.delete_tld(context, tld_id)
    self.notifier.info(context, 'dns.tld.delete', tld)
# TSIG Key Methods
@transaction
def create_tsigkey(self, context, tsigkey):
    """Create a TSIG key in storage and on the backend."""
    policy.check('create_tsigkey', context)
    created_tsigkey = self.storage.create_tsigkey(context, tsigkey)
    with wrap_backend_call():
        self.backend.create_tsigkey(context, created_tsigkey)
    self.notifier.info(context, 'dns.tsigkey.create', created_tsigkey)
    return created_tsigkey
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
                  sort_key=None, sort_dir=None):
    """List TSIG keys matching *criterion* with paging/sorting."""
    policy.check('find_tsigkeys', context)
    return self.storage.find_tsigkeys(context, criterion, marker,
                                      limit, sort_key, sort_dir)
def get_tsigkey(self, context, tsigkey_id):
    """Fetch a single TSIG key by ID."""
    target = {'tsigkey_id': tsigkey_id}
    policy.check('get_tsigkey', context, target)
    return self.storage.get_tsigkey(context, tsigkey_id)
@transaction
def update_tsigkey(self, context, tsigkey):
    """Update a TSIG key in storage and on the backend."""
    target = {
        'tsigkey_id': tsigkey.obj_get_original_value('id'),
    }
    policy.check('update_tsigkey', context, target)
    tsigkey = self.storage.update_tsigkey(context, tsigkey)
    with wrap_backend_call():
        self.backend.update_tsigkey(context, tsigkey)
    self.notifier.info(context, 'dns.tsigkey.update', tsigkey)
    return tsigkey
@transaction
def delete_tsigkey(self, context, tsigkey_id):
    """Delete a TSIG key from storage and from the backend."""
    policy.check('delete_tsigkey', context, {'tsigkey_id': tsigkey_id})
    tsigkey = self.storage.delete_tsigkey(context, tsigkey_id)
    with wrap_backend_call():
        self.backend.delete_tsigkey(context, tsigkey)
    self.notifier.info(context, 'dns.tsigkey.delete', tsigkey)
# Tenant Methods
def find_tenants(self, context):
    """List all tenants known to storage."""
    policy.check('find_tenants', context)
    return self.storage.find_tenants(context)
def get_tenant(self, context, tenant_id):
    """Fetch a single tenant by ID."""
    policy.check('get_tenant', context, {'tenant_id': tenant_id})
    return self.storage.get_tenant(context, tenant_id)
def count_tenants(self, context):
    """Return the total number of tenants."""
    policy.check('count_tenants', context)
    return self.storage.count_tenants(context)
# Domain Methods
@transaction
def create_domain(self, context, domain):
    """Create a new domain (zone).

    Validates quota, name, TTL and sub/super-domain ownership, requires
    at least one configured server, then persists the domain to storage
    and the backend, reparents any subdomains, and creates the NS and
    SOA recordsets.

    :raises Forbidden: when a parent/child zone belongs to another tenant.
    :raises NoServersConfigured: when no DNS servers exist.
    """
    # TODO(kiall): Refactor this method into *MUCH* smaller chunks.
    # Default to creating in the current users tenant
    if domain.tenant_id is None:
        domain.tenant_id = context.tenant
    target = {
        'tenant_id': domain.tenant_id,
        'domain_name': domain.name
    }
    policy.check('create_domain', context, target)
    # Ensure the tenant has enough quota to continue
    self._enforce_domain_quota(context, domain.tenant_id)
    # Ensure the domain name is valid
    self._is_valid_domain_name(context, domain.name)
    # Ensure TTL is above the minimum
    if domain.ttl is not None:
        self._is_valid_ttl(context, domain.ttl)
    # Handle sub-domains appropriately
    parent_domain = self._is_subdomain(context, domain.name)
    if parent_domain:
        if parent_domain.tenant_id == domain.tenant_id:
            # Record the Parent Domain ID
            domain.parent_domain_id = parent_domain.id
        else:
            raise exceptions.Forbidden('Unable to create subdomain in '
                                       'another tenants domain')
    # Handle super-domains appropriately
    subdomains = self._is_superdomain(context, domain.name)
    if subdomains:
        LOG.debug("Domain '{0}' is a superdomain.".format(domain.name))
        for subdomain in subdomains:
            if subdomain.tenant_id != domain.tenant_id:
                raise exceptions.Forbidden('Unable to create domain '
                                           'because another tenant '
                                           'owns a subdomain of '
                                           'the domain')
    # If this succeeds, subdomain parent IDs will be updated
    # after domain is created
    # NOTE(kiall): Fetch the servers before creating the domain, this way
    #              we can prevent domain creation if no servers are
    #              configured.
    servers = self.storage.find_servers(context)
    if len(servers) == 0:
        LOG.critical(_LC('No servers configured. '
                         'Please create at least one server'))
        raise exceptions.NoServersConfigured()
    # Set the serial number
    domain.serial = utils.increment_serial()
    created_domain = self.storage.create_domain(context, domain)
    with wrap_backend_call():
        self.backend.create_domain(context, created_domain)
    self.notifier.info(context, 'dns.domain.create', created_domain)
    # If domain is a superdomain, update subdomains
    # with new parent IDs
    for subdomain in subdomains:
        LOG.debug("Updating subdomain '{0}' parent ID "
                  "using superdomain ID '{1}'"
                  .format(subdomain.name, domain.id))
        subdomain.parent_domain_id = domain.id
        self.update_domain(context, subdomain)
    # Create the NS and SOA recordsets for the new domain. SOA must be
    # last, in order to ensure BIND etc do not read the zone file before
    # all changes have been committed to the zone file.
    self._create_ns(context, created_domain, servers)
    self._create_soa(context, created_domain)
    return created_domain
def get_domain(self, context, domain_id):
    """Fetch a single domain by ID.

    The domain is fetched first so the policy check can include its
    name and owning tenant in the target.
    """
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('get_domain', context, target)
    return domain
def get_domain_servers(self, context, domain_id, criterion=None):
    """Return the servers serving *domain_id*.

    Currently every configured server serves every domain.
    """
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('get_domain_servers', context, target)
    # TODO(kiall): Once we allow domains to be allocated on 1 of N server
    #              pools, return the filtered list here.
    return self.storage.find_servers(context, criterion)
def find_domains(self, context, criterion=None, marker=None, limit=None,
                 sort_key=None, sort_dir=None):
    """List domains matching *criterion* with paging/sorting."""
    target = {'tenant_id': context.tenant}
    policy.check('find_domains', context, target)
    return self.storage.find_domains(context, criterion, marker, limit,
                                     sort_key, sort_dir)
def find_domain(self, context, criterion=None):
    """Return the single domain matching *criterion*."""
    policy.check('find_domain', context, {'tenant_id': context.tenant})
    return self.storage.find_domain(context, criterion)
@transaction
def update_domain(self, context, domain, increment_serial=True):
    """Update a domain, optionally bumping its serial and SOA record.

    :param increment_serial: when True, bump the zone serial and
        refresh the SOA record; callers updating many records at once
        pass False and bump once themselves.
    :raises BadRequest: on attempts to change immutable fields
        (tenant_id, name).
    """
    # TODO(kiall): Refactor this method into *MUCH* smaller chunks.
    target = {
        'domain_id': domain.obj_get_original_value('id'),
        'domain_name': domain.obj_get_original_value('name'),
        'tenant_id': domain.obj_get_original_value('tenant_id'),
    }
    policy.check('update_domain', context, target)
    changes = domain.obj_get_changes()
    # Ensure immutable fields are not changed
    if 'tenant_id' in changes:
        # TODO(kiall): Moving between tenants should be allowed, but the
        #              current code will not take into account that
        #              RecordSets and Records must also be moved.
        raise exceptions.BadRequest('Moving a domain between tenants is '
                                    'not allowed')
    if 'name' in changes:
        raise exceptions.BadRequest('Renaming a domain is not allowed')
    # Ensure TTL is above the minimum
    ttl = changes.get('ttl', None)
    if ttl is not None:
        self._is_valid_ttl(context, ttl)
    if increment_serial:
        # Increment the serial number
        domain.serial = utils.increment_serial(domain.serial)
    domain = self.storage.update_domain(context, domain)
    with wrap_backend_call():
        self.backend.update_domain(context, domain)
    if increment_serial:
        # Update the SOA Record
        self._update_soa(context, domain)
    self.notifier.info(context, 'dns.domain.update', domain)
    self.mdns_api.notify_zone_changed(context, domain.name)
    return domain
@transaction
def delete_domain(self, context, domain_id):
    """Delete a domain from storage and the backend.

    :raises DomainHasSubdomain: when child zones still exist.
    """
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('delete_domain', context, target)
    # Prevent deletion of a zone which has child zones
    criterion = {'parent_domain_id': domain_id}
    if self.storage.count_domains(context, criterion) > 0:
        raise exceptions.DomainHasSubdomain('Please delete any subdomains '
                                            'before deleting this domain')
    domain = self.storage.delete_domain(context, domain_id)
    with wrap_backend_call():
        self.backend.delete_domain(context, domain)
    self.notifier.info(context, 'dns.domain.delete', domain)
    return domain
def count_domains(self, context, criterion=None):
    """Return the number of domains matching *criterion*."""
    criterion = {} if criterion is None else criterion
    policy.check('count_domains', context,
                 {'tenant_id': criterion.get('tenant_id', None)})
    return self.storage.count_domains(context, criterion)
# Report combining all the count reports based on criterion
def count_report(self, context, criterion=None):
    """Build a usage report covering zones, records and/or tenants.

    :param criterion: None for everything, or one of 'zones',
        'records', 'tenants'.
    :raises ReportNotFound: for any other criterion value.
    """
    counters = {
        'zones': self.count_domains,
        'records': self.count_records,
        'tenants': self.count_tenants,
    }
    if criterion is None:
        # No filter: a single report entry holding every counter.
        return [dict((name, fn(context))
                     for name, fn in counters.items())]
    if criterion in counters:
        return [{criterion: counters[criterion](context)}]
    raise exceptions.ReportNotFound()
@transaction
def touch_domain(self, context, domain_id):
    """Bump a domain's serial without any other change."""
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('touch_domain', context, target)
    domain = self._increment_domain_serial(context, domain_id)
    self.notifier.info(context, 'dns.domain.touch', domain)
    return domain
# RecordSet Methods
@transaction
def create_recordset(self, context, domain_id, recordset,
                     increment_serial=True):
    """Create a recordset within a domain.

    Validates quota, TTL, name and placement, persists to storage and
    the backend, and bumps the zone serial when records were included.

    :returns: the created recordset with priority folded back into the
        record data.
    """
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_name': recordset.name,
        'tenant_id': domain.tenant_id,
    }
    policy.check('create_recordset', context, target)
    # Ensure the tenant has enough quota to continue
    self._enforce_recordset_quota(context, domain)
    # Ensure TTL is above the minimum
    ttl = getattr(recordset, 'ttl', None)
    if ttl is not None:
        self._is_valid_ttl(context, ttl)
    # Ensure the recordset name and placement is valid
    self._is_valid_recordset_name(context, domain, recordset.name)
    self._is_valid_recordset_placement(context, domain, recordset.name,
                                       recordset.type)
    self._is_valid_recordset_placement_subdomain(
        context, domain, recordset.name)
    # Extract the priority from the records
    recordset = self._set_priority(recordset)
    created_recordset = self.storage.create_recordset(context, domain_id,
                                                      recordset)
    with wrap_backend_call():
        self.backend.create_recordset(context, domain, created_recordset)
    # Send RecordSet creation notification
    self.notifier.info(context, 'dns.recordset.create', created_recordset)
    # Only increment the serial # if records exist and
    # increment_serial = True
    if increment_serial:
        if len(recordset.records) > 0:
            self._increment_domain_serial(context, domain.id)
    # Get the correct format for priority
    return self._get_priority(recordset)
def get_recordset(self, context, domain_id, recordset_id):
    """Fetch a recordset, verifying it belongs to *domain_id*.

    :raises RecordSetNotFound: when the recordset exists but in a
        different domain - prevents cross-domain ID probing.
    """
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    # Ensure the domain_id matches the record's domain_id
    if domain.id != recordset.domain_id:
        raise exceptions.RecordSetNotFound()
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset.id,
        'tenant_id': domain.tenant_id,
    }
    policy.check('get_recordset', context, target)
    # Add the priority to the records
    recordset = self._get_priority(recordset)
    return recordset
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
                    sort_key=None, sort_dir=None):
    """List recordsets matching *criterion* with paging/sorting."""
    target = {'tenant_id': context.tenant}
    policy.check('find_recordsets', context, target)
    recordsets = self.storage.find_recordsets(context, criterion, marker,
                                              limit, sort_key, sort_dir)
    # Set the priority for each record
    # NOTE(review): rebinding the loop variable only has effect if
    # _get_priority mutates the recordset in place - confirm it does.
    for rs in recordsets:
        rs = self._get_priority(rs)
    return recordsets
def find_recordset(self, context, criterion=None):
    """Return the single recordset matching *criterion*."""
    target = {'tenant_id': context.tenant}
    policy.check('find_recordset', context, target)
    recordset = self.storage.find_recordset(context, criterion)
    # Add the priority to the records
    recordset = self._get_priority(recordset)
    return recordset
@transaction
def update_recordset(self, context, recordset, increment_serial=True):
    """Update a recordset, optionally bumping the zone serial.

    :raises BadRequest: on attempts to change immutable fields
        (tenant_id, domain_id, type).
    """
    domain_id = recordset.obj_get_original_value('domain_id')
    domain = self.storage.get_domain(context, domain_id)
    # Set the priority for the records
    recordset = self._set_priority(recordset)
    changes = recordset.obj_get_changes()
    # Ensure immutable fields are not changed
    if 'tenant_id' in changes:
        raise exceptions.BadRequest('Moving a recordset between tenants '
                                    'is not allowed')
    if 'domain_id' in changes:
        raise exceptions.BadRequest('Moving a recordset between domains '
                                    'is not allowed')
    if 'type' in changes:
        raise exceptions.BadRequest('Changing a recordsets type is not '
                                    'allowed')
    target = {
        'domain_id': recordset.obj_get_original_value('domain_id'),
        'recordset_id': recordset.obj_get_original_value('id'),
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('update_recordset', context, target)
    # Ensure the record name is valid
    self._is_valid_recordset_name(context, domain, recordset.name)
    self._is_valid_recordset_placement(context, domain, recordset.name,
                                       recordset.type, recordset.id)
    self._is_valid_recordset_placement_subdomain(
        context, domain, recordset.name)
    # Ensure TTL is above the minimum
    ttl = changes.get('ttl', None)
    if ttl is not None:
        self._is_valid_ttl(context, ttl)
    # Update the recordset
    recordset = self.storage.update_recordset(context, recordset)
    with wrap_backend_call():
        self.backend.update_recordset(context, domain, recordset)
    if increment_serial:
        self._increment_domain_serial(context, domain.id)
    # Send RecordSet update notification
    self.notifier.info(context, 'dns.recordset.update', recordset)
    self.mdns_api.notify_zone_changed(context, domain.name)
    return self._get_priority(recordset)
@transaction
def delete_recordset(self, context, domain_id, recordset_id,
                     increment_serial=True):
    """Delete a recordset, optionally bumping the zone serial.

    :raises RecordSetNotFound: when the recordset belongs to a
        different domain than *domain_id*.
    """
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    # Ensure the domain_id matches the recordset's domain_id
    if domain.id != recordset.domain_id:
        raise exceptions.RecordSetNotFound()
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset.id,
        'tenant_id': domain.tenant_id
    }
    policy.check('delete_recordset', context, target)
    recordset = self.storage.delete_recordset(context, recordset_id)
    with wrap_backend_call():
        self.backend.delete_recordset(context, domain, recordset)
    if increment_serial:
        self._increment_domain_serial(context, domain_id)
    # Send RecordSet deletion notification
    self.notifier.info(context, 'dns.recordset.delete', recordset)
    return recordset
def count_recordsets(self, context, criterion=None):
    """Return the number of recordsets matching *criterion*."""
    criterion = {} if criterion is None else criterion
    policy.check('count_recordsets', context,
                 {'tenant_id': criterion.get('tenant_id', None)})
    return self.storage.count_recordsets(context, criterion)
# Record Methods
@transaction
def create_record(self, context, domain_id, recordset_id, record,
                  increment_serial=True):
    """Create a record inside a recordset.

    Enforces record quota, persists to storage and the backend, and
    bumps the zone serial unless *increment_serial* is False.
    """
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset_id,
        'recordset_name': recordset.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('create_record', context, target)
    # Ensure the tenant has enough quota to continue
    self._enforce_record_quota(context, domain, recordset)
    created_record = self.storage.create_record(context, domain_id,
                                                recordset_id, record)
    with wrap_backend_call():
        self.backend.create_record(
            context, domain, recordset, created_record)
    if increment_serial:
        self._increment_domain_serial(context, domain_id)
    # Send Record creation notification
    self.notifier.info(context, 'dns.record.create', created_record)
    return created_record
def get_record(self, context, domain_id, recordset_id, record_id):
    """Fetch a record, verifying its domain and recordset lineage.

    :raises RecordNotFound: when the record's domain_id or
        recordset_id does not match the supplied IDs.
    """
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    record = self.storage.get_record(context, record_id)
    # Ensure the domain_id matches the record's domain_id
    if domain.id != record.domain_id:
        raise exceptions.RecordNotFound()
    # Ensure the recordset_id matches the record's recordset_id
    if recordset.id != record.recordset_id:
        raise exceptions.RecordNotFound()
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset_id,
        'recordset_name': recordset.name,
        'record_id': record.id,
        'tenant_id': domain.tenant_id
    }
    policy.check('get_record', context, target)
    return record
def find_records(self, context, criterion=None, marker=None, limit=None,
                 sort_key=None, sort_dir=None):
    """List records matching *criterion* with paging/sorting."""
    policy.check('find_records', context, {'tenant_id': context.tenant})
    return self.storage.find_records(context, criterion, marker, limit,
                                     sort_key, sort_dir)
def find_record(self, context, criterion=None):
    """Return the single record matching *criterion*."""
    policy.check('find_record', context, {'tenant_id': context.tenant})
    return self.storage.find_record(context, criterion)
@transaction
def update_record(self, context, record, increment_serial=True):
    """Update a record, optionally bumping the zone serial.

    :raises BadRequest: on attempts to change immutable fields
        (tenant_id, domain_id, recordset_id).
    """
    domain_id = record.obj_get_original_value('domain_id')
    domain = self.storage.get_domain(context, domain_id)
    recordset_id = record.obj_get_original_value('recordset_id')
    recordset = self.storage.get_recordset(context, recordset_id)
    changes = record.obj_get_changes()
    # Ensure immutable fields are not changed
    # NOTE: the original messages said "recordset" even though this
    # method operates on records - corrected for accurate errors.
    if 'tenant_id' in changes:
        raise exceptions.BadRequest('Moving a record between tenants '
                                    'is not allowed')
    if 'domain_id' in changes:
        raise exceptions.BadRequest('Moving a record between domains '
                                    'is not allowed')
    if 'recordset_id' in changes:
        raise exceptions.BadRequest('Moving a record between '
                                    'recordsets is not allowed')
    target = {
        'domain_id': record.obj_get_original_value('domain_id'),
        'domain_name': domain.name,
        'recordset_id': record.obj_get_original_value('recordset_id'),
        'recordset_name': recordset.name,
        'record_id': record.obj_get_original_value('id'),
        'tenant_id': domain.tenant_id
    }
    policy.check('update_record', context, target)
    # Update the record
    record = self.storage.update_record(context, record)
    with wrap_backend_call():
        self.backend.update_record(context, domain, recordset, record)
    if increment_serial:
        self._increment_domain_serial(context, domain.id)
    # Send Record update notification
    self.notifier.info(context, 'dns.record.update', record)
    self.mdns_api.notify_zone_changed(context, domain.name)
    return record
@transaction
def delete_record(self, context, domain_id, recordset_id, record_id,
                  increment_serial=True):
    """Delete a record, optionally bumping the zone serial.

    :raises RecordNotFound: when the record's lineage does not match
        the supplied domain/recordset IDs.
    """
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    record = self.storage.get_record(context, record_id)
    # Ensure the domain_id matches the record's domain_id
    if domain.id != record.domain_id:
        raise exceptions.RecordNotFound()
    # Ensure the recordset_id matches the record's recordset_id
    if recordset.id != record.recordset_id:
        raise exceptions.RecordNotFound()
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset_id,
        'recordset_name': recordset.name,
        'record_id': record.id,
        'tenant_id': domain.tenant_id
    }
    policy.check('delete_record', context, target)
    record = self.storage.delete_record(context, record_id)
    with wrap_backend_call():
        self.backend.delete_record(context, domain, recordset, record)
    if increment_serial:
        self._increment_domain_serial(context, domain_id)
    # Send Record deletion notification
    self.notifier.info(context, 'dns.record.delete', record)
    return record
def count_records(self, context, criterion=None):
    """Return the number of records matching *criterion*."""
    criterion = {} if criterion is None else criterion
    policy.check('count_records', context,
                 {'tenant_id': criterion.get('tenant_id', None)})
    return self.storage.count_records(context, criterion)
# Diagnostics Methods
def _sync_domain(self, context, domain):
    """Re-sync a single domain to the backend.

    Gathers every (recordset, records) pair for the domain because the
    backend sync API wants records alongside their recordsets.
    """
    recordsets = self.storage.find_recordsets(
        context, criterion={'domain_id': domain['id']})
    # Since we now have records as well as recordsets we need to get the
    # records for it as well and pass that down since the backend wants it.
    rdata = []
    for recordset in recordsets:
        records = self.find_records(
            context, {'recordset_id': recordset.id})
        rdata.append((recordset, records))
    with wrap_backend_call():
        return self.backend.sync_domain(context, domain, rdata)
@transaction
def sync_domains(self, context):
    """Re-sync every domain to the backend.

    :returns: dict mapping domain id -> per-domain sync result.
    """
    policy.check('diagnostics_sync_domains', context)
    return dict((d.id, self._sync_domain(context, d))
                for d in self.storage.find_domains(context))
@transaction
def sync_domain(self, context, domain_id):
    """Re-sync one domain (by ID) to the backend."""
    domain = self.storage.get_domain(context, domain_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'tenant_id': domain.tenant_id
    }
    policy.check('diagnostics_sync_domain', context, target)
    return self._sync_domain(context, domain)
@transaction
def sync_record(self, context, domain_id, recordset_id, record_id):
    """Re-sync a single record to the backend."""
    domain = self.storage.get_domain(context, domain_id)
    recordset = self.storage.get_recordset(context, recordset_id)
    target = {
        'domain_id': domain_id,
        'domain_name': domain.name,
        'recordset_id': recordset_id,
        'recordset_name': recordset.name,
        'record_id': record_id,
        'tenant_id': domain.tenant_id
    }
    policy.check('diagnostics_sync_record', context, target)
    record = self.storage.get_record(context, record_id)
    with wrap_backend_call():
        return self.backend.sync_record(context, domain, recordset, record)
def ping(self, context):
    """Health-check the backend and storage layers.

    :returns: dict with host name, overall status, and each layer's
        individual ping result.
    """
    policy.check('diagnostics_ping', context)
    try:
        backend_status = self.backend.ping(context)
    except Exception as e:
        backend_status = {'status': False, 'message': str(e)}
    try:
        storage_status = self.storage.ping(context)
    except Exception as e:
        storage_status = {'status': False, 'message': str(e)}
    # NOTE(review): this truthiness test treats the failure dicts built
    # above (non-empty, hence truthy) as healthy, so 'status' can read
    # True even when a ping raised - confirm intended and/or check the
    # nested 'status' keys instead.
    if backend_status and storage_status:
        status = True
    else:
        status = False
    return {
        'host': cfg.CONF.host,
        'status': status,
        'backend': backend_status,
        'storage': storage_status
    }
def _determine_floatingips(self, context, fips, records=None,
                           tenant_id=None):
    """
    Given the context or tenant, records and fips it returns the valid
    floatingips either with a associated record or not. Deletes invalid
    records also.

    Returns a list of tuples with FloatingIPs and it's Record.

    NOTE(review): the *records* parameter is never used - it is
    unconditionally overwritten by the storage lookup below; consider
    removing it or honouring a caller-supplied value.
    """
    tenant_id = tenant_id or context.tenant
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    criterion = {
        'managed': True,
        'managed_resource_type': 'ptr:floatingip',
    }
    records = self.find_records(elevated_context, criterion)
    # Index managed records by the FIP address stored in managed_extra.
    records = dict([(r['managed_extra'], r) for r in records])
    invalid = []
    data = {}
    # First populate the list of FIPS
    for fip_key, fip_values in fips.items():
        # Check if the FIP has a record
        record = records.get(fip_values['address'])
        # NOTE: Now check if it's owned by the tenant that actually has the
        # FIP in the external service and if not invalidate it (delete it)
        # thus not returning it with in the tuple with the FIP, but None..
        if record:
            record_tenant = record['managed_tenant_id']
            if record_tenant != tenant_id:
                msg = "Invalid FloatingIP %s belongs to %s but record " \
                      "owner %s"
                LOG.debug(msg, fip_key, tenant_id, record_tenant)
                invalid.append(record)
                record = None
        data[fip_key] = (fip_values, record)
    return data, invalid
def _invalidate_floatingips(self, context, records):
    """Delete a list of stale managed (PTR) records.

    :param records: iterable of record dicts to remove; may be empty
        or None.
    """
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    # NOTE: the original guard was ``if records > 0:`` which compares a
    # list to an int - a TypeError on Python 3 and an accidental
    # type-ordering comparison on Python 2. Iterating directly is
    # correct for empty and non-empty inputs alike.
    for r in records or []:
        msg = 'Deleting record %s for FIP %s'
        LOG.debug(msg, r['id'], r['managed_resource_id'])
        self.delete_record(elevated_context, r['domain_id'],
                           r['recordset_id'], r['id'])
def _format_floatingips(self, context, data, recordsets=None):
    """
    Given a list of FloatingIP and Record tuples we look through creating
    a new dict of FloatingIPs

    :param data: mapping of (region, fip_id) -> (fip dict, record-or-None).
    :param recordsets: optional cache of recordsets keyed by id, used
        to avoid a storage lookup per record.
    :returns: dict keyed like *data* with PTR-oriented fip dicts.
    """
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    fips = {}
    for key, value in data.items():
        fip_ptr = {
            'address': value[0]['address'],
            'id': value[0]['id'],
            'region': value[0]['region'],
            'ptrdname': None,
            'ttl': None,
            'description': None
        }
        # TTL population requires a present record in order to find the
        # RS or Zone
        if value[1]:
            # We can have a recordset dict passed in
            if (recordsets is not None and
                    value[1]['recordset_id'] in recordsets):
                recordset = recordsets[value[1]['recordset_id']]
            else:
                recordset = self.storage.get_recordset(
                    elevated_context, value[1]['recordset_id'])
            # Fall back to the zone TTL when the recordset has none.
            if recordset['ttl'] is not None:
                fip_ptr['ttl'] = recordset['ttl']
            else:
                zone = self.get_domain(
                    elevated_context, value[1]['domain_id'])
                fip_ptr['ttl'] = zone['ttl']
            fip_ptr['ptrdname'] = value[1]['data']
        else:
            LOG.debug("No record information found for %s" %
                      value[0]['id'])
        # Store the "fip_record" with the region and it's id as key
        fips[key] = fip_ptr
    return fips
def _list_floatingips(self, context, region=None):
    """Fetch floating IPs from the network API keyed by (region, id)."""
    fips = self.network_api.list_floatingips(context, region=region)
    return self._list_to_dict(fips, keys=['region', 'id'])
def _list_to_dict(self, data, keys=['id']):
new = {}
for i in data:
key = tuple([i[key] for key in keys])
new[key] = i
return new
def _get_floatingip(self, context, region, floatingip_id, fips):
if (region, floatingip_id) not in fips:
msg = 'FloatingIP %s in %s is not associated for tenant "%s"' % \
(floatingip_id, region, context.tenant)
raise exceptions.NotFound(msg)
return fips[region, floatingip_id]
# PTR ops
def list_floatingips(self, context):
    """
    List Floating IPs PTR

    A) We have service_catalog in the context and do a lookup using the
       token pr Neutron in the SC

    B) We lookup FIPs using the configured values for this deployment.

    Stale managed records discovered along the way are deleted.
    """
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    tenant_fips = self._list_floatingips(context)
    valid, invalid = self._determine_floatingips(
        elevated_context, tenant_fips)
    self._invalidate_floatingips(context, invalid)
    return self._format_floatingips(context, valid).values()
def get_floatingip(self, context, region, floatingip_id):
    """
    Get Floating IP PTR

    :raises NotFound: when the FIP is not associated with the tenant.
    """
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    tenant_fips = self._list_floatingips(context, region=region)
    # Raises NotFound early if the FIP is not the caller's.
    self._get_floatingip(context, region, floatingip_id, tenant_fips)
    valid, invalid = self._determine_floatingips(
        elevated_context, tenant_fips)
    self._invalidate_floatingips(context, invalid)
    mangled = self._format_floatingips(context, valid)
    return mangled[region, floatingip_id]
def _set_floatingip_reverse(self, context, region, floatingip_id, values):
    """
    Set the FloatingIP's PTR record based on values.

    Finds or creates the reverse zone, replaces any existing PTR
    recordset for the address, and creates the managed PTR record.

    :returns: the formatted fip dict for (region, floatingip_id).
    """
    values.setdefault('description', None)
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    tenant_fips = self._list_floatingips(context, region=region)
    fip = self._get_floatingip(context, region, floatingip_id, tenant_fips)
    zone_name = self.network_api.address_zone(fip['address'])
    # NOTE: Find existing zone or create it..
    try:
        zone = self.storage.find_domain(
            elevated_context, {'name': zone_name})
    except exceptions.DomainNotFound:
        msg = _LI('Creating zone for %(fip_id)s:%(region)s - '
                  '%(fip_addr)s zone %(zonename)s') % \
            {'fip_id': floatingip_id, 'region': region,
             'fip_addr': fip['address'], 'zonename': zone_name}
        LOG.info(msg)
        email = cfg.CONF['service:central'].managed_resource_email
        tenant_id = cfg.CONF['service:central'].managed_resource_tenant_id
        zone_values = {
            'name': zone_name,
            'email': email,
            'tenant_id': tenant_id
        }
        zone = self.create_domain(
            elevated_context, objects.Domain(**zone_values))
    record_name = self.network_api.address_name(fip['address'])
    try:
        # NOTE: Delete the current recordset if any (also purges records)
        LOG.debug("Removing old RRset / Record")
        rset = self.find_recordset(
            elevated_context, {'name': record_name, 'type': 'PTR'})
        records = self.find_records(
            elevated_context, {'recordset_id': rset['id']})
        for record in records:
            self.delete_record(
                elevated_context,
                rset['domain_id'],
                rset['id'],
                record['id'])
        self.delete_recordset(elevated_context, zone['id'], rset['id'])
    except exceptions.RecordSetNotFound:
        pass
    recordset_values = {
        'name': record_name,
        'type': 'PTR',
        'ttl': values.get('ttl', None),
    }
    recordset = self.create_recordset(
        elevated_context,
        zone['id'],
        objects.RecordSet(**recordset_values))
    record_values = {
        'data': values['ptrdname'],
        'description': values['description'],
        'managed': True,
        'managed_extra': fip['address'],
        'managed_resource_id': floatingip_id,
        'managed_resource_region': region,
        'managed_resource_type': 'ptr:floatingip',
        'managed_tenant_id': context.tenant
    }
    record = self.create_record(
        elevated_context,
        zone['id'],
        recordset['id'],
        objects.Record(**record_values))
    mangled = self._format_floatingips(
        context, {(region, floatingip_id): (fip, record)},
        {recordset['id']: recordset})
    return mangled[region, floatingip_id]
def _unset_floatingip_reverse(self, context, region, floatingip_id):
    """
    Unset the FloatingIP PTR record based on the

    Service's FloatingIP ID > managed_resource_id
    Tenant ID > managed_tenant_id

    We find the record based on the criteria and delete it or raise.

    :raises NotFound: when no managed record exists for the FIP.
    """
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    criterion = {
        'managed_resource_id': floatingip_id,
        'managed_tenant_id': context.tenant
    }
    try:
        record = self.storage.find_record(
            elevated_context, criterion=criterion)
    except exceptions.RecordNotFound:
        msg = 'No such FloatingIP %s:%s' % (region, floatingip_id)
        raise exceptions.NotFound(msg)
    self.delete_record(
        elevated_context,
        record['domain_id'],
        record['recordset_id'],
        record['id'])
@transaction
def update_floatingip(self, context, region, floatingip_id, values):
    """
    We strictly see if values['ptrdname'] is str or None and set / unset
    the requested FloatingIP's PTR record based on that.

    NOTE(review): ``basestring`` is Python 2 only (NameError on Py3);
    and a non-string, non-None ptrdname silently matches neither branch
    and returns None - confirm whether that should raise instead.
    """
    if values['ptrdname'] is None:
        self._unset_floatingip_reverse(context, region, floatingip_id)
    elif isinstance(values['ptrdname'], basestring):
        return self._set_floatingip_reverse(
            context, region, floatingip_id, values)
# Blacklisted Domains
@transaction
def create_blacklist(self, context, blacklist):
policy.check('create_blacklist', context)
created_blacklist = self.storage.create_blacklist(context, blacklist)
self.notifier.info(context, 'dns.blacklist.create', created_blacklist)
return created_blacklist
def get_blacklist(self, context, blacklist_id):
policy.check('get_blacklist', context)
blacklist = self.storage.get_blacklist(context, blacklist_id)
return blacklist
def find_blacklists(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
policy.check('find_blacklists', context)
blacklists = self.storage.find_blacklists(context, criterion,
marker, limit,
sort_key, sort_dir)
return blacklists
def find_blacklist(self, context, criterion):
policy.check('find_blacklist', context)
blacklist = self.storage.find_blacklist(context, criterion)
return blacklist
@transaction
def update_blacklist(self, context, blacklist):
target = {
'blacklist_id': blacklist.id,
}
policy.check('update_blacklist', context, target)
blacklist = self.storage.update_blacklist(context, blacklist)
self.notifier.info(context, 'dns.blacklist.update', blacklist)
return blacklist
@transaction
def delete_blacklist(self, context, blacklist_id):
policy.check('delete_blacklist', context)
blacklist = self.storage.delete_blacklist(context, blacklist_id)
self.notifier.info(context, 'dns.blacklist.delete', blacklist)
| |
# -*- coding: utf-8 -*-
"""
sale.py
"""
import warnings
from trytond.model import fields
from trytond.pool import PoolMeta, Pool
from trytond.pyson import Eval
__all__ = ['SaleLine', 'Sale']
__metaclass__ = PoolMeta
class Sale:
    """Sale with shipping-related helper fields (weights, international
    shipping detection, shipping cost line management)."""
    __name__ = 'sale.sale'

    # True when origin and destination countries differ.
    is_international_shipping = fields.Function(
        fields.Boolean("Is International Shipping"),
        'on_change_with_is_international_shipping'
    )
    # Deprecated alias of total_weight (see get_package_weight).
    package_weight = fields.Function(
        fields.Float(
            "Package weight", digits=(16, Eval('weight_digits', 2)),
            depends=['weight_digits'],
        ),
        'get_package_weight'
    )
    total_weight = fields.Function(
        fields.Float(
            "Total weight", digits=(16, Eval('weight_digits', 2)),
            depends=['weight_digits'],
        ),
        'get_total_weight'
    )
    weight_uom = fields.Function(
        fields.Many2One('product.uom', 'Weight UOM'),
        'get_weight_uom'
    )
    weight_digits = fields.Function(
        fields.Integer('Weight Digits'), 'on_change_with_weight_digits'
    )

    @classmethod
    def __setup__(cls):
        super(Sale, cls).__setup__()
        cls._error_messages.update({
            'warehouse_address_missing': 'Warehouse address is missing',
        })

    @fields.depends('weight_uom')
    def on_change_with_weight_digits(self, name=None):
        """Decimal digits for weight fields; defaults to 2."""
        if self.weight_uom:
            return self.weight_uom.digits
        return 2

    def get_weight_uom(self, name):
        """
        Returns weight uom for the package
        """
        return self._get_weight_uom().id

    def _get_weight_uom(self):
        """
        Returns Pound as default value for uom

        Downstream module can override this method to change weight uom as per
        carrier
        """
        UOM = Pool().get('product.uom')
        # assumes a UOM with symbol 'lb' exists -- raises IndexError otherwise
        return UOM.search([('symbol', '=', 'lb')])[0]

    def get_package_weight(self, name):
        """
        Returns sum of weight associated with each line

        .. deprecated:: use :meth:`get_total_weight` instead
        """
        warnings.warn(
            'Field package_weight is deprecated, use total_weight instead',
            DeprecationWarning, stacklevel=2
        )
        weight_uom = self._get_weight_uom()
        return self._get_package_weight(weight_uom)

    def get_total_weight(self, name):
        """
        Returns sum of weight associated with each line
        """
        weight_uom = self._get_weight_uom()
        return self._get_total_weight(weight_uom)

    @fields.depends('party', 'shipment_address', 'warehouse')
    def on_change_with_is_international_shipping(self, name=None):
        """
        Return True if international shipping
        """
        from_address = self._get_ship_from_address()

        if self.shipment_address and from_address and \
                from_address.country and self.shipment_address.country and \
                from_address.country != self.shipment_address.country:
            return True
        return False

    def _get_package_weight(self, uom):
        """
        Returns sum of weight associated with package

        .. deprecated:: use :meth:`_get_total_weight` instead
        """
        warnings.warn(
            '_get_package_weight is deprecated, use _get_total_weight instead',
            DeprecationWarning, stacklevel=2
        )
        return sum(
            map(
                lambda line: line.get_weight(uom, silent=True),
                self.lines
            )
        )

    def _get_total_weight(self, uom):
        """
        Returns sum of weight for given uom
        """
        return sum(
            map(
                lambda line: line.get_weight(uom, silent=True),
                self.lines
            )
        )

    def _get_ship_from_address(self):
        """
        Usually the warehouse from which you ship
        """
        # raise_user_error raises; the return is never reached but kept
        # for readability
        if not self.warehouse.address:
            return self.raise_user_error('warehouse_address_missing')
        return self.warehouse and self.warehouse.address

    def add_shipping_line(self, shipment_cost, description):
        """
        This method takes shipping_cost and description as arguments and writes
        a shipping line. It deletes any previous shipping lines which have
        a shipment_cost.

        :param shipment_cost: The shipment cost calculated according to carrier
        :param description: Shipping line description
        """
        self.__class__.write([self], {
            'lines': [
                ('create', [{
                    'type': 'line',
                    'product': self.carrier.carrier_product.id,
                    'description': description,
                    'quantity': 1,  # XXX
                    'unit': self.carrier.carrier_product.sale_uom.id,
                    'unit_price': shipment_cost,
                    'shipment_cost': shipment_cost,
                    'amount': shipment_cost,
                    'taxes': [],
                    'sequence': 9999,  # XXX
                }]),
                ('delete', [
                    line for line in self.lines if line.shipment_cost
                ]),
            ]
        })
class SaleLine:
    'Sale Line'
    __name__ = 'sale.line'

    @classmethod
    def __setup__(cls):
        super(SaleLine, cls).__setup__()
        cls._error_messages.update({
            'weight_required': 'Weight is missing on the product %s',
        })

    def get_weight(self, weight_uom, silent=False):
        """
        Returns weight as required for carriers

        :param weight_uom: Weight uom used by carriers
        :param silent: when True return 0 instead of raising for a product
            without a configured weight
        :return: line weight expressed in ``weight_uom`` (0 for services,
            missing product or non-positive quantity)
        """
        ProductUom = Pool().get('product.uom')

        # Services and empty/negative lines contribute no weight
        if not self.product or self.quantity <= 0 or \
                self.product.type == 'service':
            return 0

        if not self.product.weight:
            if silent:
                return 0
            self.raise_user_error(
                'weight_required',
                error_args=(self.product.name,)
            )

        # Find the quantity in the default uom of the product as the weight
        # is for per unit in that uom
        if self.unit != self.product.default_uom:
            quantity = ProductUom.compute_qty(
                self.unit,
                self.quantity,
                self.product.default_uom
            )
        else:
            quantity = self.quantity

        weight = self.product.weight * quantity

        # Compare product weight uom with the weight uom used by carrier
        # and convert the weight if both are not the same
        if self.product.weight_uom.symbol != weight_uom.symbol:
            weight = ProductUom.compute_qty(
                self.product.weight_uom,
                weight,
                weight_uom,
            )

        return weight
| |
"""
Test suite for the productdb.tasks module
"""
import pytest
import pandas as pd
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.test import Client
from app.config.settings import AppSettings
from app.config.models import NotificationMessage
from app.productdb import tasks, models
from app.productdb.excel_import import ProductsExcelImporter, ProductMigrationsExcelImporter
pytestmark = pytest.mark.django_db
class BaseProductsExcelImporterMock(ProductsExcelImporter):
    """ProductsExcelImporter stub: skips all file handling and serves one
    canned product row instead of parsing a real workbook."""

    def verify_file(self):
        # any input counts as a valid products sheet
        self.valid_file = True

    def _load_workbook(self):
        # no workbook is ever opened
        pass

    def _create_data_frame(self):
        # single canned row, in the column order the importer expects
        columns = [
            "product id",
            "description",
            "list price",
            "currency",
            "vendor",
        ]
        rows = [
            ["Product A", "description of Product A", "4000.00", "USD",
             "Cisco Systems"],
        ]
        self.__wb_data_frame__ = pd.DataFrame(rows, columns=columns)
class BaseProductMigrationsExcelImporterMock(ProductMigrationsExcelImporter):
    """ProductMigrationsExcelImporter stub: skips all file handling and
    serves two canned migration rows for the same product."""

    def verify_file(self):
        # any input counts as a valid migrations sheet
        self.valid_file = True

    def _load_workbook(self):
        # no workbook is ever opened
        pass

    def _create_data_frame(self):
        # two canned rows with distinct migration sources
        columns = [
            "product id",
            "vendor",
            "migration source",
            "replacement product id",
            "comment",
            "migration product info url",
        ]
        rows = [
            ["Product A", "Cisco Systems", "Migration Source",
             "Replacement that is not in the database", "comment", ""],
            ["Product A", "Cisco Systems", "Other Migration Source",
             "Replacement that is not in the database", "comment", ""],
        ]
        self.__wb_data_frame__ = pd.DataFrame(rows, columns=columns)
class InvalidProductsImportProductsExcelFileMock(BaseProductsExcelImporterMock):
    """Importer mock that reports 100 invalid entries and imports nothing."""
    # the task reads this counter to build its status message
    invalid_products = 100

    def import_to_database(self, **kwargs):
        # no-op: nothing is ever written to the database
        pass
@pytest.fixture
def suppress_state_update_in_tasks(monkeypatch):
    """Replace update_state on the three task objects with a no-op so the
    tasks can be called directly (synchronously) in tests."""
    monkeypatch.setattr(tasks.import_price_list, "update_state", lambda state, meta: None)
    monkeypatch.setattr(tasks.import_product_migrations, "update_state", lambda state, meta: None)
    monkeypatch.setattr(tasks.perform_product_check, "update_state", lambda state, meta: None)
@pytest.mark.usefixtures("suppress_state_update_in_tasks")
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestRunProductCheckTask:
    """Tests for the tasks.perform_product_check task."""

    def test_successful_execution(self):
        # a valid ProductCheck id yields a status message and one entry
        pc = models.ProductCheck.objects.create(name="Test", input_product_ids="Test")

        result = tasks.perform_product_check(product_check_id=pc.id)

        assert "status_message" in result
        assert models.ProductCheckEntry.objects.all().count() == 1

    def test_failed_execution(self):
        # an unknown ProductCheck id reports an error and creates nothing
        result = tasks.perform_product_check(product_check_id=9999)

        assert "error_message" in result
        assert models.ProductCheckEntry.objects.all().count() == 0
@pytest.mark.usefixtures("suppress_state_update_in_tasks")
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestImportProductMigrationsTask:
    """Tests for the tasks.import_product_migrations task."""

    def test_successful_full_import_product_migration_task(self, monkeypatch):
        """A valid upload creates the migration sources/options and the
        uploaded JobFile is removed afterwards."""
        # replace the ProductMigrationsExcelImporter class with a mock that
        # serves two canned rows with two distinct migration sources
        monkeypatch.setattr(tasks, "ProductMigrationsExcelImporter", BaseProductMigrationsExcelImporterMock)

        models.Product.objects.create(product_id="Product A", vendor=models.Vendor.objects.get(id=1))
        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_product_migrations(
            job_file_id=jf.id,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result, "If successful, a status message should be returned"
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"
        # the mock serves two rows with two different migration sources
        # (messages previously claimed "One ... was created" while asserting 2)
        assert models.ProductMigrationSource.objects.count() == 2, "Two Product Migration Sources should be created"
        assert models.ProductMigrationOption.objects.count() == 2, "Two Product Migration Options should be created"

    def test_call_with_invalid_invalid_file_format(self):
        """An unparseable upload fails with an 'invalid file format' error."""
        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))
        expected_message = "import failed, invalid file format ("

        result = tasks.import_product_migrations(
            job_file_id=jf.id,
            user_for_revision=User.objects.get(username="api")
        )

        assert "error_message" in result
        assert result["error_message"].startswith(expected_message)

    def test_call_with_invalid_job_file_id(self):
        """An unknown JobFile id fails with a 'cannot find file' error."""
        result = tasks.import_product_migrations(
            job_file_id=9999,
            user_for_revision=User.objects.get(username="api")
        )

        assert "error_message" in result
        assert "Cannot find file that was uploaded." == result["error_message"]
@pytest.mark.usefixtures("suppress_state_update_in_tasks")
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestImportPriceListTask:
    """Tests for the tasks.import_price_list task."""

    def test_successful_full_import_price_list_task(self, monkeypatch):
        """A full import creates the product served by the mock importer."""
        # replace the ProductsExcelImporter class
        monkeypatch.setattr(tasks, "ProductsExcelImporter", BaseProductsExcelImporterMock)

        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=True,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result, "If successful, a status message should be returned"
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"
        assert models.Product.objects.count() == 1, "One Product was created"

    def test_successful_update_only_import_price_list_task(self, monkeypatch):
        """In update-only mode nothing is created; existing products are
        updated from the imported rows."""
        # replace the ProductsExcelImporter class
        monkeypatch.setattr(tasks, "ProductsExcelImporter", BaseProductsExcelImporterMock)

        # import in update only mode against an empty database
        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=True,
            update_only=True,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result, "If successful, a status message should be returned"
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"
        # message fixed: the original said "One Product was created" here
        assert models.Product.objects.count() == 0, "No Product must be created in update-only mode"

        # create the product, then the update-only import must update it
        models.Product.objects.create(product_id="Product A", vendor=models.Vendor.objects.get(name__startswith="Cisco"))
        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=True,
            update_only=True,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result, "If successful, a status message should be returned"
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"
        assert models.Product.objects.count() == 1, "Only the pre-existing Product exists"

        p = models.Product.objects.get(product_id="Product A")
        assert "description of Product A" == p.description

    def test_notification_message_on_import_price_list_task(self, monkeypatch):
        """A NotificationMessage is created only when requested."""
        # replace the ProductsExcelImporter class
        monkeypatch.setattr(tasks, "ProductsExcelImporter", BaseProductsExcelImporterMock)

        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=False,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result, "If successful, a status message should be returned"
        assert NotificationMessage.objects.count() == 0, "No notification message is created"
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"

        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=True,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result
        assert NotificationMessage.objects.count() == 1
        assert models.JobFile.objects.count() == 0, "Should be deleted after the task was completed"

    def test_call_with_invalid_products(self, monkeypatch):
        """Invalid rows are reported in the status message."""
        # replace the ProductsExcelImporter class
        monkeypatch.setattr(tasks, "ProductsExcelImporter", InvalidProductsImportProductsExcelFileMock)

        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))
        expected_message = "100 entries are invalid. Please check the following messages for more details."

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=False,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "status_message" in result
        assert expected_message in result["status_message"]

    def test_call_with_invalid_invalid_file_format(self):
        """An unparseable upload fails with an 'invalid file format' error."""
        jf = models.JobFile.objects.create(file=SimpleUploadedFile("myfile.xlsx", b"xyz"))
        expected_message = "import failed, invalid file format ("

        result = tasks.import_price_list(
            job_file_id=jf.id,
            create_notification_on_server=False,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "error_message" in result
        assert result["error_message"].startswith(expected_message)

    def test_call_with_invalid_job_file_id(self):
        """An unknown JobFile id fails with a 'cannot find file' error."""
        result = tasks.import_price_list(
            job_file_id=9999,
            create_notification_on_server=True,
            update_only=False,
            user_for_revision=User.objects.get(username="api")
        )

        assert "error_message" in result
        assert "Cannot find file that was uploaded." == result["error_message"]
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
@pytest.mark.usefixtures("redis_server_required")
def test_trigger_manual_cisco_eox_synchronization():
    """Hitting the manual-sync view schedules the Cisco EoX update and
    stores the resulting task ID in the cache."""
    app_config = AppSettings()
    app_config.set_cisco_api_enabled(True)
    app_config.set_periodic_sync_enabled(True)

    # schedule Cisco EoX API update
    url = reverse('cisco_api:start_cisco_eox_api_sync_now')
    client = Client()
    client.login(username="pdb_admin", password="pdb_admin")
    resp = client.get(url)

    # the view redirects after scheduling
    assert resp.status_code == 302

    # verify that task ID is saved in the cache (set by the schedule call)
    task_id = cache.get("CISCO_EOX_API_SYN_IN_PROGRESS", "")
    assert task_id != ""
def test_delete_all_product_checks():
    """The cleanup task removes every stored ProductCheck."""
    models.ProductCheck.objects.create(name="Test", input_product_ids="Test")

    tasks.delete_all_product_checks()

    assert models.ProductCheck.objects.all().count() == 0
| |
# -*- coding: utf-8 -*-
from django.apps import apps
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group, UserManager
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from cms.models.managers import (PagePermissionManager,
GlobalPagePermissionManager)
from cms.utils.helpers import reversion_register
# Cannot use contrib.auth.get_user_model() at compile time.
# Resolve the configured AUTH_USER_MODEL from the app registry instead;
# fail loudly at import time when the user app is not installed.
user_app_name, user_model_name = settings.AUTH_USER_MODEL.rsplit('.', 1)
User = None
try:
    # KeyError: the app or model is not registered (app missing from
    # INSTALLED_APPS, or models not loaded yet)
    User = apps.get_registered_model(user_app_name, user_model_name)
except KeyError:
    pass
if User is None:
    raise ImproperlyConfigured(
        "You have defined a custom user model %s, but the app %s is not "
        "in settings.INSTALLED_APPS" % (settings.AUTH_USER_MODEL, user_app_name)
    )
# NOTE: those are not just numbers!! we will do binary AND on them,
# so pay attention when adding/changing them, or MASKs..
ACCESS_PAGE = 1
ACCESS_CHILDREN = 2  # just immediate children (1 level)
ACCESS_PAGE_AND_CHILDREN = 3  # page + immediate children (PAGE | CHILDREN)
ACCESS_DESCENDANTS = 4
ACCESS_PAGE_AND_DESCENDANTS = 5  # page + all descendants (PAGE | DESCENDANTS)

# binary masks for ACCESS permissions
MASK_PAGE = 1
MASK_CHILDREN = 2
MASK_DESCENDANTS = 4

# choices for the PagePermission.grant_on field
ACCESS_CHOICES = (
    (ACCESS_PAGE, _('Current page')),
    (ACCESS_CHILDREN, _('Page children (immediate)')),
    (ACCESS_PAGE_AND_CHILDREN, _('Page and children (immediate)')),
    (ACCESS_DESCENDANTS, _('Page descendants')),
    (ACCESS_PAGE_AND_DESCENDANTS, _('Page and descendants')),
)
class AbstractPagePermission(models.Model):
    """Abstract page permissions

    Base model shared by PagePermission and GlobalPagePermission: a grant
    is tied to a user or a group ("who") and carries a set of boolean
    capability flags ("what").
    """
    # who:
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"), blank=True, null=True)
    group = models.ForeignKey(Group, verbose_name=_("group"), blank=True, null=True)

    # what:
    can_change = models.BooleanField(_("can edit"), default=True)
    can_add = models.BooleanField(_("can add"), default=True)
    can_delete = models.BooleanField(_("can delete"), default=True)
    can_change_advanced_settings = models.BooleanField(_("can change advanced settings"), default=False)
    can_publish = models.BooleanField(_("can publish"), default=True)
    can_change_permissions = models.BooleanField(_("can change permissions"), default=False, help_text=_("on page level"))
    can_move_page = models.BooleanField(_("can move"), default=True)
    can_view = models.BooleanField(_("view restricted"), default=False, help_text=_("frontend view restriction"))

    class Meta:
        abstract = True
        app_label = 'cms'

    def clean(self):
        """Validate the grant: a user or group must be selected, and every
        capability except can_view requires can_change as well."""
        super(AbstractPagePermission, self).clean()

        if not self.user and not self.group:
            raise ValidationError(_('Please select user or group.'))

        # when can_change is granted, any combination is valid
        if self.can_change:
            return

        if self.can_add:
            message = _("Users can't create a page without permissions "
                        "to change the created page. Edit permissions required.")
            raise ValidationError(message)

        if self.can_delete:
            message = _("Users can't delete a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

        if self.can_publish:
            message = _("Users can't publish a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

        if self.can_change_advanced_settings:
            message = _("Users can't change page advanced settings without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

        if self.can_change_permissions:
            message = _("Users can't change page permissions without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

        if self.can_move_page:
            message = _("Users can't move a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

    @property
    def audience(self):
        """Return audience by priority, so: All or User, Group
        """
        # the filter object is consumed immediately by the join below
        targets = filter(lambda item: item, (self.user, self.group,))
        return ", ".join([force_text(t) for t in targets]) or 'No one'

    def save(self, *args, **kwargs):
        # silently drop grants that target neither a user nor a group
        if not self.user and not self.group:
            # don't allow `empty` objects
            return
        return super(AbstractPagePermission, self).save(*args, **kwargs)

    def get_configured_actions(self):
        """Return the actions whose required flags are all set on self."""
        actions = [action for action in self.get_permissions_by_action()
                   if self.has_configured_action(action)]
        return actions

    def has_configured_action(self, action):
        """True when every flag required for *action* is set on self."""
        permissions = self.get_permissions_by_action()[action]
        return all(getattr(self, perm) for perm in permissions)

    @classmethod
    def get_all_permissions(cls):
        """All capability flag names defined on this model."""
        perms = [
            'can_add',
            'can_change',
            'can_delete',
            'can_publish',
            'can_change_advanced_settings',
            'can_change_permissions',
            'can_move_page',
            'can_view',
        ]
        return perms

    @classmethod
    def get_permissions_by_action(cls):
        # Maps an action to the required flags on the
        # PagePermission model or GlobalPagePermission model
        permissions_by_action = {
            'add_page': ['can_add', 'can_change'],
            'change_page': ['can_change'],
            'change_page_advanced_settings': ['can_change', 'can_change_advanced_settings'],
            'change_page_permissions': ['can_change', 'can_change_permissions'],
            'delete_page': ['can_change', 'can_delete'],
            'delete_page_translation': ['can_change', 'can_delete'],
            'move_page': ['can_change', 'can_move_page'],
            'publish_page': ['can_change', 'can_publish'],
            'view_page': ['can_view'],
        }
        return permissions_by_action
@python_2_unicode_compatible
class GlobalPagePermission(AbstractPagePermission):
    """Permissions for all pages (global).

    Applies to every page on the selected sites, or to all sites when no
    site is selected.
    """
    can_recover_page = models.BooleanField(
        verbose_name=_("can recover pages"),
        default=True,
        help_text=_("can recover any deleted page"),
    )
    # NOTE(review): the grammar in this help_text ("user haves") is part of
    # the translatable msgid; changing it would invalidate translations.
    sites = models.ManyToManyField(
        to=Site,
        blank=True,
        help_text=_('If none selected, user haves granted permissions to all sites.'),
        verbose_name=_('sites'),
    )

    objects = GlobalPagePermissionManager()

    class Meta:
        verbose_name = _('Page global permission')
        verbose_name_plural = _('Pages global permissions')
        app_label = 'cms'

    def __str__(self):
        return "%s :: GLOBAL" % self.audience
@python_2_unicode_compatible
class PagePermission(AbstractPagePermission):
    """Page permissions for single page

    ``grant_on`` selects which pages relative to ``page`` the grant covers
    (see the ACCESS_* constants).
    """
    grant_on = models.IntegerField(_("Grant on"), choices=ACCESS_CHOICES, default=ACCESS_PAGE_AND_DESCENDANTS)
    page = models.ForeignKey(Page, null=True, blank=True, verbose_name=_("page"))

    objects = PagePermissionManager()

    class Meta:
        verbose_name = _('Page permission')
        verbose_name_plural = _('Page permissions')
        app_label = 'cms'

    def __str__(self):
        page = self.page_id and force_text(self.page) or "None"
        return "%s :: %s has: %s" % (page, self.audience, force_text(self.get_grant_on_display()))

    def clean(self):
        super(PagePermission, self).clean()

        if self.can_add and self.grant_on == ACCESS_PAGE:
            # this is a misconfiguration - user can add/move page to current
            # page but after he does this, he will not have permissions to
            # access this page anymore, so avoid this.
            message = _("Add page permission requires also access to children, "
                        "or descendants, otherwise added page can't be changed "
                        "by its creator.")
            raise ValidationError(message)

    def get_page_ids(self):
        """Yield the ids of all pages covered by this grant.

        grant_on is tested against the binary MASK_* values, so the combined
        ACCESS_PAGE_AND_* constants match both branches.
        """
        if self.grant_on & MASK_PAGE:
            yield self.page_id

        if self.grant_on & MASK_CHILDREN:
            children = self.page.get_children().values_list('id', flat=True)

            for child in children:
                yield child
        elif self.grant_on & MASK_DESCENDANTS:
            # prefer the cached descendant pages when already loaded
            if self.page.has_cached_descendants():
                descendants = (page.pk for page in self.page.get_cached_descendants())
            else:
                descendants = self.page.get_descendants().values_list('id', flat=True).iterator()

            for descendant in descendants:
                yield descendant
class PageUserManager(UserManager):
    """Manager for PageUser; excluded from migration state."""
    use_in_migrations = False
class PageUser(User):
    """Cms specific user data, required for permission system

    Multi-table-inheritance child of the configured user model; records
    which user created this one.
    """
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="created_users")

    objects = PageUserManager()

    class Meta:
        verbose_name = _('User (page)')
        verbose_name_plural = _('Users (page)')
        app_label = 'cms'
class PageUserGroup(Group):
    """Cms specific group data, required for permission system

    Multi-table-inheritance child of django's Group; records which user
    created this group.
    """
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="created_usergroups")

    class Meta:
        verbose_name = _('User group (page)')
        verbose_name_plural = _('User groups (page)')
        app_label = 'cms'
        # opt in to the future manager-inheritance behaviour
        manager_inheritance_from_future = True
reversion_register(PagePermission)
| |
import numpy as np
import numpy as np
import gsmlib.splibs as sp
import matplotlib.pyplot as plt
from gsmlib.SB import SB,SBTraining
from gsmlib.NB import NB,NBTraining
def readfile(fn):
    """Read one complex number per line from *fn* into a numpy array.

    Some dumps write values like ``1+-2j``; the ``+-`` sign pair is
    normalized so Python's complex() parser accepts the token.

    :param fn: path of the text file to read
    :return: 1-D complex ndarray with one entry per line
    """
    values = []
    # the with-statement closes the file even on a parse error
    # (the original implementation leaked the file handle)
    with open(fn) as f:
        for line in f:
            values.append(complex(line.replace("+-", "-")))
    return np.array(values)
# Raw complex-valued dumps, one value per line (paths relative to the
# script's location in the source tree).
mafi = readfile("../../../data/mafi")
training = readfile("../../../data/training")
rhh = readfile("../../../data/rhh")

# Normalise both by rhh[2] (the centre tap of the channel estimate),
# making rhh[2] equal to 1.
mafi = mafi/rhh[2]
rhh = rhh/rhh[2]
def toState(t, r_i):
    """Pack a complex symbol sequence into a state integer.

    1 -> 1, -1 -> 0: each symbol contributes one bit, decided from its
    real part when the current phase is 1 and from its imaginary part
    otherwise; the phase alternates after every symbol.
    """
    state = 0
    use_real = (r_i == 1)
    for sym in t:
        component = sym.real if use_real else sym.imag
        state = (state << 1) | (1 if component > 0 else 0)
        use_real = not use_real
    return state
def s2s(s, r_i):
    """Expand state integer *s* into 5 rotated symbols (inverse bit order).

    rf = 1, if = 0: r_i selects which alternating positions receive the
    ``j`` rotation.
    """
    offset = 1 if r_i == 1 else 0
    out = np.zeros(5, dtype=complex)
    for i in range(5):
        out[i] = (1. + 0j) if (s & 1) else (-1. + 0j)
        s >>= 1
        if (i + offset) % 2 == 0:
            out[i] *= 1.j
    return out
def table(r):
    """Build the 2 x 32 lookup of np.dot(s2s(state, phase), r) values,
    one row per rotation phase."""
    rows = [[np.dot(s2s(state, phase), r) for state in range(32)]
            for phase in range(2)]
    return np.array(rows, dtype=complex)
def mindiff(x, h):
    """Return (as a binary string) the index of the entry in *h* with the
    smallest squared distance to *x*."""
    delta = h - x
    return bin((delta * np.conj(delta)).argmin())
def t2b(t, r_i):
    """Hard-decide a rotated symbol stream into a bit list.

    The decided component alternates between imaginary (phase 0) and real
    (phase 1), starting with the given phase.
    """
    bits = []
    for sym in t:
        value = sym.real if r_i else sym.imag
        bits.append(1 if value > 0 else 0)
        r_i = 1 - r_i
    return bits
def maxState(h):
    """Index of the largest metric in *h*."""
    return h.argmax()
def forward(t,m,start,r_i,l):
    # NOTE: Python 2 only (print statement below; "/" and "/=" rely on
    # integer division of the int-valued shapes/states).
    # Viterbi forward pass: t is the 2 x 32 table of expected values per
    # (rotation phase, state), m the received samples, start the initial
    # state, r_i the starting rotation phase, l the number of steps.
    # Returns the decoded bit list (reversed traceback at the end).
    (i,sn) = t.shape
    sn /= 2
    metrics = np.zeros((sn,l+1))
    tracback = np.zeros((sn,l),dtype=int)
    # all states except the known start state begin effectively at -inf
    for i in range(sn):
        metrics[i,0]=-1e100
    metrics[start,0]=0
    for i in range(l):
        for s in range(sn/2):
            # shift in 0: the two predecessors of state s*2 are s and s+sn/2
            #print s,metrics[s,i],metrics[s+sn/2,i],m[i],t[r_i,s*2],t[r_i,s*2+sn],t[r_i,s*2+1],t[r_i,s*2+sn+1]
            m00 = metrics[s,i]+(m[i]*t[r_i,s*2]).real
            m08 = metrics[s+sn/2,i]+(m[i]*t[r_i,s*2+sn]).real
            if m00>m08:
                metrics[s*2,i+1]=m00
                tracback[s*2,i]=0
            else:
                metrics[s*2,i+1]=m08
                tracback[s*2,i]=1
            #print m00,m08,
            # shift in 1
            m10 = metrics[s,i]+(m[i]*t[r_i,s*2+1]).real
            m18 = metrics[s+sn/2,i]+(m[i]*t[r_i,s*2+sn+1]).real
            if m10>m18:
                metrics[s*2+1,i+1]=m10
                tracback[s*2+1,i]=0
            else:
                metrics[s*2+1,i+1]=m18
                tracback[s*2+1,i]=1
            #print m10,m18
        #print r_i,m[i],mindiff(m[i],t[r_i,:])
        #print "%3d %3d"%(i,maxState(metrics[:,i+1])),tracback[:,i],m[i]
        # rotation phase alternates every sample
        r_i = 1 - r_i
    # pick the best final state
    end = metrics[:,l]
    ends = end.argmax()
    print "end state",ends
    ret = []
    es = ends
    # the final state encodes the last 4 decided bits
    for i in range(4):
        ret.append(es&1)
        es >>= 1
    # walk the traceback table backwards to recover the earlier bits
    for i in range(l-1,0,-1):
        b = tracback[ends,i]
        ret.append(b)
        ends /=2
        ends += b*sn/2
    return ret[::-1]
# ---- SB burst: compare table lookup, local forward() and the
# viterbi_detector implementation on the same capture ----
t = table(rhh)

# Re-synthesize the training part from the state table as a sanity check.
fout = np.zeros(64,dtype=complex)
for i in range(64-5):
    ss = toState(training[i:i+5],i%2)
    r_i = i%2
    #print training[i],"%4d"%(ss),t[r_i][ss],mafi[42+2+i]
    fout[i]=t[r_i][ss]

y = t2b(training,0)

from gsmlib.viterbi_detector import viterbi_detector
v = viterbi_detector(5,44,training)
v.setTraining(training)
v.table(rhh)
#z = v.forward(mafi[42+62:])
#v.startFS = 0
#v.startBS = 0
a = v.forward(mafi[42+62:])
# v.table(np.conj(rhh))
# b = v.backward(mafi[::-1])
# b = b[::-1]
b = v.backward(mafi[:44])
x = forward(np.conj(t),mafi,0,0,len(mafi))
v.table(rhh)
ra = v.restore_forward(a,0,0)
rb = v.restore_backward(a,0,1)
# plt.figure(0)
# plt.plot(rb.real,'r')
# plt.plot(mafi.real,'b')
# plt.figure(1)
# plt.plot(rb.imag,'r')
# plt.plot(mafi.imag,'b')
# plt.show()
print "b:",
v.outMsg(v.dediff_backward(b,0,SBTraining.bits[3]))
print "a:",
v.outMsg(v.dediff_forward(a,0,SBTraining.bits[-4]))
print "x:",
v.outMsg(v.dediff_backward(x,1,0))

# ---- NB burst: repeat the comparison on the normal-burst capture ----
mafi = readfile("../../../data/nbmafi")
training = readfile("../../../data/nbtraining")
rhh = readfile("../../../data/nbrhh")

# same centre-tap normalisation as above
mafi = mafi/rhh[2]
rhh = rhh/rhh[2]

y = t2b(training,1)
print "y",
v.outMsg(y)
print training
v = viterbi_detector(5,61,training)
v.setTraining(training)
v.table(rhh)
#z = v.forward(mafi[42+62:])
#v.startFS = 0
#v.startBS = 0
a = v.forward(mafi[61+24:])
# v.table(np.conj(rhh))
# b = v.backward(mafi[::-1])
# b = b[::-1]
b = v.backward(mafi[:63])
x = forward(np.conj(t),mafi,0,0,len(mafi))
print "ox"
v.outMsg(x)
yy = v.t2b(mafi,1)
print "hd"
v.outMsg(yy)
v.table(rhh)
ra = v.restore_forward(a,0,0)
rb = v.restore_backward(a,0,1)
print "b:",
v.outMsg(v.dediff_backward(b,0,NBTraining.bits[5][3]))
print "a:",
v.outMsg(v.dediff_forward(a,0,NBTraining.bits[5][-4]))
print "x:",
v.outMsg(v.dediff_backward(x,1,0))
| |
from decimal import Decimal, InvalidOperation
__author__ = 'Duong Duc Anh'
from datetime import datetime
from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models.sql.aggregates import Max
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, loader, Context
from YAASApp import form
from YAASApp.form import CreateUserForm, ChangeEmail, createAuction, BidForm
from YAASApp.models import Auction, Bid
from django.utils import translation
from django.core.mail import send_mail
# Create your views here.
def home(request):
    """Render the landing page."""
    return render_to_response(
        "home.html", {'form': form},
        context_instance=RequestContext(request))
def register(request):
    """Create a new user account.

    On a valid POST the user is stored and the browser is redirected to
    the profile page; an invalid POST re-renders the bound form with its
    errors.  A GET now renders an unbound form -- the original bound it
    to the empty POST dict, which made validation errors appear before
    the user typed anything.
    """
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            new_user = User()
            new_user.username = form.cleaned_data['username']
            new_user.email = form.cleaned_data['email']
            new_user.set_password(form.cleaned_data['password'])
            new_user.save()
            return HttpResponseRedirect('/profile/')
        # invalid: fall through and re-render the bound form with errors
    else:
        form = CreateUserForm()
    return render_to_response(
        "registration.html", {'form': form},
        context_instance=RequestContext(request))
def login_user(request):
    """Authenticate and log a user in.

    On success redirect to ``?next=...`` when present, otherwise to the
    profile page; on failure re-render the login page with a prompt.
    """
    if request.method == 'POST':
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        # url to redirect to after successful login
        next_to = request.GET.get('next', '')

        user = auth.authenticate(username=username, password=password)
        if user is None:
            return render_to_response(
                "login.html", {'error': "Please Sign in"},
                context_instance=RequestContext(request))

        auth.login(request, user)
        if len(next_to) != 0:
            return HttpResponseRedirect(next_to)
        return HttpResponseRedirect('/profile/')

    return render_to_response(
        "login.html", {},
        context_instance=RequestContext(request))
def all_auction(request):
    """List all auctions that are not banned (state 'B')."""
    # Auction.objects.exclude(...) is equivalent to .all().exclude(...)
    auctions = Auction.objects.exclude(state='B')
    t = loader.get_template("archive.html")
    c = Context({'auctions': auctions})
    return HttpResponse(t.render(c))
def post(request, id):
    """Show a single auction and its bids."""
    # 404 instead of an unhandled DoesNotExist when the id is unknown
    auction = get_object_or_404(Auction, id=id)
    bids = Bid.objects.filter(auction_id=id)
    t = loader.get_template("post.html")
    c = Context({'auction': auction, 'bids': bids})
    return HttpResponse(t.render(c))
@login_required(login_url='/login/')
def user_profile(request):
    """User profile page for the logged-in user."""
    # request.user is already the authenticated User object; the original
    # fetched it again from the database and never used the result
    return render_to_response('user_profile.html', {
        'user': request.user,
    })
@login_required(login_url='/login/')
def change_email(request):
    """Let the logged-in user change their e-mail address.

    The address must be entered twice; it is saved only when both
    fields match.
    """
    user = User.objects.get(pk=request.user.pk)
    if request.method == 'POST':
        form = ChangeEmail(request.POST)
        if form.is_valid():
            if form.cleaned_data['email'] == form.cleaned_data['email_confirm']:
                user.email = form.cleaned_data['email']
                user.save()
                return HttpResponseRedirect('/profile/')
        # fall through with the bound form so errors are redisplayed
    else:
        # GET: present an unbound form instead of one bound to empty data
        form = ChangeEmail()
    return render_to_response('email.html', {'form': form},
                              context_instance=RequestContext(request))
# @login_required
# def add_auction(request):
# if not request.method == 'POST':
# form = createAuction()
# return render_to_response('createauction.html', {'form' : form}, context_instance=RequestContext(request))
# else:
# form = createAuction(request.POST)
# if form.is_valid():
# cd = form.cleaned_data
# a_title = cd['title']
# a_description = cd['description']
# a_min_price = cd['min_price']
# a_end_time = cd['end_time']
# auction = Auction(seller = request.user, title =a_title, description = a_description, min_price = a_min_price, end_time = a_end_time, state = 'A')
# auction.save()
# return HttpResponseRedirect('/allauction/')
# else:
# form = createAuction()
# return render_to_response('createauction.html', {'form' : form, "error" : "Not valid data" }, context_instance=RequestContext(request))
@login_required(login_url='/login/')
def add_auction(request):
    """Step 1 of auction creation: validate the form and show a
    confirmation page (the auction itself is saved in add_auctionConf)."""
    if request.method != 'POST':
        form = createAuction()
        return render_to_response('createauction.html', {'form': form},
                                  context_instance=RequestContext(request))
    form = createAuction(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        # hand the validated values to the confirmation template
        return render_to_response(
            "addAuctionConf.html",
            {'a_title': cd['title'], 'a_description': cd['description'],
             'a_min_price': cd['min_price'], 'a_end_time': cd['end_time']},
            context_instance=RequestContext(request))
    # keep the bound form so validation errors are shown to the user
    return render_to_response('createauction.html',
                              {'form': form, "error": "Not valid data"},
                              context_instance=RequestContext(request))
def add_auctionConf(request):
    """Step 2 of auction creation: save the auction if the user confirmed.

    Expects the POSTed fields produced by add_auction's confirmation page
    ('answer', 'a_title', 'a_description', 'a_min_price', 'a_end_time').
    NOTE(review): relies on ``datetime`` which the original module never
    imported; the module-level import added at the top of the file fixes
    that.
    """
    answer = request.POST['answer']
    a_title = request.POST['a_title']
    a_description = request.POST['a_description']
    a_min_price = request.POST['a_min_price']
    endtime_str = str(request.POST['a_end_time'])
    try:
        a_end_time = datetime.strptime(endtime_str, '%d-%m-%Y %H:%M')
    except ValueError:
        # unparsable date: send the user back to the creation form
        form = createAuction()
        return render_to_response('createauction.html',
                                  {'form': form, "error": "Not valid data"},
                                  context_instance=RequestContext(request))
    if answer != 'Yes':
        # the user declined on the confirmation page
        return HttpResponseRedirect('/')
    auction = Auction()
    auction.title = a_title
    auction.description = a_description
    auction.min_price = a_min_price
    auction.end_time = a_end_time
    auction.seller_id = request.user.id
    auction.state = 'A'
    auction.save()
    send_mail('No-reply: Auction', 'Your auction has been created successfully',
              'admin@example.com', [request.user.email], fail_silently=False)
    return HttpResponseRedirect('/allauction/')
@login_required(login_url='/login/')
def edit_description(request, id):
    """Let the seller edit the description of one of their auctions."""
    articles = Auction.objects.filter(id=id)
    if len(articles) > 0:
        article = Auction.getByID(id)
    else:
        # NOTE(review): constructs an unsaved Auction with this pk when the
        # id does not exist — preserved from the original; verify intent
        article = Auction(id)
    # only the seller may edit the auction
    if request.user.id == article.seller_id:
        # 'has_key' is Python-2-only; the 'in' operator is equivalent
        if request.method == "POST" and 'description' in request.POST:
            article.description = request.POST["description"].strip()
            article.save()
            return HttpResponseRedirect('/allauction/' + id)
        else:
            return render_to_response("edit.html",
                                      {'id': article.id,
                                       'description': article.description.strip()},
                                      context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect('/allauction/')
@login_required(login_url='/login/')
def add_bid(request, id):
    """Place a bid on auction *id*.

    Rules enforced below: banned ('B') auctions cannot be bid on, the
    seller cannot bid on their own auction, and a new bid must be
    strictly higher than the current highest bid (or the auction's
    minimum price when there are no bids yet).  The bidder, the seller
    and the previously-highest bidder are notified by e-mail.
    NOTE(review): Decimal/InvalidOperation are used below but were not
    imported by the original module — confirm the import exists.
    """
    auction = get_object_or_404(Auction, id = id)
    if auction.state == 'B':
        # banned auction: bounce back to its page
        return HttpResponseRedirect('/allauction/'+id)
    if not request.method == 'POST':
        # GET: show the bid form
        return render_to_response("bid.html", {'id':auction.id},
                                  context_instance=RequestContext(request))
    if not request.user.id == auction.seller_id:
        #print 'is not seller'
        form = BidForm(request.POST)
        new_bid_raw = request.POST['min_price']
        try:
            new_bid_dec = Decimal(new_bid_raw)
        except InvalidOperation:
            # not a parsable decimal amount: re-show the bid form
            return render_to_response("bid.html", {'id':auction.id},
                                      context_instance=RequestContext(request))
        # if form.is_valid():
        #print 'is valid'
        bid_price = new_bid_dec
        # the floor is the auction's minimum price until a bid exists
        highest_bid = auction.min_price
        prev_bidder = []
        if len(Bid.objects.filter(auction_id = id)) > 0:
            # latest('id') == most recent bid; remember its bidder so they
            # can be notified that they were outbid
            prev_bidder = [Bid.objects.filter(auction_id = id).latest('id').bidder.email]
            highest_bid = Bid.objects.filter(auction_id = id).latest('id').bid_price
        if bid_price>highest_bid:
            print 'if condition'
            new_bid = Bid(bid_price = bid_price, auction_id= id, bidder_id = request.user.id)
            new_bid.save()
            send_mail('No-reply: Bid', 'Your bid has been added successfully', 'admin@example.com',
                      [request.user.email, auction.seller.email]+prev_bidder, fail_silently=False)
            return HttpResponseRedirect('/allauction/'+id)
        else:
            # bid not higher than the current highest: re-show the form
            return render_to_response("bid.html", {'id':auction.id},
                                      context_instance=RequestContext(request))
        # highest_bid_amount = Bid.objects.filter(auction_id = id).aggregate(Max('bid_price')).get('amount__max')
        # if (bid_price > highest_bid_amount):
        #     print 'is high'
        #     new_bid = Bid(bid_price = bid_price, auction_id= id, bidder_id = request.user.id)
        #     new_bid.save()
        #     return HttpResponseRedirect('/allauction/')
        # else:
        #     print 'is not ok'
        #     return HttpResponseRedirect('/allauction/')
        # return render_to_response("bid.html", {'id':auction.id},
        #     context_instance=RequestContext(request))
    else:
        # the seller may not bid on their own auction
        return HttpResponseRedirect('/allauction/'+id)
def search(request):
    """Search auctions by title keyword; banned auctions are excluded."""
    if request.method != 'POST':
        return render_to_response('search.html',
                                  context_instance=RequestContext(request))
    key_word = request.POST['search']
    matches = Auction.objects.filter(title__contains=key_word).exclude(state='B')
    results = list(matches)
    return render_to_response('archive.html', {'auctions': results},
                              context_instance=RequestContext(request))
def apisearch(request, title):
    """JSON API: return the auction with the given title.

    404 when no such auction exists; 400 for banned auctions or when
    serialization fails.
    """
    auction = get_object_or_404(Auction, title=title)
    if auction.state == 'B':
        response = HttpResponse()
        response.status_code = 400
        return response
    try:
        payload = serializers.serialize("json", [auction])
    except (ValueError, TypeError, IndexError):
        response = HttpResponse()
        response.status_code = 400
        return response
    response = HttpResponse(payload, mimetype="application/json")
    response.status_code = 200
    return response
# def switch_to_Swedish_link(request):
class APIBid:
    """HTTP-basic-authenticated API endpoint for placing bids via the
    custom "ADDBID" request method.

    Fixes over the original: __call__ called a nonexistent ``do_ADD``
    method, ``auction_id`` was never stored although do_ADDBID reads it,
    do_ADDBID discarded add_bid's response, and debug prints leaked the
    plaintext credentials.
    """

    def __call__(self, request, username, auction_id):
        self.request = request
        # Look up the target user and throw a 404 if it doesn't exist
        self.user = get_object_or_404(User, username=username)
        # remember which auction the bid is for (read by do_ADDBID)
        self.auction_id = auction_id
        if request.method not in ["ADDBID"]:
            return HttpResponseNotAllowed(["ADDBID"])
        # Check and store HTTP basic authentication, even for methods that
        # don't require authorization.
        self.authenticate()
        if request.method == "ADDBID":
            return self.do_ADDBID()

    def authenticate(self):
        """Parse the Authorization header and authenticate the credentials."""
        auth_info = self.request.META.get("HTTP_AUTHORIZATION", None)
        if auth_info and auth_info.startswith("Basic "):
            basic_info = auth_info.split(" ", 1)[1]
            u, p = basic_info.decode("base64").split(":")
            self.user = u
            # Authenticate against the User database. This will set
            # authenticated_user to None if authentication fails.
            self.authenticated_user = authenticate(username=u, password=p)
        else:
            self.authenticated_user = None

    def forbidden(self):
        """403 response carrying the basic-auth challenge header."""
        response = HttpResponseForbidden()
        response["WWW-Authenticate"] = 'Basic realm="Auction"'
        return response

    def do_ADDBID(self):
        """Place the bid as the authenticated user (403 on mismatch)."""
        if self.user != str(self.authenticated_user):
            return self.forbidden()
        return add_bid(request=self.request, id=self.auction_id)
class force_lang:
    """Context manager that temporarily activates another translation
    language and restores the previous one on exit."""

    def __init__(self, new_lang):
        self.new_lang = new_lang
        # remember what was active so __exit__ can restore it
        self.old_lang = translation.get_language()

    def __enter__(self):
        translation.activate(self.new_lang)

    def __exit__(self, exc_type, exc_value, traceback):
        translation.activate(self.old_lang)
#Vietnamese views
def home_vi(request):
    """Render the home page with the Vietnamese translation forced."""
    with force_lang('vi'):
        context = {'form': form}
        return render_to_response("home.html", context,
                                  context_instance=RequestContext(request))
| |
# -*- coding: utf-8 -*-
"""Object to read lines from a stream using an arbitrary delimiter"""
import math
# from itertools import filterfalse
# from unicodedata import combining, normalize
__all__ = ['ReadLines']
# the default amount of data to read on a buffer full (1 MiB)
# NOTE(review): the previous value 1048756 looks like a digit-transposition
# typo of 2**20 (1048576)
DEFAULT_BLOCK_SIZE = 1048576
# # the size to read when scanning for a non-combining character
# SCAN_SIZE = 512
# class _Reader: # pylint: disable=too-few-public-methods
# """Stream reader that accounts for combining characters"""
# def read(self, size):
# """Read data
# Arguments
# ---------
# size : integer
# The amount of data to read
# Returns
# -------
# The data read from the stream
# As with any stream, the length of the return may be less than the
# amount requested with a blank returned for EOF.
# Due to the nature and handling of combining characters, when a
# normalization form is specified, the return for reads from a text-mode
# stream may have more or less data than requested. Depending on the
# disposition of the stream, it is possible that the results could
# contain the entire contents of the stream regardless of how much data
# was requested.
# """
# form = self._form
# if form is None:
# # no normalization form means a direct read
# return self._fobj.read(size)
# fobj_read = self._fobj.read
# prev_extra = self._prev_extra
# if prev_extra:
# # include the previous extra data with the buffer
# buf = prev_extra
# buf_length = len(buf)
# if buf_length < size:
# # only execute a read if the previous extra data cannot
# # satisify the request
# buf += fobj_read(size - buf_length)
# else:
# buf = fobj_read(size)
# if isinstance(buf, bytes):
# if form is not None:
# raise ValueError('normalization not supported on binary '
# 'streams')
# scan_size = SCAN_SIZE
# # retrieve extra data to ensure the boundary does not split combined
# # characters
# extra = fobj_read(scan_size)
# while extra:
# # find the first occurrence of a non-combing character in the extra
# # data
# result = next(filterfalse(lambda x: combining(x[1]),
# enumerate(extra)), None)
# if result is not None:
# # a non-combining character was found in the extra data
# idx = result[0]
# # add all of the extra data upto the non-combining character
# # to the buffer
# buf += extra[:idx]
# # store all of the extra data from the non-combining character
# # on so it will be added to the buffer on the next read
# self._prev_extra = extra[idx:]
# break
# # if there is no occurrence of a non-combining character in the
# # extra data then add the extra data to the buffer and try again
# buf += extra
# extra = fobj_read(scan_size)
# else:
# self._prev_extra = '' # no extra data was read or it was
# # already added to the buffer
# return normalize(form, buf)
# def __init__(self, fobj, form=None):
# """
# Arguments
# ---------
# fobj : stream
# The stream from which to read
# form : string
# The normalization form to use
# The stream must be opened for reading and must be in blocking mode.
# If form is specified then the returned data is normalized with that
# form.
# """
# if form not in {'NFC', 'NFKC', 'NFD', 'NFKD', None}:
# raise ValueError('invalid normalization form')
# self._fobj = fobj
# self._form = form
# self._prev_extra = ''
class StreamExhausted(Exception): # pylint: disable=too-few-public-methods
    """Raised internally when no more data can be read from the stream"""
class ReadLines: # pylint: disable=too-many-instance-attributes
    """Iterator to read lines from a stream using an arbitrary delimiter
    Lines are produced by iteration (__next__); peek() allows inspecting
    forth-coming data without consuming it.  The delimiter may be
    str/bytes or a regex-like object (anything with a search() method).
    """
    def peek(self, size=None):
        """Peek into the stream/buffer without advancing the current read
        state
        Arguments
        ---------
        size : integer
            The amount of data to read
        Returns
        -------
        If size is specified then the returned amount is the same as if the
        stream were being peek()'ed directly, i.e. the amount will include upto
        the amount requested depending on how much data there is.
        If size is omitted or None, the forth-coming delimited line will be
        returned.
        """
        if size is None: # request to peek at a line
            try:
                return self._get_line(consume=False)
            except StreamExhausted:
                # NOTE(review): returns str '' even for binary streams;
                # callers appear to rely only on its falsiness
                return ''
        if size < 0:
            raise ValueError('invalid size: {}'.format(size))
        if size == 0:
            return ''
        # truncate the buffer
        buf, eof = self._buf[self._idx:], self._eof
        fobj_read = self._fobj.read
        block_size = self._block_size
        # determine if more data is needed to satisfy the request
        extra_needed = size - len(buf)
        # while the steam has not been exhausted and more data is needed...
        while not eof and extra_needed:
            # determine how much data to read(in multiples of the block
            # size) in order to satisfy the request
            to_read = math.ceil(extra_needed / block_size) * block_size
            tmp_buf = fobj_read(to_read)
            if tmp_buf:
                # more data has been received so it is added to the buffer
                buf += tmp_buf
                # determine if the read has satisfied the request
                extra_needed = size - len(buf)
            else:
                self._eof = eof = True # no data has been received so EOF
        # buffer was truncated
        self._buf, self._idx = buf, 0
        return buf[:size]
    def __iter__(self):
        # the object is its own iterator
        return self
    def __next__(self):
        try:
            return self._get_line(consume=True)
        except StreamExhausted:
            raise StopIteration
    def _get_line(self, consume):
        """Get the next/cached line
        Arguments
        ---------
        consume : boolean
            Indicator on whether the line is to be consumed
        Returns
        -------
        The next line in the buffer/stream if no line has been cached or the
        cached line from a previous call
        This call will raise a StreamExhausted exception if there are no cached
        lines available and there are no more lines to be read.
        """
        line = self._line
        if line is not None:
            # a cached line is available
            delimiter_pos = self._delimiter_pos
            if consume:
                # if consume is True then ensure that the next call will get
                # the next line
                self._line, self._delimiter_pos = None, None
            if self.strip_delimiter:
                return line[:delimiter_pos]
            return line
        # get the next line from the buffer/stream
        line, delimiter_pos = self._get_next_line()
        if consume:
            # if consume is True then this line will not be cached
            self._line, self._delimiter_pos = None, None
        else:
            # cache the line
            self._line, self._delimiter_pos = line, delimiter_pos
        if self.strip_delimiter:
            return line[:delimiter_pos]
        return line
    def _get_next_line(self): # pylint: disable=too-many-branches
        """Get the next line
        Returns
        -------
        A two-tuple of the next line in the stream and the index of where,
        within the line, the delimiter starts if it is present or the length of
        the line if it does not.
        This call will raise a StreamExhausted exception if there are no more
        lines to be read.
        """
        fobj_read = self._fobj.read
        block_size = self._block_size
        delimiter = self.delimiter
        buf, idx, eof = self._buf, self._idx, self._eof
        # searching starts at the idx
        search_idx = idx
        while True:
            # The delimiter is either str/bytes or a regex-like object
            if isinstance(delimiter, (str, bytes)):
                delimiter_start = buf.find(delimiter, search_idx)
                if delimiter_start != -1:
                    # the length of the delimiter is added to where the
                    # delimiter starts to get the index of where it ends and
                    # the index attribute is set to indicate where in the
                    # buffer the next line begins
                    self._idx = end = delimiter_start + len(delimiter)
                    return buf[idx:end], delimiter_start - idx
                # a match was not found but if the delimiter is more than one
                # character then the delimiter could have been split so an
                # offset is provided to start the search within the existing
                # buffer
                search_offset = len(delimiter) - 1
            else:
                result = delimiter.search(buf, # pylint: disable=no-member
                                          search_idx)
                if result:
                    delimiter_start = result.start()
                    end = result.end()
                    if end != result.endpos:
                        # if the match is not at the end of the buffer then it
                        # is an exact match regardless of whether the regex is
                        # greedy
                        # the index attribute is set to indicate where in the
                        # buffer the next line begins
                        self._idx = end
                        return buf[idx:end], delimiter_start - idx
                    # if the match is at the end of the buffer then reading
                    # more could result in a better match if the regex is
                    # greedy
                    # since a match was found, searching can begin at the point
                    # where the match started
                    search_offset = end - delimiter_start
                else:
                    # the delimiter was not found in the buffer
                    delimiter_start = -1
                    # the buffer needs to be scanned from the beginning
                    search_offset = len(buf) - idx
            if eof: # no more data is forth-coming
                # ensure that another call will result in no search being
                # performed
                self._buf, self._idx = self._empty_buf, 0
                end = len(buf)
                if idx < end:
                    # there is unconsumed data in the buffer
                    # it is possible that a match exists but an attempt is
                    # being made to find a better match
                    if delimiter_start == -1:
                        # if there was no previous delimiter match then the
                        # final line contains no delimiter
                        delimiter_start = end
                    return buf[idx:end], delimiter_start - idx
                raise StreamExhausted
            # truncate the buffer
            buf, idx = buf[idx:], 0
            # search should commence at the where the buffer ends minus any
            # offset that was previously provided
            search_idx = len(buf) - search_offset
            if search_idx < 0:
                # ensure the search index does not start on a negative value
                search_idx = 0
            # get more data
            more = fobj_read(block_size)
            buf += more
            self._buf = buf
            if not more:
                self._eof = eof = True
    @property
    def delimiter(self):
        """Delimiter getter"""
        return self._delimiter
    # backing store for the delimiter property (validated by the setter)
    _delimiter = None
    @delimiter.setter
    def delimiter(self, value):
        """Delimiter setter"""
        # str/bytes delimiters must be non-empty and match the stream mode;
        # regex-like delimiters must never produce zero-width matches
        if isinstance(value, (str, bytes)):
            if not value:
                raise ValueError('non-zero match delimiter is required')
            if isinstance(value, bytes) != self._binary:
                raise ValueError('delimiter type must match stream mode')
        elif hasattr(value, 'search'):
            test_text = b'test' if self._binary else 'test'
            try:
                result = value.search(test_text)
            except TypeError:
                raise ValueError('delimiter type must match stream mode')
            if result and result.start() == result.end():
                raise ValueError('non-zero match delimiter is required')
        else:
            raise ValueError('unknown type of delimiter: {}'
                             .format(repr(value)))
        self._delimiter = value
    def __init__(self, fobj, *, delimiter='\n', strip_delimiter=False,
                 block_size=DEFAULT_BLOCK_SIZE):
        """
        Arguments
        ----------
        fobj : stream
            Stream from which to read
        delimiter : str, bytes, or regex
            Criteria for how a line is terminated
        strip_delimiter : boolean
            Indicator on whether the delimiter should be included
            in a returned line
        block_size : integer
            Size to use for reading from the stream
        Attributes
        ----------
        delimiter : str, bytes, or regex
            Criteria for how a line is terminated
        strip_delimiter : boolean
            Indicator on whether the delimiter should be included
            in a returned line
        The stream must be opened for reading and must be blocking.
        The *delimiter* type should match the mode of *fobj*. If *delimiter* is
        str/bytes then the find() method of the internal buffer will be used.
        If *delimiter* is regex then its search() method will be used.
        The *delimiter* should match one or more characters.
        """
        # an initial read determines whether the stream is binary or text
        buf = fobj.read(block_size)
        if isinstance(buf, bytes):
            self._binary = True
            self._empty_buf = b''
        else:
            self._binary = False
            self._empty_buf = '' # pylint: disable=redefined-variable-type
        self._fobj = fobj
        self.strip_delimiter = strip_delimiter
        self._block_size = block_size
        self.delimiter = delimiter
        self._buf, self._idx, self._eof = buf, 0, not buf
        self._line, self._delimiter_pos = None, None
| |
"""logging facilities
The way to use this is as follows:
* each module declares its own logger, using:
from .logger import create_logger
logger = create_logger()
* then each module uses logger.info/warning/debug/etc according to the
level it believes is appropriate:
logger.debug('debugging info for developers or power users')
logger.info('normal, informational output')
logger.warning('warn about a non-fatal error or sth else')
logger.error('a fatal error')
... and so on. see the `logging documentation
<https://docs.python.org/3/howto/logging.html#when-to-use-logging>`_
for more information
* console interaction happens on stderr, that includes interactive
reporting functions like `help`, `info` and `list`
* ...except ``input()`` is special, because we can't control the
stream it is using, unfortunately. we assume that it won't clutter
stdout, because interaction would be broken then anyways
* what is output on INFO level is additionally controlled by commandline
flags
"""
import inspect
import json
import logging
import logging.config
import logging.handlers # needed for handlers defined there being configurable in logging.conf file
import os
import warnings
configured = False
# use something like this to ignore warnings:
# warnings.filterwarnings('ignore', r'... regex for warning message to ignore ...')
def _log_warning(message, category, filename, lineno, file=None, line=None):
    # route warnings through the logging system, not stderr or other files
    logger = create_logger(__name__)
    # the warning will look like it comes from here, but the formatted
    # message carries the real origin (filename:lineno)
    msg = "{0}:{1}: {2}: {3}".format(filename, lineno, category.__name__, message)
    logger.warning(msg)
def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False):
    """setup logging module according to the arguments provided
    if conf_fname is given (or the config file name can be determined via
    the env_var, if given): load this logging configuration.
    otherwise, set up a stream handler logger on stderr (by default, if no
    stream is provided).
    if is_serve == True, we configure a special log format as expected by
    the borg client log message interceptor.
    """
    # NOTE: the 'json' parameter deliberately shadows the json module in this
    # function; only JsonFormatter (defined in this module) needs the module.
    global configured
    err_msg = None
    if env_var:
        # the environment variable takes precedence over the argument
        conf_fname = os.environ.get(env_var, conf_fname)
    if conf_fname:
        try:
            conf_fname = os.path.abspath(conf_fname)
            # we open the conf file here to be able to give a reasonable
            # error message in case of failure (if we give the filename to
            # fileConfig(), it silently ignores unreadable files and gives
            # unhelpful error msgs like "No section: 'formatters'"):
            with open(conf_fname) as f:
                logging.config.fileConfig(f)
            configured = True
            logger = logging.getLogger(__name__)
            logger.debug('using logging configuration read from "{0}"'.format(conf_fname))
            warnings.showwarning = _log_warning
            return None
        except Exception as err: # XXX be more precise
            # remember the failure; it is logged once the fallback works
            err_msg = str(err)
    # if we did not / not successfully load a logging configuration, fallback to this:
    logger = logging.getLogger('')
    handler = logging.StreamHandler(stream)
    if is_serve and not json:
        fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s'
    else:
        fmt = '%(message)s'
    formatter = JsonFormatter(fmt) if json else logging.Formatter(fmt)
    handler.setFormatter(formatter)
    borg_logger = logging.getLogger('borg')
    borg_logger.formatter = formatter
    borg_logger.json = json
    if configured and logger.handlers:
        # The RepositoryServer can call setup_logging a second time to adjust the output
        # mode from text-ish is_serve to json is_serve.
        # Thus, remove the previously installed handler, if any.
        logger.handlers[0].close()
        logger.handlers.clear()
    logger.addHandler(handler)
    logger.setLevel(level.upper())
    configured = True
    logger = logging.getLogger(__name__)
    if err_msg:
        logger.warning('setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg))
    logger.debug('using builtin fallback logging configuration')
    warnings.showwarning = _log_warning
    return handler
def find_parent_module():
    """Return the name of the first calling module that is not this one.

    Falls back to this module's own __name__ if the stack cannot be
    walked (e.g. a frame without an associated module).
    """
    try:
        caller = inspect.currentframe().f_back
        mod = inspect.getmodule(caller)
        # skip frames that belong to this module (or have no module at all)
        while mod is None or mod.__name__ == __name__:
            caller = caller.f_back
            mod = inspect.getmodule(caller)
        return mod.__name__
    except AttributeError:
        # somehow we failed to find our module; report ourselves instead
        return __name__
def create_logger(name=None):
    """lazily create a Logger object with the proper path, which is returned by
    find_parent_module() by default, or is provided via the commandline
    this is really a shortcut for:
        logger = logging.getLogger(__name__)
    we use it to avoid errors and provide a more standard API.
    We must create the logger lazily, because this is usually called from
    module level (and thus executed at import time - BEFORE setup_logging()
    was called). By doing it lazily we can do the setup first, we just have to
    be careful not to call any logger methods before the setup_logging() call.
    If you try, you'll get an exception.
    """
    class LazyLogger:
        def __init__(self, name=None):
            self.__name = name or find_parent_module()
            self.__real_logger = None

        @property
        def __logger(self):
            # create the real logger on first use; refuse to do so before
            # setup_logging() has run
            if self.__real_logger is None:
                if not configured:
                    raise Exception("tried to call a logger before setup_logging() was called")
                self.__real_logger = logging.getLogger(self.__name)
                if self.__name.startswith('borg.debug.') and self.__real_logger.level == logging.NOTSET:
                    self.__real_logger.setLevel('WARNING')
            return self.__real_logger

        def getChild(self, suffix):
            return LazyLogger(self.__name + '.' + suffix)

        def setLevel(self, *args, **kw):
            return self.__logger.setLevel(*args, **kw)

        def __forward(self, method, args, kw):
            # single place for the msgid handling that was previously
            # duplicated across all seven logging methods: move our custom
            # 'msgid' kwarg into the stdlib 'extra' dict
            if 'msgid' in kw:
                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
            return getattr(self.__logger, method)(*args, **kw)

        def log(self, *args, **kw):
            return self.__forward('log', args, kw)

        def exception(self, *args, **kw):
            return self.__forward('exception', args, kw)

        def debug(self, *args, **kw):
            return self.__forward('debug', args, kw)

        def info(self, *args, **kw):
            return self.__forward('info', args, kw)

        def warning(self, *args, **kw):
            return self.__forward('warning', args, kw)

        def error(self, *args, **kw):
            return self.__forward('error', args, kw)

        def critical(self, *args, **kw):
            return self.__forward('critical', args, kw)

    return LazyLogger(name)
class JsonFormatter(logging.Formatter):
    """Formatter that renders each log record as one JSON object."""

    # record attributes copied verbatim into the JSON payload (when truthy)
    RECORD_ATTRIBUTES = (
        'levelname',
        'name',
        'message',
        # msgid is an attribute we made up in Borg to expose a non-changing handle for log messages
        'msgid',
    )

    # Other attributes that are not very useful but do exist:
    # processName, process, relativeCreated, stack_info, thread, threadName
    # msg == message
    # *args* are the unformatted arguments passed to the logger function, not useful now,
    # become useful if sanitized properly (must be JSON serializable) in the code +
    # fixed message IDs are assigned.
    # exc_info, exc_text are generally uninteresting because the message will have that

    def format(self, record):
        # delegate to the base class so record.message gets populated
        super().format(record)
        payload = {
            'type': 'log_message',
            'time': record.created,
            'message': '',
            'levelname': 'CRITICAL',
        }
        # overwrite the defaults with any truthy record attributes
        payload.update({attr: getattr(record, attr)
                        for attr in self.RECORD_ATTRIBUTES
                        if getattr(record, attr, None)})
        return json.dumps(payload)
| |
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.clock import Clock
from kivy_soil import kb_system
class AppRecycleBoxLayout(RecycleBoxLayout):
selected_widgets = None
'''Set with indexes of selected widgets'''
sel_first = -1
'''Index of first selected widget of current multiselect'''
sel_last = -1
'''Index of last selected widget of current multiselect'''
desel_index = 0
'''Index of last deselected widget'''
def __init__(self, **kwargs):
super(AppRecycleBoxLayout, self).__init__(**kwargs)
self.fbind('children', self.update_selected)
self.selected_widgets = set()
def on_data_update_sel(self, len_old, len_new):
'''Triggers selection update when data is changed'''
Clock.schedule_once(
lambda *a: self.on_data_next_frame_task(len_old, len_new), 0)
def on_data_next_frame_task(self, len_old, len_new):
'''Updates selection when data is changed'''
if self.sel_last > len_new:
if len_new < len_old:
self.sel_last = len_new - 1
if self.sel_first > len_new - 1:
self.sel_first = self.sel_last
self.selected_widgets.add(self.sel_last)
for i in list(self.selected_widgets):
if i > len_new - 1:
self.selected_widgets.remove(i)
self.update_selected()
self.scroll_to_selected()
def _get_modifier_mode(self):
mode = ''
if kb_system.held_ctrl and kb_system.held_shift:
mode = ''
elif kb_system.held_ctrl:
mode = 'ctrl'
elif kb_system.held_shift:
mode = 'shift'
return mode
def on_arrow_up(self):
'''Call this when up selection button is pressed,
Selects previous widget, supports multiselect with shift modifier'''
if self.desel_index and self.sel_last == -1:
self.sel_last = self.desel_index
self.desel_index = 0
if self.sel_last is 0:
return
mode = self._get_modifier_mode()
if self.children:
if mode in ('', 'ctrl'):
self.sel_first = self.sel_last - 1
self.sel_last = self.sel_first
self.selected_widgets = {self.sel_first}
elif mode == 'shift':
new_last = self.sel_last
new_last -= 1
if new_last >= self.sel_first:
self.add_remove_selected_set(self.sel_last)
elif new_last not in self.selected_widgets:
self.add_remove_selected_set(new_last)
self.sel_last = new_last
self.update_selected()
self.scroll_to_selected()
def on_arrow_down(self):
'''Call this when down selection button is pressed,
Selects next widget, supports multiselect with shift modifier'''
if self.desel_index and self.sel_last == -1:
self.sel_last = self.desel_index
self.desel_index = 0
sel_max = len(self.parent.data) - 1
mode = self._get_modifier_mode()
if self.children:
if mode in ('', 'ctrl'):
self.sel_first = min(self.sel_last + 1, sel_max)
self.sel_last = self.sel_first
self.selected_widgets = {self.sel_first}
elif mode == 'shift':
new_last = min(self.sel_last, sel_max)
if new_last != sel_max:
new_last += 1
if new_last <= self.sel_first:
self.add_remove_selected_set(self.sel_last)
elif new_last not in self.selected_widgets:
self.add_remove_selected_set(new_last)
self.sel_last = new_last
self.update_selected()
self.scroll_to_selected()
def select_with_touch(self, index):
'''Call to select with touch, supports single and multiple
multiselect modes with ctrl and shift modifiers'''
mode = self._get_modifier_mode()
if self.sel_first == -1:
self.sel_first = 0
if mode in ('', 'ctrl'):
self.sel_first = index
self.sel_last = self.sel_first
if not mode:
self.selected_widgets = {self.sel_first}
else:
self.add_remove_selected_set(index)
elif mode == 'shift':
self.sel_last = index
if self.sel_first < index:
start, end = self.sel_first, index
else:
start, end = index, self.sel_first
self.selected_widgets = set()
for x in range(start, end+1):
self.selected_widgets.add(x)
self.update_selected()
def select_all(self):
for i in range(len(self.parent.data)):
self.selected_widgets.add(i)
self.update_selected()
def deselect_all(self):
self.selected_widgets = set()
self.desel_index = self.sel_last
self.sel_first, self.sel_last = -1, -1
self.update_selected()
def add_remove_selected_set(self, index, index2=None):
'''Toggles index numbers in self.selected_widgets'''
if index in self.selected_widgets:
self.selected_widgets.remove(index)
if index2 and index2 in self.selected_widgets:
self.selected_widgets.remove(index2)
else:
self.selected_widgets.add(index)
def open_context_menu(self, pos=None):
widget = None
ret = None
if self.sel_last != -1:
for x in self.children:
if x.index == self.sel_last:
pos = x.to_window(x.right, x.y)
widget, widget_index = x, x.index
break
if widget:
ret = self.context_menu_function(widget, widget_index, pos)
else:
ret = self.context_menu_function(self, None, self.pos)
return ret
    def context_menu_function(self, child, index, pos):
        '''Stub method for context menu.

        Override in a subclass to actually show a menu.

        :param child: the widget the menu applies to, or this widget
            itself when nothing is selected / in view.
        :param index: data index of ``child``, or ``None``.
        :param pos: window position at which to open the menu.
        '''
        pass
def get_widget_from_index(self, index):
'''Returns child with argument index when it is in view,
otherwise returns nothing'''
for x in self.children:
if x.index == index:
return x
def get_selected_widget(self, *args):
'''Returns child selected child when it is in view,
otherwise returns nothing'''
for x in self.children:
if x.selected:
return x
    def scroll_to_selected(self, *args):
        '''Scrolls the parent view so the self.sel_last index is visible.

        Extra positional args are accepted so this can be bound directly
        as an event callback.
        '''
        self.parent.scroll_to_index(self.sel_last)
def update_selected(self, *args):
'''Calls apply_selection() on newly selected and deselected widgets'''
for x in self.children:
if x.index in self.selected_widgets:
if not x.selected:
x.apply_selection(True)
else:
if x.selected:
x.apply_selection(False)
if x.selected_last and x.index != self.sel_last:
x.selected_last = False
elif x.index == self.sel_last:
x.selected_last = True
| |
#!/usr/bin/env python
import os, sys, subprocess, logging, dxpy, json, re, socket, getpass, urlparse, datetime, requests, time, csv
import common
import dateutil.parser
logger = logging.getLogger(__name__)
EPILOG = '''Notes:
Examples:
%(prog)s
'''
DEFAULT_APPLET_PROJECT = 'E3 ChIP-seq'
def get_args():
    """Parse and return command-line arguments.

    Also configures root logging as a side effect: DEBUG level when
    --debug is given, otherwise the logging default (WARNING).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description=__doc__, epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('analysis_ids', nargs='*', default=None)
    parser.add_argument('--infile', help='File containing analysis IDs to accession', type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument('--outfile', help='tsv table of files with metadata', type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument('--outdir', help='Directory for downloaded files', default=os.getcwd())
    parser.add_argument('--assembly', help='Genome assembly like hg19 or mm10', required=True)
    parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
    parser.add_argument('--project', help="Project name or ID", default=dxpy.WORKSPACE_ID)
    parser.add_argument('--key', help="The keypair identifier from the keyfile.", default='www')
    parser.add_argument('--keyfile', help="The keyfile.", default=os.path.expanduser("~/keypairs.json"))
    parser.add_argument('--tag', help="A short string to add to the composite track longLabel")
    parser.add_argument('--dryrun', help="Don't POST or upload", default=False, action='store_true')
    parser.add_argument('--force', help="Try to POST and upload even if the file appears to be a duplicate", default=False, action='store_true')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    else: #use the default logging level
        logging.basicConfig(format='%(levelname)s:%(message)s')
    return args
def after(date1, date2):
    """Return True when date string date1 parses to a later moment than date2."""
    parse = dateutil.parser.parse
    return parse(date1) > parse(date2)
def get_rep_bams(experiment, keypair, server):
    """Return (rep1_bam, rep2_bam) file objects for an experiment.

    Resolves the biological replicate number (biorep_n) for each fastq
    and bam in the experiment's original_files, keeps only the most
    recent bam per replicate, and returns the bam file objects for
    biological replicates 1 and 2.  Returns None when any bam's
    replicate number is ambiguous.
    """
    original_files = [common.encoded_get(urlparse.urljoin(server,'%s' %(uri)), keypair) for uri in experiment.get('original_files')]
    # Resolve the biorep_n for each fastq from its replicate object.
    for fastq in [f for f in original_files if f.get('file_format') == 'fastq']:
        replicate = common.encoded_get(urlparse.urljoin(server,'%s' %(fastq.get('replicate'))), keypair)
        fastq.update({'biorep_n' : replicate.get('biological_replicate_number')})
    # Resolve each bam's biorep_n from the fastqs it was derived from.
    bams = [f for f in original_files if f.get('file_format') == 'bam']
    for bam in bams:
        biorep_ns = set()
        for derived_from_uri in bam.get('derived_from'):
            derived_from_accession = os.path.basename(derived_from_uri.strip('/')) #this assumes frame=object
            biorep_ns.add(next(f.get('biorep_n') for f in original_files if f.get('accession') == derived_from_accession))
        if len(biorep_ns) != 1:
            # Ambiguous lineage: a bam should map to exactly one replicate.
            print >> sys.stderr, "%s expected 1 biorep_n, found %d, skipping." %(bam.get('accession'), len(biorep_ns))
            return
        bam.update({'biorep_n': biorep_ns.pop()})
    # Keep only the most recent bam per replicate: drop any bam that has a
    # newer sibling with the same biorep_n.
    for bam in bams:
        if any(other.get('biorep_n') == bam.get('biorep_n')
               and after(other.get('date_created'), bam.get('date_created'))
               for other in bams):
            original_files.remove(bam)
    rep1_bam = next(f for f in original_files if f.get('file_format') == 'bam' and f.get('biorep_n') == 1)
    rep2_bam = next(f for f in original_files if f.get('file_format') == 'bam' and f.get('biorep_n') == 2)
    return rep1_bam, rep2_bam
def accession_file(f, keypair, server, dryrun, force):
    """POST one file object to the ENCODE server and upload its payload.

    Steps: skip if the DX file is already tagged with an ENCODE accession;
    check the server for duplicates (same submitted_file_name and size);
    download the file and compute its md5; POST the metadata; upload the
    data to S3 with the returned credentials; remove the local copy.

    Returns the final ENCODEd file object, or None when the file was
    skipped or the POST failed.
    """
    already_accessioned = False
    dx = f.pop('dx')
    for tag in dx.tags:
        m = re.search(r'(ENCFF\d{3}\D{3})|(TSTFF\D{6})', tag)
        if m:
            logger.info('%s appears to contain ENCODE accession number in tag %s ... skipping' %(dx.get_id(),m.group(0)))
            already_accessioned = True
            break
    if already_accessioned and not force:
        return
    # Size of the file as computed when the metadata was assembled; used
    # to decide whether a same-named server file is a true duplicate.
    submitted_file_size = f.get('file_size')
    url = urlparse.urljoin(server, 'search/?type=file&submitted_file_name=%s&format=json&frame=object' %(f.get('submitted_file_name')))
    r = requests.get(url,auth=keypair)
    try:
        r.raise_for_status()
        if r.json()['@graph']:
            for duplicate_item in r.json()['@graph']:
                if duplicate_item.get('status') == 'deleted':
                    logger.info("A potential duplicate file was found but its status=deleted ... proceeding")
                    duplicate_found = False
                else:
                    logger.info("Found potential duplicate: %s" %(duplicate_item.get('accession')))
                    if submitted_file_size == duplicate_item.get('file_size'):
                        logger.info("%s %s: File sizes match, assuming duplicate." %(str(submitted_file_size), duplicate_item.get('file_size')))
                        duplicate_found = True
                        break
                    else:
                        logger.info("%s %s: File sizes differ, assuming new file." %(str(submitted_file_size), duplicate_item.get('file_size')))
                        duplicate_found = False
        else:
            duplicate_found = False
    except Exception:
        logger.warning('Duplicate accession check failed: %s %s' % (r.status_code, r.reason))
        logger.debug(r.text)
        duplicate_found = False
    if duplicate_found:
        if force:
            logger.info("Duplicate detected, but force=true, so continuing")
        else:
            logger.info("Duplicate detected, skipping")
            return
    local_fname = dx.name
    logger.info("Downloading %s" %(local_fname))
    dxpy.download_dxfile(dx.get_id(),local_fname)
    # Keep the md5 in a local too: the 409-conflict handler below needs it.
    calculated_md5 = common.md5(local_fname)
    f.update({'md5sum': calculated_md5})
    f['notes'] = json.dumps(f.get('notes'))
    url = urlparse.urljoin(server,'files/')
    if dryrun:
        logger.info("Dry run. Would POST %s" %(f))
        new_file_object = {}
    else:
        r = requests.post(url, auth=keypair, headers={'content-type': 'application/json'}, data=json.dumps(f))
        try:
            r.raise_for_status()
            new_file_object = r.json()['@graph'][0]
            logger.info("New accession: %s" %(new_file_object.get('accession')))
        except Exception:
            logger.warning('POST file object failed: %s %s' % (r.status_code, r.reason))
            logger.debug(r.text)
            new_file_object = {}
            if r.status_code == 409:
                try: #cautiously add a tag with the existing accession number
                    if calculated_md5 in r.json().get('detail'):
                        url = urlparse.urljoin(server,'/search/?type=file&md5sum=%s' %(calculated_md5))
                        r = requests.get(url,auth=keypair)
                        r.raise_for_status()
                        accessioned_file = r.json()['@graph'][0]
                        existing_accession = accessioned_file['accession']
                        dx.add_tags([existing_accession])
                        logger.info('Already accessioned. Added %s to dxfile tags' %(existing_accession))
                except Exception:
                    logger.info('Conflict does not appear to be md5 ... continuing')
    if new_file_object:
        creds = new_file_object['upload_credentials']
        env = os.environ.copy()
        env.update({
            'AWS_ACCESS_KEY_ID': creds['access_key'],
            'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
            'AWS_SECURITY_TOKEN': creds['session_token'],
        })
        logger.info("Uploading file.")
        start = time.time()
        try:
            subprocess.check_call(['aws', 's3', 'cp', local_fname, creds['upload_url'], '--quiet'], env=env)
        except subprocess.CalledProcessError as e:
            # The aws command returns a non-zero exit code on error.
            logger.error("Upload failed with exit code %d" % e.returncode)
            upload_returncode = e.returncode
        else:
            upload_returncode = 0
        end = time.time()
        duration = end - start
        logger.info("Uploaded in %.2f seconds" % duration)
        dx.add_tags([new_file_object.get('accession')])
    else:
        upload_returncode = -1
    # Remove the local copy to save space, whether or not the upload worked.
    try:
        os.remove(local_fname)
    except OSError:
        pass
    if not new_file_object:
        # POST failed or dry run: there is no accession to fetch.
        return
    # NOTE: keypair must go to encoded_get, not urljoin, so the final GET
    # is authenticated.
    return common.encoded_get(urlparse.urljoin(server,'/files/%s' %(new_file_object.get('accession'))), keypair)
def analysis_files(analysis_id, keypair, server, assembly):
    """Build the list of file metadata dicts for one DNAnexus analysis.

    Maps each named output of the "ENCODE Peaks" and "Overlap" stages to
    an ENCODEd file object (format, output_type, derived_from, dataset,
    size, notes with provenance).  Returns None when the analysis name
    carries no ENCSR accession.
    """
    analysis_id = analysis_id.strip()
    analysis = dxpy.describe(analysis_id)
    project = analysis.get('project')
    # The experiment accession is encoded in the executable name,
    # e.g. "ENCSR123ABC Peaks".
    m = re.match('^(ENCSR[0-9]{3}[A-Z]{3}) Peaks',analysis['executableName'])
    if m:
        experiment_accession = m.group(1)
    else:
        logger.info("No accession in %s, skipping." %(analysis['executableName']))
        return
    experiment = common.encoded_get(urlparse.urljoin(server,'/experiments/%s' %(experiment_accession)), keypair)
    bams = get_rep_bams(experiment, keypair, server)
    rep1_bam = bams[0]['accession']
    rep2_bam = bams[1]['accession']
    common_metadata = {
        'assembly': assembly,
        'lab': 'encode-processing-pipeline',
        'award': 'U41HG006992',
    }
    narrowpeak_metadata = common.merge_dicts(
        {'file_format': 'bed_narrowPeak', 'file_format_specifications': ['ENCODE:narrowPeak.as'], 'output_type': 'peaks'}, common_metadata)
    replicated_narrowpeak_metadata = common.merge_dicts(
        {'file_format': 'bed_narrowPeak', 'file_format_specifications': ['ENCODE:narrowPeak.as'], 'output_type': 'replicated peaks'}, common_metadata)
    gappedpeak_metadata = common.merge_dicts(
        {'file_format': 'bed_gappedPeak', 'file_format_specifications': ['ENCODE:gappedPeak.as'], 'output_type': 'peaks'}, common_metadata)
    replicated_gappedpeak_metadata = common.merge_dicts(
        {'file_format': 'bed_gappedPeak', 'file_format_specifications': ['ENCODE:gappedPeak.as'], 'output_type': 'replicated peaks'}, common_metadata)
    narrowpeak_bb_metadata = common.merge_dicts(
        {'file_format': 'narrowPeak', 'file_format_specifications': ['ENCODE:narrowPeak.as'], 'output_type': 'peaks'}, common_metadata)
    replicated_narrowpeak_bb_metadata = common.merge_dicts(
        {'file_format': 'narrowPeak', 'file_format_specifications': ['ENCODE:narrowPeak.as'], 'output_type': 'replicated peaks'}, common_metadata)
    gappedpeak_bb_metadata = common.merge_dicts(
        {'file_format': 'gappedPeak', 'file_format_specifications': ['ENCODE:gappedPeak.as'], 'output_type': 'peaks'}, common_metadata)
    replicated_gappedpeak_bb_metadata = common.merge_dicts(
        {'file_format': 'gappedPeak', 'file_format_specifications': ['ENCODE:gappedPeak.as'], 'output_type': 'replicated peaks'}, common_metadata)
    fc_signal_metadata = common.merge_dicts(
        {'file_format': 'bigWig', 'output_type': 'fold change over control'}, common_metadata)
    pvalue_signal_metadata = common.merge_dicts(
        {'file_format': 'bigWig', 'output_type': 'signal p-value'}, common_metadata)
    stage_outputs = {
        "ENCODE Peaks" : {
            'files': [
                common.merge_dicts({'name': 'rep1_narrowpeaks', 'derived_from': [rep1_bam]}, narrowpeak_metadata),
                common.merge_dicts({'name': 'rep2_narrowpeaks', 'derived_from': [rep2_bam]}, narrowpeak_metadata),
                common.merge_dicts({'name': 'pooled_narrowpeaks', 'derived_from': [rep1_bam, rep2_bam]}, narrowpeak_metadata),
                common.merge_dicts({'name': 'rep1_narrowpeaks_bb', 'derived_from': [rep1_bam]}, narrowpeak_bb_metadata),
                common.merge_dicts({'name': 'rep2_narrowpeaks_bb', 'derived_from': [rep2_bam]}, narrowpeak_bb_metadata),
                common.merge_dicts({'name': 'pooled_narrowpeaks_bb', 'derived_from': [rep1_bam, rep2_bam]}, narrowpeak_bb_metadata),
                common.merge_dicts({'name': 'rep1_gappedpeaks', 'derived_from': [rep1_bam]}, gappedpeak_metadata),
                common.merge_dicts({'name': 'rep2_gappedpeaks', 'derived_from': [rep2_bam]}, gappedpeak_metadata),
                common.merge_dicts({'name': 'pooled_gappedpeaks', 'derived_from': [rep1_bam, rep2_bam]}, gappedpeak_metadata),
                common.merge_dicts({'name': 'rep1_gappedpeaks_bb', 'derived_from': [rep1_bam]}, gappedpeak_bb_metadata),
                common.merge_dicts({'name': 'rep2_gappedpeaks_bb', 'derived_from': [rep2_bam]}, gappedpeak_bb_metadata),
                common.merge_dicts({'name': 'pooled_gappedpeaks_bb', 'derived_from': [rep1_bam, rep2_bam]}, gappedpeak_bb_metadata),
                common.merge_dicts({'name': 'rep1_pvalue_signal', 'derived_from': [rep1_bam]}, pvalue_signal_metadata),
                common.merge_dicts({'name': 'rep2_pvalue_signal', 'derived_from': [rep2_bam]}, pvalue_signal_metadata),
                common.merge_dicts({'name': 'pooled_pvalue_signal', 'derived_from': [rep1_bam, rep2_bam]}, pvalue_signal_metadata),
                common.merge_dicts({'name': 'rep1_fc_signal', 'derived_from': [rep1_bam]}, fc_signal_metadata),
                common.merge_dicts({'name': 'rep2_fc_signal', 'derived_from': [rep2_bam]}, fc_signal_metadata),
                common.merge_dicts({'name': 'pooled_fc_signal', 'derived_from': [rep1_bam, rep2_bam]}, fc_signal_metadata)],
            'qc': []},
        "Overlap narrowpeaks": {
            'files': [
                common.merge_dicts({'name': 'overlapping_peaks', 'derived_from': [rep1_bam, rep2_bam]}, replicated_narrowpeak_metadata),
                common.merge_dicts({'name': 'overlapping_peaks_bb', 'derived_from': [rep1_bam, rep2_bam]}, replicated_narrowpeak_bb_metadata)],
            'qc': ['npeaks_in', 'npeaks_out', 'npeaks_rejected']},
        "Overlap gappedpeaks": {
            'files': [
                common.merge_dicts({'name': 'overlapping_peaks', 'derived_from': [rep1_bam, rep2_bam]}, replicated_gappedpeak_metadata),
                common.merge_dicts({'name': 'overlapping_peaks_bb', 'derived_from': [rep1_bam, rep2_bam]}, replicated_gappedpeak_bb_metadata)],
            'qc': ['npeaks_in', 'npeaks_out', 'npeaks_rejected']}
    }
    # NOTE: the experiment and rep bams were already fetched above; do not
    # re-fetch them here (the old second lookup also clobbered the
    # rep1_bam/rep2_bam accession strings with full file objects).
    files = []
    for (stage_name, outputs) in stage_outputs.iteritems():
        stage_metadata = next(s['execution'] for s in analysis.get('stages') if s['execution']['name'] == stage_name)
        for static_metadata in outputs['files']:
            output_name = static_metadata['name']
            dx = dxpy.DXFile(stage_metadata['output'][output_name], project=project)
            file_metadata = {
                'dx': dx,
                'notes': {
                    'dx-id': dx.get_id(),
                    'dx-createdBy': {
                        'job': stage_metadata['id'],
                        'executable': stage_metadata['executable'], #todo get applet ID
                        'user': stage_metadata['launchedBy']},
                    'qc': dict(zip(outputs['qc'],[stage_metadata['output'][metric] for metric in outputs['qc']]))}, #'aliases': ['ENCODE:%s-%s' %(experiment.get('accession'), static_metadata.pop('name'))],
                'dataset': experiment.get('accession'),
                'file_size': dx.describe().get('size'),
                'submitted_file_name': dx.get_proj_id() + ':' + '/'.join([dx.folder,dx.name])}
            file_metadata.update(static_metadata)
            files.append(file_metadata)
    return files
def main():
    """Accession analysis outputs: for each DNAnexus analysis ID, download
    the peak files and write one TSV row of metadata per file."""
    args = get_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    authid, authpw, server = common.processkey(args.key, args.keyfile)
    keypair = (authid,authpw)
    # IDs come from the command line, or one-per-line from --infile/stdin.
    if args.analysis_ids:
        ids = args.analysis_ids
    else:
        ids = args.infile
    formats = ['bed_narrowPeak', 'bed_gappedPeak']
    fieldnames = ['file','analysis','experiment','replicates','output_name','file_format','output_type','target','biosample_term_name','biosample_term_id','biosample_type','biosample_life_stage','biosample_age','biosample_organism']
    writer = csv.DictWriter(args.outfile, fieldnames, delimiter='\t')
    writer.writeheader()
    for (i, analysis_id) in enumerate(ids):
        analysis_id = analysis_id.rstrip()
        logger.info('%s' %(analysis_id))
        try:
            files = analysis_files(analysis_id, keypair, server, args.assembly)
        except Exception:
            logger.error('%s error finding analysis_files. Check experiment metadata.' %(analysis_id))
            # Without the continue, 'files' would be unbound (or stale from
            # the previous iteration) in the loop below.
            continue
        if not files:
            # analysis_files() returns None when the analysis carries no
            # ENCSR accession; nothing to report for it.
            continue
        for f in [f_obj for f_obj in files if f_obj.get('file_format') in formats]:
            fid = f['dx'].get_id()
            local_path = os.path.join(args.outdir,fid)
            if not os.path.isfile(local_path):
                if not os.path.exists(args.outdir):
                    os.makedirs(args.outdir)
                dxpy.download_dxfile(fid, local_path)
            replicates = []
            for derived_from in f['derived_from']:
                rep_ns = common.biorep_ns(derived_from, server, keypair)
                for r in rep_ns:
                    replicates.append(r)
            experiment = common.encoded_get(urlparse.urljoin(server,'/experiments/%s' %(f['dataset'])), keypair)
            rep = common.encoded_get(urlparse.urljoin(server, experiment['replicates'][0]), keypair)
            lib = common.encoded_get(urlparse.urljoin(server, rep['library']), keypair)
            biosample = common.encoded_get(urlparse.urljoin(server, lib['biosample']), keypair)
            writer.writerow({
                'file': fid,
                'analysis': analysis_id,
                'experiment': experiment.get('accession'),
                'replicates': replicates,
                'output_name': f.get('name'),
                'file_format': f.get('file_format'),
                'output_type': f.get('output_type'),
                'target': experiment.get('target'),
                'biosample_term_name': experiment.get('biosample_term_name'),
                'biosample_term_id': experiment.get('biosample_term_id'),
                'biosample_type': experiment.get('biosample_type'),
                'biosample_life_stage': biosample.get('life_stage'),
                'biosample_age': biosample.get('age'),
                'biosample_organism': biosample.get('organism')})
| |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid as stdlib_uuid
from oslo_policy import policy as oslo_policy
from oslo_utils import timeutils
import webob
from nova.api.openstack.compute import consoles as consoles_v21
from nova.compute import vm_states
from nova import exception
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
    """In-memory stand-in for the instance DB used by these tests.

    Lookups never miss: a stub instance is fabricated on first access
    to an unknown id or uuid.
    """

    def __init__(self):
        self.instances_by_id = {}
        self.ids_by_uuid = {}
        self.max_id = 0

    def return_server_by_id(self, context, id):
        """Return (fabricating if needed) the instance with this id."""
        if id not in self.instances_by_id:
            self._add_server(id=id)
        # Hand back a copy so callers cannot mutate the stored record.
        return dict(self.instances_by_id[id])

    def return_server_by_uuid(self, context, uuid):
        """Return (fabricating if needed) the instance with this uuid."""
        if uuid not in self.ids_by_uuid:
            self._add_server(uuid=uuid)
        return dict(self.instances_by_id[self.ids_by_uuid[uuid]])

    def _add_server(self, id=None, uuid=None):
        """Create and register a stub instance, minting id/uuid as needed."""
        new_id = self.max_id + 1 if id is None else id
        new_uuid = str(stdlib_uuid.uuid4()) if uuid is None else uuid
        self.instances_by_id[new_id] = stub_instance(new_id, uuid=new_uuid)
        self.ids_by_uuid[new_uuid] = new_id
        self.max_id = max(self.max_id, new_id)
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):
    """Return a dict mimicking a nova instance DB record for these tests."""
    if host is not None:
        host = str(host)
    key_data = 'FAKE' if key_name else ''
    # ReservationID isn't sent back, hack it in there.
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )
    else:
        server_name = name or "server%s" % id
    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "launched_at": timeutils.utcnow(),
        "terminated_at": timeutils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}
    return instance
class ConsolesControllerTestV21(test.NoDBTestCase):
    """Tests for the v2.1 consoles REST controller.

    Stubs out the DB layer with FakeInstanceDB and the console API with
    per-test fakes, then exercises create/show/index/delete plus their
    not-found and policy-denial paths.
    """

    def setUp(self):
        super(ConsolesControllerTestV21, self).setUp()
        self.flags(verbose=True)
        # Route DB instance lookups through the in-memory fake DB.
        self.instance_db = FakeInstanceDB()
        self.stub_out('nova.db.instance_get',
                      self.instance_db.return_server_by_id)
        self.stub_out('nova.db.instance_get_by_uuid',
                      self.instance_db.return_server_by_uuid)
        self.uuid = str(stdlib_uuid.uuid4())
        self.url = '/v2/fake/servers/%s/consoles' % self.uuid
        self._set_up_controller()

    def _set_up_controller(self):
        # Split out so subclasses can swap in a different controller version.
        self.controller = consoles_v21.ConsolesController()

    def test_create_console(self):
        def fake_create_console(cons_self, context, instance_id):
            self.assertEqual(instance_id, self.uuid)
            return {}
        self.stub_out('nova.console.api.API.create_console',
                      fake_create_console)
        req = fakes.HTTPRequest.blank(self.url)
        self.controller.create(req, self.uuid, None)

    def test_create_console_unknown_instance(self):
        # InstanceNotFound from the console API must surface as HTTP 404.
        def fake_create_console(cons_self, context, instance_id):
            raise exception.InstanceNotFound(instance_id=instance_id)
        self.stub_out('nova.console.api.API.create_console',
                      fake_create_console)
        req = fakes.HTTPRequest.blank(self.url)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, self.uuid, None)

    def test_show_console(self):
        def fake_get_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)
            pool = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            return dict(id=console_id, password='fake_password',
                    port='fake_port', pool=pool, instance_name='inst-0001')

        # show() flattens the pool into the console representation.
        expected = {'console': {'id': 20,
                                'port': 'fake_port',
                                'host': 'fake_hostname',
                                'password': 'fake_password',
                                'instance_name': 'inst-0001',
                                'console_type': 'fake_type'}}

        self.stub_out('nova.console.api.API.get_console', fake_get_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        res_dict = self.controller.show(req, self.uuid, '20')
        self.assertThat(res_dict, matchers.DictMatches(expected))

    def test_show_console_unknown_console(self):
        def fake_get_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFound(console_id=console_id)

        self.stub_out('nova.console.api.API.get_console', fake_get_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, self.uuid, '20')

    def test_show_console_unknown_instance(self):
        def fake_get_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFoundForInstance(
                instance_uuid=instance_id)

        self.stub_out('nova.console.api.API.get_console', fake_get_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, self.uuid, '20')

    def test_list_consoles(self):
        def fake_get_consoles(cons_self, context, instance_id):
            self.assertEqual(instance_id, self.uuid)

            pool1 = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            cons1 = dict(id=10, password='fake_password',
                    port='fake_port', pool=pool1)
            pool2 = dict(console_type='fake_type2',
                    public_hostname='fake_hostname2')
            cons2 = dict(id=11, password='fake_password2',
                    port='fake_port2', pool=pool2)
            return [cons1, cons2]

        # The index view only exposes id and console_type.
        expected = {'consoles':
                [{'console': {'id': 10, 'console_type': 'fake_type'}},
                 {'console': {'id': 11, 'console_type': 'fake_type2'}}]}

        self.stub_out('nova.console.api.API.get_consoles', fake_get_consoles)

        req = fakes.HTTPRequest.blank(self.url)
        res_dict = self.controller.index(req, self.uuid)
        self.assertThat(res_dict, matchers.DictMatches(expected))

    def test_delete_console(self):
        def fake_get_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)
            pool = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            return dict(id=console_id, password='fake_password',
                    port='fake_port', pool=pool)

        def fake_delete_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)

        self.stub_out('nova.console.api.API.get_console', fake_get_console)
        self.stub_out('nova.console.api.API.delete_console',
                      fake_delete_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        self.controller.delete(req, self.uuid, '20')

    def test_delete_console_unknown_console(self):
        def fake_delete_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFound(console_id=console_id)

        self.stub_out('nova.console.api.API.delete_console',
                      fake_delete_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, self.uuid, '20')

    def test_delete_console_unknown_instance(self):
        def fake_delete_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFoundForInstance(
                instance_uuid=instance_id)

        self.stub_out('nova.console.api.API.delete_console',
                      fake_delete_console)

        req = fakes.HTTPRequest.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, self.uuid, '20')

    def _test_fail_policy(self, rule, action, data=None):
        # Deny the given policy rule ("!") and check the action raises.
        rules = {
            rule: "!",
        }

        policy.set_rules(oslo_policy.Rules.from_dict(rules))
        req = fakes.HTTPRequest.blank(self.url + '/20')

        if data is not None:
            self.assertRaises(exception.PolicyNotAuthorized, action,
                              req, self.uuid, data)
        else:
            self.assertRaises(exception.PolicyNotAuthorized, action,
                              req, self.uuid)

    def test_delete_console_fail_policy(self):
        self._test_fail_policy("os_compute_api:os-consoles:delete",
                               self.controller.delete, data='20')

    def test_create_console_fail_policy(self):
        self._test_fail_policy("os_compute_api:os-consoles:create",
                               self.controller.create, data='20')

    def test_index_console_fail_policy(self):
        self._test_fail_policy("os_compute_api:os-consoles:index",
                               self.controller.index)

    def test_show_console_fail_policy(self):
        self._test_fail_policy("os_compute_api:os-consoles:show",
                               self.controller.show, data='20')
| |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
import itertools
import warnings
import numpy as np
import pandas as pd
import holoviews as hv
import bokeh.models
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized, kwargs_forwarded_to, deprecate
from lisa.datautils import df_filter_task_ids, series_rolling_apply, series_refit_index, df_refit_index, df_deduplicate, df_split_signals, df_add_delta, df_window, df_update_duplicates, df_combine_duplicates
from lisa.trace import requires_events, will_use_events_from, may_use_events, TaskID, CPU, MissingTraceEventError
from lisa._generic import TypedList
from lisa.notebook import _hv_neutral, plot_signal, _hv_twinx
class StateInt(int):
    """
    A tweaked int for :class:`lisa.analysis.tasks.TaskState`

    Carries a single-letter state ``char`` and a docstring alongside the
    numeric state value.
    """
    def __new__(cls, value, char="", doc=""):
        new = super().__new__(cls, value)
        new.char = char
        new.__doc__ = doc
        return new

    def __or__(self, other):
        # Combined state: numeric values are OR'ed, chars concatenated.
        # (A previous revision also computed an unused local via
        # '"|".join(char + other.char)', which would have interleaved '|'
        # between every character; that dead code is removed.)
        return type(self)(
            int(self) | int(other),
            char=(self.char + other.char))

    # This is needed for some obscure reason (maybe a bug in std library ?)
    # In any case, if we don't provide that, Enum's metaclass will "sabotage"
    # pickling, and by doing so, will set cls.__module__ = '<unknown>'
    __reduce__ = int.__reduce__
class TaskState(StateInt, Enum):
    """
    Represents the task state as visible in sched_switch

    * Values are extracted from include/linux/sched.h
    * Chars are extracted from fs/proc/array.c:get_task_state()
    """
    # pylint-suppress: bad-whitespace
    TASK_RUNNING = 0x0000, "R", "Running"
    TASK_INTERRUPTIBLE = 0x0001, "S", "Sleeping"
    TASK_UNINTERRUPTIBLE = 0x0002, "D", "Disk sleep"
    # __ has a special meaning in Python so let's not do that
    TASK_STOPPED = 0x0004, "T", "Stopped"
    TASK_TRACED = 0x0008, "t", "Tracing stop"
    EXIT_DEAD = 0x0010, "X", "Dead"
    EXIT_ZOMBIE = 0x0020, "Z", "Zombie"
    # Apparently not visible in traces
    # EXIT_TRACE = (EXIT_ZOMBIE[0] | EXIT_DEAD[0])
    TASK_PARKED = 0x0040, "P", "Parked"
    TASK_DEAD = 0x0080, "I", "Idle"
    # States without a char are not reported in sched_switch.
    TASK_WAKEKILL = 0x0100
    TASK_WAKING = 0x0200, "W", "Waking" # LISA-only char definition
    TASK_NOLOAD = 0x0400
    TASK_NEW = 0x0800
    TASK_STATE_MAX = 0x1000
    # LISA synthetic states
    # Used to differentiate runnable (R) vs running (A)
    TASK_ACTIVE = 0x2000, "A", "Active"
    TASK_RENAMED = 0x2001, "N", "Renamed"
    # Used when the task state is unknown
    TASK_UNKNOWN = -1, "U", "Unknown"

    @classmethod
    def list_reported_states(cls):
        """
        List the states that can be reported in a ``sched_switch`` trace

        See include/linux/sched.h:TASK_REPORT
        """
        return [state for state in cls if 0 <= state <= cls.TASK_DEAD]

    # Could use IntFlag instead once we move to Python 3.6
    @classmethod
    @memoized
    def sched_switch_str(cls, value):
        """
        Get the task state string that would be used in a ``sched_switch`` event

        :param value: The task state value
        :type value: int

        Tries to emulate what is done in include/trace/events:TRACE_EVENT(sched_switch)
        """
        def find_states(value, states):
            # Chars of all states whose bit is set in value.
            return [
                state.char
                for state in states
                if value & state.value
            ]

        reported_states = cls.list_reported_states()
        res = '|'.join(find_states(value, reported_states))
        # No bit set means the task is simply running.
        res = res if res else cls.TASK_RUNNING.char

        # Flag the presence of unreportable states with a "+"
        unreportable_states = [
            state for state in cls
            if state.value >= 0 and state not in reported_states
        ]
        if find_states(value, unreportable_states):
            res += '+'

        return res

    @classmethod
    def from_sched_switch_str(cls, string):
        """
        Build a :class:`StateInt` from a string as it would be used in
        ``sched_switch`` event's ``prev_state`` field.

        :param string: String to parse.
        :type string: str
        """
        state = 0
        for _state in cls:
            # Any state whose char appears in the string is OR'ed in.
            if _state.char in string:
                state |= _state

        return state
class TasksAnalysis(TraceAnalysisBase):
"""
Support for Tasks signals analysis.
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'tasks'
@requires_events('sched_switch')
def cpus_of_tasks(self, tasks):
"""
Return the list of CPUs where the ``tasks`` executed.
:param tasks: Task names or PIDs or ``(pid, comm)`` to look for.
:type tasks: list(int or str or tuple(int, str))
"""
trace = self.trace
df = trace.df_event('sched_switch')[['next_pid', 'next_comm', '__cpu']]
task_ids = [trace.get_task_id(task, update=False) for task in tasks]
df = df_filter_task_ids(df, task_ids, pid_col='next_pid', comm_col='next_comm')
cpus = df['__cpu'].unique()
return sorted(cpus)
def _get_task_pid_name(self, pid):
"""
Get the last name the given PID had.
"""
return self.trace.get_task_pid_names(pid)[-1]
###############################################################################
# DataFrame Getter Methods
###############################################################################
@TraceAnalysisBase.cache
@requires_events('sched_wakeup')
def df_tasks_wakeups(self):
"""
The number of wakeups per task
:returns: a :class:`pandas.DataFrame` with:
* Task PIDs as index
* A ``wakeups`` column (The number of wakeups)
"""
df = self.trace.df_event('sched_wakeup')
wakeups = df.groupby('pid', observed=True, sort=False).count()["comm"]
df = pd.DataFrame(wakeups).rename(columns={"comm": "wakeups"})
df["comm"] = df.index.map(self._get_task_pid_name)
return df
@TraceAnalysisBase.cache
@df_tasks_wakeups.used_events
def df_top_wakeup(self, min_wakeups=100):
"""
Tasks which wakeup more frequently than a specified threshold.
:param min_wakeups: minimum number of wakeups
:type min_wakeups: int
"""
df = self.df_tasks_wakeups()
# Compute number of samples above threshold
df = df[df.wakeups > min_wakeups]
df = df.sort_values(by="wakeups", ascending=False)
return df
@TraceAnalysisBase.cache
@requires_events('sched_switch')
def df_rt_tasks(self, min_prio=100):
"""
Tasks with RT priority
.. note:: priorities uses scheduler values, thus: the lower the value the
higher is the task priority.
RT Priorities: [ 0..100]
FAIR Priorities: [101..120]
:param min_prio: minimum priority
:type min_prio: int
:returns: a :class:`pandas.DataFrame` with:
* Task PIDs as index
* A ``prio`` column (The priority of the task)
* A ``comm`` column (The name of the task)
"""
df = self.trace.df_event('sched_switch')
# Filters tasks which have a priority bigger than threshold
df = df[df.next_prio <= min_prio]
# Filter columns of interest
rt_tasks = df[['next_pid', 'next_prio']]
rt_tasks = rt_tasks.drop_duplicates()
# Order by priority
rt_tasks.sort_values(
by=['next_prio', 'next_pid'], ascending=True, inplace=True)
rt_tasks.rename(
columns={'next_pid': 'pid', 'next_prio': 'prio'}, inplace=True)
rt_tasks.set_index('pid', inplace=True)
rt_tasks['comm'] = rt_tasks.index.map(self._get_task_pid_name)
return rt_tasks
    @requires_events('sched_switch', 'sched_wakeup')
    @will_use_events_from('task_rename')
    @may_use_events('sched_wakeup_new')
    def _df_tasks_states(self, tasks=None, return_one_df=False):
        """
        Compute tasks states for all tasks.
        :param tasks: If specified, states of these tasks only will be yielded.
            The :class:`lisa.trace.TaskID` must have a ``pid`` field specified,
            since the task state is per-PID.
        :type tasks: list(lisa.trace.TaskID) or list(int)
        :param return_one_df: If ``True``, a single dataframe is returned with
            new extra columns. If ``False``, a generator is returned that
            yields tuples of ``(TaskID, task_df)``. Each ``task_df`` contains
            the new columns.
        :type return_one_df: bool
        """
        ######################################################
        # A) Assemble the sched_switch and sched_wakeup events
        ######################################################
        def get_df(event):
            # Ignore the end of the window so we can properly compute the
            # durations
            return self.trace.df_event(event, window=(self.trace.start, None))
        def filters_comm(task):
            try:
                return task.comm is not None
            except AttributeError:
                return isinstance(task, str)
        # Add the rename events if we are interested in the comm of tasks
        add_rename = any(map(filters_comm, tasks or []))
        wk_df = get_df('sched_wakeup')
        sw_df = get_df('sched_switch')
        # sched_wakeup_new is optional: fold it into the wakeup events if present
        try:
            wkn_df = get_df('sched_wakeup_new')
        except MissingTraceEventError:
            pass
        else:
            wk_df = pd.concat([wk_df, wkn_df])
        # Keep only the columns common with the sched_switch-derived rows
        wk_df = wk_df[["pid", "comm", "target_cpu", "__cpu"]].copy(deep=False)
        wk_df["curr_state"] = TaskState.TASK_WAKING
        # sched_switch carries two state updates at once: the switched-out task
        # goes to prev_state, the switched-in task becomes TASK_ACTIVE
        prev_sw_df = sw_df[["__cpu", "prev_pid", "prev_state", "prev_comm"]].copy()
        next_sw_df = sw_df[["__cpu", "next_pid", "next_comm"]].copy()
        prev_sw_df.rename(
            columns={
                "prev_pid": "pid",
                "prev_state": "curr_state",
                "prev_comm": "comm",
            },
            inplace=True
        )
        next_sw_df["curr_state"] = TaskState.TASK_ACTIVE
        next_sw_df.rename(columns={'next_pid': 'pid', 'next_comm': 'comm'}, inplace=True)
        all_sw_df = pd.concat([prev_sw_df, next_sw_df], sort=False)
        if add_rename:
            rename_df = get_df('task_rename').rename(
                columns={
                    'oldcomm': 'comm',
                },
            )[['pid', 'comm']]
            rename_df['curr_state'] = TaskState.TASK_RENAMED
            all_sw_df = pd.concat([all_sw_df, rename_df], sort=False)
        # Integer values are prefered here, otherwise the whole column
        # is converted to float64
        all_sw_df['target_cpu'] = -1
        df = pd.concat([all_sw_df, wk_df], sort=False)
        df.sort_index(inplace=True)
        df.rename(columns={'__cpu': 'cpu'}, inplace=True)
        # Restrict the set of data we will process to a given set of tasks
        if tasks is not None:
            def resolve_task(task):
                """
                Get a TaskID for each task, and only update existing TaskID if
                they lack a PID field, since that's what we care about in that
                function.
                """
                try:
                    do_update = task.pid is None
                except AttributeError:
                    do_update = False
                return self.trace.get_task_id(task, update=do_update)
            tasks = list(map(resolve_task, tasks))
            df = df_filter_task_ids(df, tasks)
        # Now that durations can be computed, clip to the trace window
        df = df_window(df, window=self.trace.window)
        # Return a unique dataframe with new columns added
        if return_one_df:
            df.sort_index(inplace=True)
            df.index.name = 'Time'
            df.reset_index(inplace=True)
            # Since sched_switch is split in two df (next and prev), we end up with
            # duplicated indices. Avoid that by incrementing them by the minimum
            # amount possible.
            df = df_update_duplicates(df, col='Time', inplace=True)
            grouped = df.groupby('pid', observed=True, sort=False)
            new_columns = dict(
                next_state=grouped['curr_state'].shift(-1, fill_value=TaskState.TASK_UNKNOWN),
                # GroupBy.transform() will run the function on each group, and
                # concatenate the resulting series to create a new column.
                # Note: We actually need transform() to chain 2 operations on
                # the group, otherwise the first operation returns a final
                # Series, and the 2nd is not applied on groups
                delta=grouped['Time'].transform(lambda time: time.diff().shift(-1)),
            )
            df = df.assign(**new_columns)
            df.set_index('Time', inplace=True)
            return df
        # Return a generator yielding (TaskID, task_df) tuples
        else:
            def make_pid_df(pid_df):
                # Even though the initial dataframe contains duplicated indices due to
                # using both prev_pid and next_pid in sched_switch event, we should
                # never end up with prev_pid == next_pid, so task-specific dataframes
                # are expected to be free from duplicated timestamps.
                # assert not df.index.duplicated().any()
                # Copy the df to add new columns
                pid_df = pid_df.copy(deep=False)
                # For each PID, add the time it spent in each state
                pid_df['delta'] = pid_df.index.to_series().diff().shift(-1)
                pid_df['next_state'] = pid_df['curr_state'].shift(-1, fill_value=TaskState.TASK_UNKNOWN)
                return pid_df
            signals = df_split_signals(df, ['pid'])
            return (
                (TaskID(pid=col['pid'], comm=None), make_pid_df(pid_df))
                for col, pid_df in signals
            )
@staticmethod
def _reorder_tasks_states_columns(df):
"""
Reorder once at the end of computation, since doing it for each tasks'
dataframe turned out to be very costly
"""
order = ['pid', 'comm', 'target_cpu', 'cpu', 'curr_state', 'next_state', 'delta']
cols = set(order)
available_cols = set(df.columns)
displayed_cols = [
col
for col in order
if col in (available_cols & cols)
]
extra_cols = sorted(available_cols - cols)
col_list = displayed_cols + extra_cols
return df[col_list]
@TraceAnalysisBase.cache
@_df_tasks_states.used_events
def df_tasks_states(self):
"""
DataFrame of all tasks state updates events
:returns: a :class:`pandas.DataFrame` with:
* A ``cpu`` column (the CPU where the task was on)
* A ``pid`` column (the PID of the task)
* A ``comm`` column (the name of the task)
* A ``target_cpu`` column (the CPU where the task has been scheduled).
Will be ``NaN`` for non-wakeup events
* A ``curr_state`` column (the current task state, see :class:`~TaskState`)
* A ``delta`` column (the duration for which the task will remain in
this state)
* A ``next_state`` column (the next task state)
.. warning:: Since ``sched_switch`` event multiplexes the update to two
PIDs at the same time, the resulting dataframe would contain
duplicated indices, breaking some Pandas functions. In order to
avoid that, the duplicated timestamps are updated with the minimum
increment possible to remove duplication.
"""
df = self._df_tasks_states(return_one_df=True)
return self._reorder_tasks_states_columns(df)
@TraceAnalysisBase.cache
@_df_tasks_states.used_events
def df_task_states(self, task, stringify=False):
"""
DataFrame of task's state updates events
:param task: The task's name or PID or tuple ``(pid, comm)``
:type task: int or str or tuple(int, str)
:param stringify: Include stringifed :class:`TaskState` columns
:type stringify: bool
:returns: a :class:`pandas.DataFrame` with:
* A ``cpu`` column (the CPU where the task was on)
* A ``target_cpu`` column (the CPU where the task has been scheduled).
Will be ``-1`` for non-wakeup events
* A ``curr_state`` column (the current task state, see :class:`~TaskState`)
* A ``next_state`` column (the next task state)
* A ``delta`` column (the duration for which the task will remain in
this state)
"""
tasks_df = list(self._df_tasks_states(tasks=[task]))
if not tasks_df:
raise ValueError(f'Task "{task}" has no associated events among: {self._df_tasks_states.used_events}')
task_id, task_df = tasks_df[0]
task_df = task_df.drop(columns=["pid", "comm"])
if stringify:
self.stringify_df_task_states(task_df, ["curr_state", "next_state"], inplace=True)
return self._reorder_tasks_states_columns(task_df)
@classmethod
def stringify_task_state_series(cls, series):
"""
Stringify a series containing :class:`TaskState` values
:param series: The series
:type series: pandas.Series
The common use case for this will be to pass a dataframe column::
df["state_str"] = stringify_task_state_series(df["state"])
"""
def stringify_state(state):
try:
return TaskState(state).char
except ValueError:
return TaskState.sched_switch_str(state)
return series.apply(stringify_state)
@classmethod
def stringify_df_task_states(cls, df, columns, inplace=False):
"""
Adds stringified :class:`TaskState` columns to a Dataframe
:param df: The DataFrame to operate on
:type df: pandas.DataFrame
:param columns: The columns to stringify
:type columns: list
:param inplace: Do the modification on the original DataFrame
:type inplace: bool
"""
df = df if inplace else df.copy()
for col in columns:
df[f"{col}_str"] = cls.stringify_task_state_series(df[col])
return df
@TraceAnalysisBase.cache
@_df_tasks_states.used_events
def df_tasks_runtime(self):
"""
DataFrame of the time each task spent in TASK_ACTIVE (:class:`TaskState`)
:returns: a :class:`pandas.DataFrame` with:
* PIDs as index
* A ``comm`` column (the name of the task)
* A ``runtime`` column (the time that task spent running)
.. note:: This function only tracks time spent by each PID. The
reported name is the last name associated with the PID in chronological
order.
"""
runtimes = {}
for task, pid_df in self._df_tasks_states():
pid = task.pid
# Make sure to only look at the relevant portion of the dataframe
# with the window, since we are going to make a time-based sum
pid_df = df_refit_index(pid_df, window=self.trace.window)
pid_df = df_add_delta(pid_df)
# Resolve the comm to the last name of the PID in that window
comms = pid_df['comm'].unique()
comm = comms[-1]
pid_df = pid_df[pid_df['curr_state'] == TaskState.TASK_ACTIVE]
runtimes[pid] = (pid_df['delta'].sum(skipna=True), comm)
df = pd.DataFrame.from_dict(runtimes, orient="index", columns=["runtime", 'comm'])
df.index.name = "pid"
df.sort_values(by="runtime", ascending=False, inplace=True)
return df
@TraceAnalysisBase.cache
@df_task_states.used_events
def df_task_total_residency(self, task):
"""
DataFrame of a task's execution time on each CPU
:param task: the task to report runtimes for
:type task: int or str or tuple(int, str)
:returns: a :class:`pandas.DataFrame` with:
* CPU IDs as index
* A ``runtime`` column (the time the task spent being active)
"""
df = self.df_task_states(task)
# Get the correct delta for the window we want.
df = df_add_delta(df, window=self.trace.window, col='runtime')
df = df[df['curr_state'] == TaskState.TASK_ACTIVE]
# For each CPU, sum the time spent on each by each task
by_cpu = df.groupby('cpu', observed=True, sort=False)
residency_df = by_cpu['runtime'].sum().to_frame()
# Add runtime for CPUs that did not appear in the window
residency_df = residency_df.reindex(
residency_df.index.union(range(self.trace.cpus_count))
)
return residency_df.fillna(0).sort_index()
    @df_task_total_residency.used_events
    def df_tasks_total_residency(self, tasks=None, ascending=False, count=None):
        """
        DataFrame of tasks execution time on each CPU
        :param tasks: List of tasks to report, all trace tasks by default
        :type tasks: list(int or str or tuple(int, str))
        :param ascending: Set True to order plot by ascending task runtime
            False by default
        :type ascending: bool
        :param count: Maximum number of tasks to report
        :type count: int
        """
        if tasks is None:
            task_ids = self.trace.task_ids
        else:
            task_ids = itertools.chain.from_iterable(
                self.trace.get_task_ids(task)
                for task in tasks
            )
        def get_task_df(task):
            try:
                df = self.ana.tasks.df_task_total_residency(task)
            # A missing trace event is a real error: re-raise it rather than
            # silently skipping the task
            except MissingTraceEventError:
                raise
            # Not all tasks may be available, e.g. tasks outside the TraceView
            # window
            except Exception:
                return None
            else:
                # One row per task, one column per CPU
                return df.T.rename(index={'runtime': str(task)})
        res_df = pd.concat(
            df
            for df in map(get_task_df, task_ids)
            if df is not None
        )
        # Total runtime across all CPUs, used for ordering
        res_df['Total'] = res_df.sum(axis=1)
        res_df.sort_values(by='Total', ascending=ascending, inplace=True)
        if count is not None:
            res_df = res_df.head(count)
        return res_df
@TraceAnalysisBase.cache
@df_task_states.used_events
def df_task_activation(self, task, cpu=None, active_value=1, sleep_value=0, preempted_value=np.NaN):
"""
DataFrame of a task's active time on a given CPU
:param task: the task to report activations of
:type task: int or str or tuple(int, str)
:param cpu: the CPUs to look at. If ``None``, all CPUs will be used.
:type cpu: int or None
:param active_value: the value to use in the series when task is
active.
:type active_value: float
:param sleep_value: the value to use in the series when task is
sleeping.
:type sleep_value: float
:param preempted_value: the value to use in the series when task is
preempted (runnable but not actually executing).
:type sleep_value: float
:returns: a :class:`pandas.DataFrame` with:
* A timestamp as index
* A ``active`` column, containing ``active_value`` when the task is
running, ``sleep_value`` when sleeping, and ``preempted_value``
otherwise.
* A ``cpu`` column with the CPU the task was running on.
* A ``duration`` column containing the duration of the current sleep or activation.
* A ``duty_cycle`` column containing the duty cycle in ``[0...1]`` of
the task, updated at each pair of activation and sleep.
"""
df = self.df_task_states(task)
def f(state):
if state == TaskState.TASK_ACTIVE:
return active_value
# TASK_RUNNING happens when a task is preempted (so it's not
# TASK_ACTIVE anymore but still runnable)
elif state == TaskState.TASK_RUNNING:
# Return NaN regardless of preempted_value, since some below
# code relies on that
return np.NaN
else:
return sleep_value
if cpu is not None:
df = df[df['cpu'] == cpu]
df = df.copy()
# TASK_WAKING can just be removed. The delta will then be computed
# without it, which means the time spent in WAKING state will be
# accounted into the previous state.
df = df[df['curr_state'] != TaskState.TASK_WAKING]
df['active'] = df['curr_state'].map(f)
df = df[['active', 'cpu']]
# Only keep first occurence of each adjacent duplicates, since we get
# events when the signal changes
df = df_deduplicate(df, consecutives=True, keep='first')
# Once we removed the duplicates, we can compute the time spent while sleeping or activating
df_add_delta(df, col='duration', inplace=True)
if not np.isnan(preempted_value):
df['active'].fillna(preempted_value, inplace=True)
# Merge consecutive activations' duration. They could have been
# split in two by a bit of preemption, and we don't want that to
# affect the duty cycle.
df_combine_duplicates(df, cols=['active'], func=lambda df: df['duration'].sum(), output_col='duration', inplace=True)
# Make a dataframe where the rows corresponding to preempted time are
# removed, unless preempted_value is set to non-NA
preempt_free_df = df.dropna().copy()
sleep = preempt_free_df[preempt_free_df['active'] == sleep_value]['duration']
active = preempt_free_df[preempt_free_df['active'] == active_value]['duration']
# Pair an activation time with it's following sleep time
sleep = sleep.reindex(active.index, method='bfill')
duty_cycle = active / (active + sleep)
df['duty_cycle'] = duty_cycle
df['duty_cycle'].fillna(inplace=True, method='ffill')
return df
###############################################################################
# Plotting Methods
###############################################################################
def _plot_markers(self, df, label):
return hv.Scatter(df, label=label).options(marker='+').options(
backend='bokeh',
size=5,
).options(
backend='matplotlib',
s=30,
)
def _plot_overutilized(self):
try:
return self.ana.status.plot_overutilized()
except MissingTraceEventError:
return _hv_neutral()
@TraceAnalysisBase.plot_method
@requires_events('sched_switch')
def plot_task_residency(self, task: TaskID):
"""
Plot on which CPUs the task ran on over time
:param task: Task to track
:type task: int or str or tuple(int, str)
"""
task_id = self.trace.get_task_id(task, update=False)
sw_df = self.trace.df_event("sched_switch")
sw_df = df_filter_task_ids(sw_df, [task_id], pid_col='next_pid', comm_col='next_comm')
def plot_residency():
if "freq-domains" in self.trace.plat_info:
# If we are aware of frequency domains, use one color per domain
for domain in self.trace.plat_info["freq-domains"]:
series = sw_df[sw_df["__cpu"].isin(domain)]["__cpu"]
series = series_refit_index(series, window=self.trace.window)
if series.empty:
return _hv_neutral()
else:
return self._plot_markers(
series,
label=f"Task running in domain {domain}"
)
else:
self._plot_markers(
series_refit_index(sw_df['__cpu'], window=self.trace.window)
)
return (
plot_residency().options(ylabel='cpu') *
self._plot_overutilized()
).options(
title=f'CPU residency of task {task}'
)
@TraceAnalysisBase.plot_method
@df_task_total_residency.used_events
def plot_task_total_residency(self, task: TaskID):
"""
Plot a task's total time spent on each CPU
:param task: The task's name or PID or tuple ``(pid, comm)``
:type task: str or int or tuple(int, str)
"""
df = self.df_task_total_residency(task)
return hv.Bars(df['runtime']).options(
title=f"CPU residency of task {task}",
xlabel='CPU',
ylabel='Runtime (s)',
invert_axes=True,
)
    @TraceAnalysisBase.plot_method
    @df_tasks_total_residency.used_events
    def plot_tasks_total_residency(self, tasks: TypedList[TaskID]=None, ascending: bool=False,
            count: bool=None):
        """
        Plot the stacked total time spent by each task on each CPU
        :param tasks: List of tasks to plot, all trace tasks by default
        :type tasks: list(int or str or tuple(int, str))
        :param ascending: Set True to order plot by ascending task runtime,
            False by default
        :type ascending: bool
        :param count: Maximum number of tasks to report
        :type count: int
        """
        df = self.df_tasks_total_residency(tasks, ascending, count)
        df = df.copy(deep=False)
        # Melt into long format: one row per (task, cpu) pair so each pair
        # becomes a segment of the stacked bars
        df['task'] = df.index
        df.columns = list(map(str, df.columns))
        df = df.melt(id_vars=['task'], var_name='cpu', value_name='Runtime (s)')
        return hv.Bars(
            df,
            kdims=['cpu', 'task']
        ).options(
            stacked=True,
            invert_axes=True,
            title=f"Stacked CPU residency of [{len(df.index)}] selected tasks",
        ).sort('cpu')
    def _plot_cpu_heatmap(self, event, bins, xbins, cmap):
        """
        Plot some data in a heatmap-style 2d histogram
        :param event: Name of the trace event to histogram; must provide a
            ``target_cpu`` field.
        :param bins: Number of time bins (x axis).
        :param xbins: Deprecated alias for ``bins``.
        :param cmap: Colormap name, defaults to "Viridis" when ``None``.
        """
        df = self.trace.df_event(event)
        df = df_window(df, window=self.trace.window, method='exclusive', clip_window=False)
        x = df.index
        y = df['target_cpu']
        if xbins:
            warnings.warn('"xbins" parameter is deprecated and will be removed, use "bins" instead', DeprecationWarning)
            bins = xbins
        nr_cpus = self.trace.cpus_count
        # 2D histogram: one row per CPU, "bins" time slices per row
        hist = np.histogram2d(y, x, bins=[nr_cpus, bins])
        z, _, x = hist
        y = list(range(nr_cpus))
        return hv.HeatMap(
            (x, y, z),
            kdims=[
                # Manually set dimension name/label so that shared_axes works
                # properly.
                # Also makes hover tooltip better.
                hv.Dimension('Time'),
                hv.Dimension('CPU'),
            ],
            vdims=[
                hv.Dimension(event),
            ]
        ).options(
            colorbar=True,
            xlabel='Time (s)',
            ylabel='CPU',
            # Viridis works both on bokeh and matplotlib
            cmap=cmap or 'Viridis',
            yticks=[
                (cpu, f'CPU{cpu}')
                for cpu in y
            ]
        )
    @TraceAnalysisBase.plot_method
    @requires_events("sched_wakeup")
    def _plot_tasks_X(self, event, name, target_cpus, window, per_sec):
        """
        Plot a rolling count of ``event`` occurrences over time.
        :param event: Trace event to count (e.g. ``sched_wakeup``).
        :param name: Name given to the resulting series.
        :param target_cpus: Restrict to events targeting these CPUs; all CPUs
            if falsy.
        :param window: Rolling window width in seconds.
        :param per_sec: If ``True``, report a rate (events/s) instead of a raw
            count per window.
        """
        # NOTE(review): the decorator requires 'sched_wakeup' even though this
        # helper is also called with 'sched_wakeup_new' — presumably the
        # public callers' own decorators enforce the right event; confirm.
        df = self.trace.df_event(event)
        if target_cpus:
            df = df[df['target_cpu'].isin(target_cpus)]
        # Count events in a centered rolling window, optionally normalized to
        # a per-second rate
        series = series_rolling_apply(
            df["target_cpu"],
            lambda x: x.count() / (window if per_sec else 1),
            window,
            window_float_index=False,
            center=True
        )
        if per_sec:
            label = f"Number of task {name} per second ({window}s windows)"
        else:
            label = f"Number of task {name} within {window}s windows"
        series = series_refit_index(series, window=self.trace.window)
        series.name = name
        return plot_signal(series, name=label)
@TraceAnalysisBase.plot_method
def plot_tasks_wakeups(self, target_cpus: TypedList[CPU]=None, window: float=1e-2, per_sec: bool=False):
"""
Plot task wakeups over time
:param target_cpus:
:type target_cpus:
:param window: The rolling window size for wakeup counts.
:type window: float
:param per_sec: Display wakeups per second if True, else wakeup counts
within the window
:type per_sec: bool
"""
return self._plot_tasks_X(
event='sched_wakeup',
name='wakeups',
target_cpus=target_cpus,
window=window,
per_sec=per_sec
)
@TraceAnalysisBase.plot_method
@requires_events("sched_wakeup")
def plot_tasks_wakeups_heatmap(self, bins: int=100, xbins=None, colormap=None):
"""
Plot tasks wakeups heatmap
:param bins: Number of x-axis bins, i.e. in how many slices should
time be arranged
:type bins: int
:param colormap: The name of a colormap:
* matplotlib backend: https://matplotlib.org/stable/tutorials/colors/colormaps.html
* bokeh backend: https://docs.bokeh.org/en/latest/docs/reference/palettes.html
:type colormap: str
"""
return self._plot_cpu_heatmap(
event='sched_wakeup',
bins=bins,
xbins=xbins,
cmap=colormap,
).options(
title="Tasks wakeups over time",
)
@TraceAnalysisBase.plot_method
@requires_events("sched_wakeup_new")
def plot_tasks_forks(self, target_cpus: TypedList[CPU]=None, window: float=1e-2, per_sec: bool=False):
"""
Plot task forks over time
:param target_cpus:
:type target_cpus:
:param window: The rolling window size for fork counts.
:type window: float
:param per_sec: Display wakeups per second if True, else wakeup counts
within the window
:type per_sec: bool
"""
return self._plot_tasks_X(
event='sched_wakeup_new',
name='forks',
target_cpus=target_cpus,
window=window,
per_sec=per_sec
)
@TraceAnalysisBase.plot_method
@requires_events("sched_wakeup_new")
def plot_tasks_forks_heatmap(self, bins: int=100, xbins=None, colormap=None):
"""
Plot number of task forks over time as a heatmap.
:param bins: Number of x-axis bins, i.e. in how many slices should
time be arranged
:type bins: int
:param colormap: The name of a colormap:
* matplotlib backend: https://matplotlib.org/stable/tutorials/colors/colormaps.html
* bokeh backend: https://docs.bokeh.org/en/latest/docs/reference/palettes.html
:type colormap: str
"""
return self._plot_cpu_heatmap(
event='sched_wakeup_new',
bins=bins,
xbins=xbins,
cmap=colormap,
).options(
title="Tasks forks over time",
)
# Use a class attribute so that there will be only one extra hover tool in
# the toolbar rather than one per task when stacking them
_BOKEH_TASK_HOVERTOOL = bokeh.models.HoverTool(
description='Task activations tooltip',
tooltips=[
('Task', '[@pid:@comm]'),
('CPU', '@cpu'),
('#', '$index'),
('Start', '@start'),
('Duration', '@duration'),
('Duty cycle', '@duty_cycle'),
]
)
    @df_task_activation.used_events
    def _plot_tasks_activation(self, tasks, show_legend=None, cpu: CPU=None, alpha:
            float=None, overlay: bool=False, duration: bool=False, duty_cycle:
            bool=False, which_cpu: bool=False, height_duty_cycle: bool=False, best_effort=False):
        """
        Internal worker for :meth:`plot_tasks_activation`: draw one rectangle
        per activation of each task, kernelshark-style.
        :param tasks: Tasks to plot, as TaskID with a ``pid`` set.
        :param show_legend: Whether to plot one labelled element per task.
            When ``None``, a heuristic below picks a value.
        :param best_effort: If ``True``, silently skip tasks with no events
            instead of raising :exc:`ValueError`.
        Other parameters match :meth:`plot_tasks_activation`.
        """
        logger = self.logger
        def ensure_last_rectangle(df):
            # Make sure we will draw the last rectangle, which could be
            # critical for tasks that are never sleeping
            if df.empty:
                return df
            else:
                start = self.trace.start
                last_duration = df['duration'].iat[-1]
                if pd.isna(last_duration):
                    end = self.trace.end
                else:
                    end = df.index[-1] + last_duration
                # If the rectangle finishes before the beginning of the trace
                # window, we ignore it
                if start <= end:
                    # Clip the beginning so that plots don't extend to the
                    # left of the trace window.
                    return df_refit_index(df, window=(start, end))
                else:
                    return df.iloc[0:0]
        def make_twinx(fig, **kwargs):
            return _hv_twinx(fig, **kwargs)
        if which_cpu:
            # One lane per CPU: rectangles are centered on the task's CPU row
            def make_rect_df(df):
                half_height = df['active'] / 2
                return pd.DataFrame(
                    dict(
                        Time=df.index,
                        CPU=df['cpu'] - half_height,
                        x1=df.index + df['duration'],
                        y1=df['cpu'] + half_height,
                    ),
                    index=df.index
                )
        else:
            # Single lane: rectangle height encodes the "active" value, scaled
            # so it matches the duty_cycle/duration curves when they are shown
            def make_rect_df(df):
                if duty_cycle or duration:
                    max_val = max(
                        df[col].max()
                        for select, col in (
                            (duty_cycle, 'duty_cycle'),
                            (duration, 'duration')
                        )
                        if select
                    )
                    height_factor = max_val
                else:
                    height_factor = 1
                return pd.DataFrame(
                    dict(
                        Time=df.index,
                        CPU=0,
                        x1=df.index + df['duration'],
                        y1=df['active'] * height_factor,
                    ),
                    index=df.index,
                )
        def plot_extra(task, df):
            # Optional duty-cycle and duration curves overlaid on the rectangles
            figs = []
            if duty_cycle:
                figs.append(
                    plot_signal(df['duty_cycle'], name=f'Duty cycle of {task}')
                )
            if duration:
                def plot_duration(active, label):
                    duration_series = df[df['active'] == active]['duration']
                    # Add blanks in the plot when the state is not the one we care about
                    duration_series = duration_series.reindex_like(df)
                    return plot_signal(duration_series, name=f'{label} duration of {task}')
                figs.extend(
                    plot_duration(active, label)
                    for active, label in (
                        (True, 'Activations'),
                        (False, 'Sleep')
                    )
                )
            return figs
        def check_df(task, df, empty_is_none):
            # In best-effort mode an empty dataframe maps to None, otherwise
            # it is an error
            if df.empty:
                msg = f'Could not find events associated to task {task}'
                if empty_is_none:
                    logger.debug(msg)
                    return None
                else:
                    raise ValueError(msg)
            else:
                return ensure_last_rectangle(df)
        def get_task_data(task, df):
            # Build the rectangle data plus the hover-tool columns for one task
            df = df.copy()
            # Preempted == sleep for plots
            df['active'] = df['active'].fillna(0)
            if height_duty_cycle:
                df['active'] *= df['duty_cycle']
            data = make_rect_df(df[df['active'] != 0])
            name_df = self.trace.df_event('sched_switch')
            name_df = name_df[name_df['next_pid'] == task.pid]
            names = name_df['next_comm'].reindex(data.index, method='ffill')
            # If there was no sched_switch with next_pid matching task.pid, we
            # simply take the last known name of the task, which could
            # originate from another field or another event.
            #
            # Note: This prevents an <NA> value, which makes bokeh choke.
            last_comm = self.trace.get_task_pid_names(task.pid)[-1]
            if last_comm not in names.cat.categories:
                names = names.cat.add_categories([last_comm])
            names = names.fillna(last_comm)
            # Use a string for PID so that holoviews interprets it as
            # categorical variable, rather than continuous. This is important
            # for correct color mapping
            data['pid'] = str(task.pid)
            data['comm'] = names
            data['start'] = data.index
            data['cpu'] = df['cpu']
            data['duration'] = df['duration']
            data['duty_cycle'] = df['duty_cycle']
            return data
        def plot_rect(data):
            if show_legend:
                opts = {}
            else:
                # If there is no legend, we are gonna plot all the rectangles at once so we use colormapping to distinguish the tasks
                opts = dict(
                    color='pid',
                    # Colormap from colorcet with a large number of color, so it is
                    # suitable for plotting many tasks
                    cmap='glasbey_hv',
                )
            return hv.Rectangles(
                data,
                kdims=[
                    hv.Dimension('Time'),
                    hv.Dimension('CPU'),
                    hv.Dimension('x1'),
                    hv.Dimension('y1'),
                ]
            ).options(
                show_legend=show_legend,
                alpha=alpha,
                **opts,
            ).options(
                backend='matplotlib',
                linewidth=0,
            ).options(
                backend='bokeh',
                line_width=0,
                tools=[self._BOKEH_TASK_HOVERTOOL],
            )
        # Pick a default transparency: see-through when layers will overlap
        if alpha is None:
            if overlay or duty_cycle or duration:
                alpha = 0.2
            else:
                alpha = 1
        # For performance reasons, plot all the tasks as one hv.Rectangles
        # invocation when we get too many tasks
        if show_legend is None:
            if overlay:
                # TODO: twinx() breaks on hv.Overlay, so we are forced to use a
                # single hv.Rectangles in that case, meaning no useful legend
                show_legend = False
            else:
                show_legend = len(tasks) < 5
        cpus_count = self.trace.cpus_count
        # One activation dataframe per task, possibly None in best-effort mode
        task_dfs = {
            task: check_df(
                task,
                self.df_task_activation(task, cpu=cpu),
                empty_is_none=best_effort,
            )
            for task in tasks
        }
        if best_effort:
            task_dfs = {
                task: df
                for task, df in task_dfs.items()
                if df is not None
            }
        tasks = sorted(task_dfs.keys())
        if show_legend:
            fig = hv.Overlay(
                [
                    plot_rect(get_task_data(task, df)).relabel(
                        f'Activations of {task.pid} (' +
                        ', '.join(
                            task_id.comm
                            for task_id in self.trace.get_task_ids(task)
                        ) +
                        ')',
                    )
                    for task, df in task_dfs.items()
                ]
            ).options(
                legend_limit=len(tasks) * 100,
            )
        else:
            data = pd.concat(
                get_task_data(task, df)
                for task, df in task_dfs.items()
            )
            fig = plot_rect(data)
        if overlay:
            fig = make_twinx(
                fig,
                y_range=(-1, cpus_count),
                display=False
            )
        else:
            if which_cpu:
                fig = fig.options(
                    'Rectangles',
                    ylabel='CPU',
                    yticks=[
                        (cpu, f'CPU{cpu}')
                        for cpu in range(cpus_count)
                    ],
                ).redim(
                    y=hv.Dimension('y', range=(-0.5, cpus_count - 0.5))
                )
            elif height_duty_cycle:
                fig = fig.options(
                    'Rectangles',
                    ylabel='Duty cycle',
                )
        if duty_cycle or duration:
            if duty_cycle:
                ylabel = 'Duty cycle'
            elif duration:
                ylabel = 'Duration (s)'
            # TODO: twinx() on hv.Overlay does not work, so we unfortunately have a
            # scaling issue here
            fig = hv.Overlay(
                [fig] +
                [
                    fig
                    for task, df in task_dfs.items()
                    for fig in plot_extra(task, df)
                ]
            ).options(
                ylabel=ylabel,
            )
        return fig.options(
            title='Activations of {}'.format(
                ', '.join(map(str, tasks))
            ),
        )
    @TraceAnalysisBase.plot_method
    @_plot_tasks_activation.used_events
    @kwargs_forwarded_to(_plot_tasks_activation, ignore=['tasks', 'best_effort'])
    def plot_tasks_activation(self, tasks: TypedList[TaskID]=None, hide_tasks: TypedList[TaskID]=None, which_cpu: bool=True, overlay: bool=False, **kwargs):
        """
        Plot all tasks activations, in a style similar to kernelshark.
        :param tasks: Tasks to plot. If ``None``, all tasks in the trace will
            be used.
        :type tasks: list(TaskID) or None
        :param hide_tasks: Tasks to hide. Note that PID 0 (idle task) will
            always be hidden.
        :type hide_tasks: list(TaskID) or None
        :param alpha: transparency level of the plot.
        :type alpha: float
        :param overlay: If ``True``, adjust the transparency and plot
            activations on a separate hidden scale so existing scales are not
            modified.
        :type overlay: bool
        :param duration: Plot the duration of each sleep/activation.
        :type duration: bool
        :param duty_cycle: Plot the duty cycle of each pair of sleep/activation.
        :type duty_cycle: bool
        :param which_cpu: If ``True``, plot the activations on each CPU in a
            separate row like kernelshark does.
        :type which_cpu: bool
        :param height_duty_cycle: Height of each activation's rectangle is
            proportional to the duty cycle during that activation.
        :type height_duty_cycle: bool
        .. seealso:: :meth:`df_task_activation`
        """
        trace = self.trace
        hidden = set(itertools.chain.from_iterable(
            trace.get_task_ids(task)
            for task in (hide_tasks or [])
        ))
        if tasks:
            best_effort = False
            task_ids = list(itertools.chain.from_iterable(
                map(trace.get_task_ids, tasks)
            ))
        else:
            # When plotting every task of the trace, silently skip tasks with
            # no events in the window
            best_effort = True
            task_ids = trace.task_ids
        full_task_ids = sorted(
            task
            for task in task_ids
            if (
                task not in hidden and
                task.pid != 0
            )
        )
        # Only consider the PIDs in order to:
        # * get the same color for the same PID during its whole life
        # * avoid potential issues around task renaming
        # Note: The task comm will still be displayed in the hover tool
        task_ids = [
            TaskID(pid=pid, comm=None)
            for pid in sorted(set(x.pid for x in full_task_ids))
        ]
        #TODO: Re-enable the CPU "lanes" once this bug is solved:
        # https://github.com/holoviz/holoviews/issues/4979
        if False and which_cpu and not overlay:
            # Add horizontal lines to delimitate each CPU "lane" in the plot
            cpu_lanes = [
                hv.HLine(y - offset).options(
                    color='grey',
                    alpha=0.2,
                ).options(
                    backend='bokeh',
                    line_width=0.5,
                ).options(
                    backend='matplotlib',
                    linewidth=0.5,
                )
                for y in range(trace.cpus_count + 1)
                for offset in ((0.5, -0.5) if y == 0 else (0.5,))
            ]
        else:
            cpu_lanes = []
        # Fall back on a generic title when the task list would be too long
        title = 'Activations of ' + ', '.join(
            map(str, full_task_ids)
        )
        if len(title) > 50:
            title = 'Task activations'
        return self._plot_tasks_activation(
            tasks=task_ids,
            which_cpu=which_cpu,
            overlay=overlay,
            best_effort=best_effort,
            **kwargs
        ).options(
            title=title
        )
    @TraceAnalysisBase.plot_method
    @plot_tasks_activation.used_events
    @kwargs_forwarded_to(plot_tasks_activation, ignore=['tasks'])
    @deprecate('Deprecated since it does not provide anything more than plot_tasks_activation', deprecated_in='2.0', removed_in='3.0', replaced_by=plot_tasks_activation)
    def plot_task_activation(self, task: TaskID, **kwargs):
        """
        Plot task activations, in a style similar to kernelshark.

        :param task: the task to report activations of
        :type task: int or str or tuple(int, str)

        .. seealso:: :meth:`plot_tasks_activation`
        """
        # Thin deprecated wrapper: delegate to the plural variant with a
        # single-element task list, forwarding all other keyword arguments.
        return self.plot_tasks_activation(tasks=[task], **kwargs)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| |
import io
from datetime import datetime
from unittest.mock import patch
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.test import TestCase
from storages.backends import ftp
USER = 'foo'
PASSWORD = 'b@r'
HOST = 'localhost'
PORT = 2121
URL = "ftp://{user}:{passwd}@{host}:{port}/".format(user=USER, passwd=PASSWORD,
                                                    host=HOST, port=PORT)
LIST_FIXTURE = """drwxr-xr-x 2 ftp nogroup 4096 Jul 27 09:46 dir
-rw-r--r-- 1 ftp nogroup 1024 Jul 27 09:45 fi
-rw-r--r-- 1 ftp nogroup 2048 Jul 27 09:50 fi2"""


def list_retrlines(cmd, func):
    """Stand-in for ``ftplib.FTP.retrlines``: feed every line of the canned
    directory listing to *func*, ignoring the FTP command *cmd*."""
    for entry in LIST_FIXTURE.splitlines():
        func(entry)
class FTPTest(TestCase):
    # Unit tests for ftp.FTPStorage. ftplib.FTP is patched throughout, so no
    # real FTP server is ever contacted.
    def setUp(self):
        self.storage = ftp.FTPStorage(location=URL)
    def test_init_no_location(self):
        # No explicit location and no setting: construction must fail.
        with self.assertRaises(ImproperlyConfigured):
            ftp.FTPStorage()
    @patch('storages.backends.ftp.setting', return_value=URL)
    def test_init_location_from_setting(self, mock_setting):
        # The location may instead be supplied via a Django setting.
        storage = ftp.FTPStorage()
        self.assertTrue(mock_setting.called)
        self.assertEqual(storage.location, URL)
    def test_decode_location(self):
        # Passive mode: plain "ftp://" scheme decodes with active=False.
        config = self.storage._decode_location(URL)
        wanted_config = {
            'passwd': 'b@r',
            'host': 'localhost',
            'user': 'foo',
            'active': False,
            'path': '/',
            'port': 2121,
        }
        self.assertEqual(config, wanted_config)
        # Test active FTP
        config = self.storage._decode_location('a'+URL)
        wanted_config = {
            'passwd': 'b@r',
            'host': 'localhost',
            'user': 'foo',
            'active': True,
            'path': '/',
            'port': 2121,
        }
        self.assertEqual(config, wanted_config)
    def test_decode_location_error(self):
        # Unparseable or non-FTP locations are rejected.
        with self.assertRaises(ImproperlyConfigured):
            self.storage._decode_location('foo')
        with self.assertRaises(ImproperlyConfigured):
            self.storage._decode_location('http://foo.pt')
        # TODO: Cannot not provide a port
        # with self.assertRaises(ImproperlyConfigured):
        #     self.storage._decode_location('ftp://')
    @patch('ftplib.FTP')
    def test_start_connection(self, mock_ftp):
        self.storage._start_connection()
        self.assertIsNotNone(self.storage._connection)
        # Start active
        storage = ftp.FTPStorage(location='a'+URL)
        storage._start_connection()
    @patch('ftplib.FTP', **{'return_value.pwd.side_effect': IOError()})
    def test_start_connection_timeout(self, mock_ftp):
        # A failing pwd() marks the cached connection stale; a fresh
        # connection should be established transparently.
        self.storage._start_connection()
        self.assertIsNotNone(self.storage._connection)
    @patch('ftplib.FTP', **{'return_value.connect.side_effect': IOError()})
    def test_start_connection_error(self, mock_ftp):
        # A connect failure surfaces as the backend's own exception type.
        with self.assertRaises(ftp.FTPStorageException):
            self.storage._start_connection()
    @patch('ftplib.FTP', **{'return_value.quit.return_value': None})
    def test_disconnect(self, mock_ftp_quit):
        self.storage._start_connection()
        self.storage.disconnect()
        self.assertIsNone(self.storage._connection)
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    def test_mkremdirs(self, mock_ftp):
        self.storage._start_connection()
        self.storage._mkremdirs('foo/bar')
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    def test_mkremdirs_n_subdirectories(self, mock_ftp):
        self.storage._start_connection()
        self.storage._mkremdirs('foo/bar/null')
    @patch('ftplib.FTP', **{
        'return_value.pwd.return_value': 'foo',
        'return_value.storbinary.return_value': None
    })
    def test_put_file(self, mock_ftp):
        self.storage._start_connection()
        self.storage._put_file('foo', File(io.BytesIO(b'foo'), 'foo'))
    @patch('ftplib.FTP', **{
        'return_value.pwd.return_value': 'foo',
        'return_value.storbinary.side_effect': IOError()
    })
    def test_put_file_error(self, mock_ftp):
        # An upload failure is wrapped in FTPStorageException.
        self.storage._start_connection()
        with self.assertRaises(ftp.FTPStorageException):
            self.storage._put_file('foo', File(io.BytesIO(b'foo'), 'foo'))
    def test_open(self):
        remote_file = self.storage._open('foo')
        self.assertIsInstance(remote_file, ftp.FTPStorageFile)
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    def test_read(self, mock_ftp):
        self.storage._start_connection()
        self.storage._read('foo')
    @patch('ftplib.FTP', **{'return_value.pwd.side_effect': IOError()})
    def test_read2(self, mock_ftp):
        self.storage._start_connection()
        with self.assertRaises(ftp.FTPStorageException):
            self.storage._read('foo')
    @patch('ftplib.FTP', **{
        'return_value.pwd.return_value': 'foo',
        'return_value.storbinary.return_value': None
    })
    def test_save(self, mock_ftp):
        self.storage._save('foo', File(io.BytesIO(b'foo'), 'foo'))
    @patch('ftplib.FTP', **{'return_value.sendcmd.return_value': '213 20160727094506'})
    def test_modified_time(self, mock_ftp):
        # '213' is the FTP success reply for MDTM; the timestamp follows it.
        self.storage._start_connection()
        modif_date = self.storage.modified_time('foo')
        self.assertEqual(modif_date, datetime(2016, 7, 27, 9, 45, 6))
    @patch('ftplib.FTP', **{'return_value.sendcmd.return_value': '500'})
    def test_modified_time_error(self, mock_ftp):
        # A non-213 reply means the server could not report a mtime.
        self.storage._start_connection()
        with self.assertRaises(ftp.FTPStorageException):
            self.storage.modified_time('foo')
    @patch('ftplib.FTP', **{'return_value.retrlines': list_retrlines})
    def test_listdir(self, mock_retrlines):
        # Directory entries and files are split using the canned LIST output.
        dirs, files = self.storage.listdir('/')
        self.assertEqual(len(dirs), 1)
        self.assertEqual(dirs, ['dir'])
        self.assertEqual(len(files), 2)
        self.assertEqual(sorted(files), sorted(['fi', 'fi2']))
    @patch('ftplib.FTP', **{'return_value.retrlines.side_effect': IOError()})
    def test_listdir_error(self, mock_ftp):
        with self.assertRaises(ftp.FTPStorageException):
            self.storage.listdir('/')
    @patch('ftplib.FTP', **{'return_value.nlst.return_value': ['foo', 'foo2']})
    def test_exists(self, mock_ftp):
        self.assertTrue(self.storage.exists('foo'))
        self.assertFalse(self.storage.exists('bar'))
    @patch('ftplib.FTP', **{'return_value.nlst.side_effect': IOError()})
    def test_exists_error(self, mock_ftp):
        with self.assertRaises(ftp.FTPStorageException):
            self.storage.exists('foo')
    @patch('ftplib.FTP', **{
        'return_value.delete.return_value': None,
        'return_value.nlst.return_value': ['foo', 'foo2']
    })
    def test_delete(self, mock_ftp):
        self.storage.delete('foo')
        self.assertTrue(mock_ftp.return_value.delete.called)
    @patch('ftplib.FTP', **{'return_value.retrlines': list_retrlines})
    def test_size(self, mock_ftp):
        # Sizes come from the canned LIST output; unknown names yield 0.
        self.assertEqual(1024, self.storage.size('fi'))
        self.assertEqual(2048, self.storage.size('fi2'))
        self.assertEqual(0, self.storage.size('bar'))
    @patch('ftplib.FTP', **{'return_value.retrlines.side_effect': IOError()})
    def test_size_error(self, mock_ftp):
        self.assertEqual(0, self.storage.size('foo'))
    def test_url(self):
        # Without a base_url url() must raise; with one it is prepended.
        with self.assertRaises(ValueError):
            self.storage._base_url = None
            self.storage.url('foo')
        self.storage = ftp.FTPStorage(location=URL, base_url='http://foo.bar/')
        self.assertEqual('http://foo.bar/foo', self.storage.url('foo'))
class FTPStorageFileTest(TestCase):
    # Unit tests for ftp.FTPStorageFile, again with ftplib.FTP (and, where
    # needed, FTPStorage._read) mocked out.
    def setUp(self):
        self.storage = ftp.FTPStorage(location=URL)
    @patch('ftplib.FTP', **{'return_value.retrlines': list_retrlines})
    def test_size(self, mock_ftp):
        # Size is looked up from the storage's (mocked) directory listing.
        file_ = ftp.FTPStorageFile('fi', self.storage, 'wb')
        self.assertEqual(file_.size, 1024)
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    @patch('storages.backends.ftp.FTPStorage._read', return_value=io.BytesIO(b'foo'))
    def test_readlines(self, mock_ftp, mock_storage):
        file_ = ftp.FTPStorageFile('fi', self.storage, 'wb')
        self.assertEqual([b'foo'], file_.readlines())
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    @patch('storages.backends.ftp.FTPStorage._read', return_value=io.BytesIO(b'foo'))
    def test_read(self, mock_ftp, mock_storage):
        file_ = ftp.FTPStorageFile('fi', self.storage, 'wb')
        self.assertEqual(b'foo', file_.read())
    def test_write(self):
        # Writes go to the local buffer until the file is closed.
        file_ = ftp.FTPStorageFile('fi', self.storage, 'wb')
        file_.write(b'foo')
        file_.seek(0)
        self.assertEqual(file_.file.read(), b'foo')
    @patch('ftplib.FTP', **{'return_value.pwd.return_value': 'foo'})
    @patch('storages.backends.ftp.FTPStorage._read', return_value=io.BytesIO(b'foo'))
    def test_close(self, mock_ftp, mock_storage):
        # Closing a dirty file triggers an upload through the storage.
        file_ = ftp.FTPStorageFile('fi', self.storage, 'wb')
        file_.is_dirty = True
        file_.read()
        file_.close()
| |
"""This module/class contains functionality for computing (and plotting) radial
velocities and creating reference spectra for extracted fluxes. This should
ideally remain independent of the extraction method, such that it does not
matter which spectrograph took the data, nor what "Spectrograph" object was
used for extraction.
Most of the code below has been moved from the script "test_rhea2_extract.py".
Work still needs to be done post-refactor to ensure function input and outputs
are sensible, their docstrings are informative and they follow the principles of
Object Oriented Programming - such as the Single Responsibility Principle (Along
with a general clean up of the code and comments, such as having the code meet
the python line length guidelines --> the main benefit of which is having
multiple editors open side by side on smaller screens)
TODO
1) Move extract method to either extract module or rhea
2) Try to separate calculation/processing of data from saving/loading/displaying
3) Tidy up inputs to functions (e.g. cull unnecessary input parameters)
4) Make create_ref_spect() output variances (Median Absolute Deviations)
5) Possibly have dark calibration (for both flats and science frames) in its own
method. This would clean up the existing extract method, removing the need
to check whether darks and flats had been passed in (or varying permutations
of each - e.g. in the case where some of the data has already been dark
corrected, such as the solar data)
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
import scipy.interpolate as interp
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import constants as const
import PyAstronomy.pyasl as pyasl
import opticstools as ot
import pdb
try:
    import pyfits
except ImportError:
    # Fall back to the astropy-provided FITS implementation when the
    # standalone (legacy) pyfits package is not installed. Only catch
    # ImportError: a bare except would also swallow e.g. KeyboardInterrupt.
    import astropy.io.fits as pyfits
class RadialVelocity():
"""A RadialVelocity object for calculating and plotting RVS and generating
reference spectra.
Unclear if the object needs to be initialised with any parameters at this
stage. Perhaps a file path?
"""
    def __init__(self):
        """(Presently empty) constructor.

        The object holds no state; every method takes its inputs explicitly,
        so no initialisation parameters are needed yet.
        """
        pass
def rv_shift_resid(self, params, wave, spect, spect_sdev, spline_ref,
return_spect=False):
"""Find the residuals to a fit of a (subsampled)reference spectrum to an
observed spectrum.
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params: array-like
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectra
spect_sdev: float array
standard deviation of the input spectra.
spline_ref: InterpolatedUnivariateSpline instance
For interpolating the reference spectrum
return_spect: boolean
Whether to return the fitted spectrum or the residuals.
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
resid: float array
The fit residuals
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
# Lets get this sign correct. A redshift (positive velocity) means that
# a given wavelength for the reference corresponds to a longer
# wavelength for the target, which in turn means that the target
# wavelength has to be interpolated onto shorter wavelengths for the
# reference.
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
if return_spect:
return fitted_spect
else:
return (fitted_spect - spect)/spect_sdev
def rv_shift_chi2(self, params, wave, spect, spect_sdev, spline_ref):
"""Find the chi-squared for an RV fit. Just a wrapper for rv_shift_resid,
so the docstring is cut and paste!
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params:
...
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
return_spect: boolean
Whether to return the fitted spectrum or the
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
chi2:
The fit chi-squared
"""
return np.sum(self.rv_shift_resid(params, wave, spect, spect_sdev, spline_ref)**2)
def rv_shift_jac(self, params, wave, spect, spect_sdev, spline_ref):
r"""Explicit Jacobian function for rv_shift_resid.
This is not a completely analytic solution, but without it there seems to be
numerical instability.
The key equations are:
.. math:: f(x) = R( \lambda(x) (1 - p_0/c) ) \times \exp(p_1 x^2 + p_2 + p_3)
g(x) = (f(x) - d(x))/\sigma(x)
\frac{dg}{dp_0}(x) \approx [f(x + 1 m/s) -f(x) ]/\sigma(x)
\frac{dg}{dp_1}(x) = x^2 f(x) / \sigma(x)
\frac{dg}{dp_2}(x) = x f(x) / \sigma(x)
\frac{dg}{dp_3}(x) = f(x) / \sigma(x)
Parameters
----------
params: float array
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
Returns
-------
jac:
The Jacobian.
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
jac = np.empty( (ny,4) )
#The Jacobian is the derivative of fitted_spect/sdev with respect to
#p[0] through p[3]
jac[:,3] = fitted_spect/spect_sdev
jac[:,2] = fitted_spect*xx/spect_sdev
jac[:,1] = fitted_spect*xx**2/spect_sdev
jac[:,0] = (spline_ref(wave*(1.0 - (params[0] + 1.0)/const.c.si.value))*
norm - fitted_spect)/spect_sdev
return jac
def create_ref_spect(self, wave, fluxes, vars, bcors, rebin_fact=2,
gauss_sdev=1.0, med_cut=0.6,gauss_hw=7,threshold=100):
"""Create a reference spectrum from a series of target spectra.
The process is:
1) Re-grid the spectra into a rebin_fact times smaller wavelength grid.
2) The spectra are barycentrically corrected by linear interpolation. Note
that when used on a small data set, typically the spectra will be shifted by
many km/s. For an RV-stable star, the fitting process then needs to find the
opposite of this barycentric velocity.
3) Remove bad (i.e. low flux) files.
4) Median combine the spectra.
5) Convolve the result by a Gaussian to remove high spatial frequency noise. This
can be important when the reference spectrum is created from only a small
number of input spectra, and high-frequency noise can be effectively fitted to
itself.
Parameters
----------
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
rebin_fact: int
Factor by which to rebin.
gauss_sdev:
...
med_cut:
...
gauss_hw:
...
Returns
-------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
C = const.c.si.value
#Create arrays for our outputs.
wave_ref = np.empty( (nm,rebin_fact*ny + 2) )
ref_spect = np.empty( (nm,rebin_fact*ny + 2) )
#First, rebin everything, using opticstools.utils.regrid_fft
new_shape = (fluxes.shape[1],rebin_fact*fluxes.shape[2])
fluxes_rebin = np.empty( (fluxes.shape[0],fluxes.shape[1],
rebin_fact*fluxes.shape[2]) )
for i in range(nf):
fluxes_rebin[i] = ot.utils.regrid_fft(fluxes[i],new_shape)
#Create the final wavelength grid.
for j in range(nm):
wave_ref[j,1:-1] = np.interp(np.arange(rebin_fact*ny)/rebin_fact,
np.arange(ny),wave[j,:])
#Fill in the end wavelengths, including +/-100 km/s from the ends.
wave_ref[j,-2] = wave_ref[j,-3] + (wave_ref[j,-3]-wave_ref[j,-4])
wave_ref[j,0] = wave_ref[j,1] * (C + 1e5)/C
wave_ref[j,-1] = wave_ref[j,-2] * (C - 1e5)/C
#Barycentric correct. For a positive barycentric velocity, the observer is
#moving towards the star, which means that star is blue-shifted and the
#correct rest-frame spectrum is at longer wavelengths. The interpolation
#below shifts the spectrum to the red, as required.
for i in range(nf):
for j in range(nm):
# Awkwardly, we've extended the wavelength scale by 2 elements,
# but haven't yet extended the fluxes...
ww = wave_ref[j,1:-1]
fluxes_rebin[i,j] = np.interp(ww*(1-bcors[i]/C), ww[::-1],
fluxes_rebin[i,j,::-1])
#!!! New Code. This was already checked and makes no sense.
#Combine the spectra.
flux_meds = np.median(fluxes_rebin,axis=2)
flux_files = np.median(flux_meds,axis=1)
if med_cut > 0:
good_files = np.where(flux_files > med_cut*np.median(flux_files))[0]
else:
good_files = np.arange(len(flux_files),dtype=np.int)
flux_orders = np.median(flux_meds[good_files],axis=0)
flux_norm = fluxes_rebin.copy()
for g in good_files:
for j in range(nm):
flux_norm[g,j,:] /= flux_meds[g,j]
#pdb.set_trace()
#Create a median over files
flux_ref = np.median(flux_norm[good_files],axis=0)
#Multiply this by the median for each order
for j in range(nm):
flux_ref[j] *= flux_orders[j]
#Threshold the data whenever the flux is less than "threshold"
if (threshold > 0):
bad = flux_ref<2*threshold
flux_ref[bad] *= np.maximum(flux_ref[bad]-threshold,0)/threshold
# Create a Gaussian smoothing function for the reference spectrum. This
# is needed to prevent a bias to zero radial velocity, especially in the
# case of few data points.
gg = np.exp(-(np.arange(2*gauss_hw+1)-gauss_hw)**2/2.0/gauss_sdev**2)
gg /= np.sum(gg)
one_order = np.empty(flux_ref.shape[1] + 2*gauss_hw)
for j in range(nm):
one_order[gauss_hw:-gauss_hw] = flux_ref[j,:]
one_order[:gauss_hw] = one_order[gauss_hw]
one_order[-gauss_hw:] = one_order[-gauss_hw-1]
ref_spect[j,:] = np.convolve(one_order, gg,
mode='same')[gauss_hw-1:1-gauss_hw]
return wave_ref, ref_spect
def extract_spectra(self, files, extractor, star_dark=None, flat_files=None,
flat_dark=None, location=('151.2094','-33.865',100.0),
coord=None, do_bcor=True, ra_dec_hr=False):
"""Extract the spectrum from a file, given a dark file, a flat file and
a dark for the flat. The process is:
1) Dark correcting the data and the flat fields.
2) Computing (but not applying) Barycentric corrections.
3) Extracting the data and the flat fields using the extract module, to form
:math:`f_m(x)`, the flux for orders m and dispersion direction pixels x.
4) Normalising the flat fields, so that the median of each order is 1.0.
5) Dividing by the extracted flat field. Uncertainties from the flat field are
added in quadrature.
TODO: Not the neatest implementation, but should account for the fact that
there are no flats or darks for the ThAr frames. Might be worth tidying
up and making the implementation a little more elegant.
Parameters
----------
files: list of strings
One string for each file. CAn be on separate nights - a full
pathname should be given.
star_dark:
flat_files: list of strings.
One string for each star file. CAn be on separate nights - a full
pathname should be given.
flat_dark:
location: (lattitude:string, longitude:string, elevation:string)
The location on Earth where the data were taken.
coord: astropy.coordinates.sky_coordinate.SkyCoord
The coordinates of the observation site
do_bcor: boolean
Flag for whether to do barycentric correction
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
# Initialise list of return values
# Each index represents a single observation
fluxes = []
vars = []
dates = []
bcors = []
#!!! This is dodgy, as files and flat_files should go together in a dict
for ix,file in enumerate(files):
# Dark correct the science and flat frames
# Only if flat/darks have been supplied --> ThAr might not have them
# If not supplied, just use science/reference data
try:
# Dark correct science frames
if len(star_dark) > 0:
data = pyfits.getdata(file) - star_dark
else:
data = pyfits.getdata(file)
# Dark correct flats
if len(flat_files) > 0 and len(flat_dark) > 0:
flat = pyfits.getdata(flat_files[ix]) - flat_dark
elif len(flat_files) > 0:
flat = pyfits.getdata(flat_files[ix])
except:
print('Unable to calibrate file ' + file +
'. Check that format of data arrays are consistent.')
print(pyfits.getdata(file).shape)
print(star_dark.shape)
continue
header = pyfits.getheader(file)
date = Time(header['JD'], format='jd', location=location)
dates.append(date)
# Determine the barycentric correction
if do_bcor:
if not coord:
# Depending on whether the RA and DEC is saved in hours or
# degrees, load and create a SkyCoord object
if ra_dec_hr:
ra_deg = float(header['RA'])*15
else:
ra_deg = float(header['RA'])
dec_deg = float(header['DEC'])
coord = SkyCoord(ra=ra_deg, dec=dec_deg, unit='deg')
if not location:
location=(float(header['LONG']), float(header['LAT']),
float(header['HEIGHT']))
#(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False)
#pdb.set_trace()
bcors.append(1e3*pyasl.helcorr(float(location[0]),
float(location[1]),location[2],coord.ra.deg,
coord.dec.deg,date.jd)[0] )
else:
bcors.append(0.0)
# Extract the fluxes and variance for the science and flat frames
print("Extracting spectra from file #", str(ix))
flux, var = extractor.one_d_extract(data=data, rnoise=20.0)
# Continue only when flats have been supplied
# Perform flat field correction and adjust variances
if len(flat_files) > 0:
flat_flux, fvar = extractor.one_d_extract(data=flat,
rnoise=20.0)
for j in range(flat_flux.shape[0]):
medf = np.median(flat_flux[j])
flat_flux[j] /= medf
fvar[j] /= medf**2
#Calculate the variance after dividing by the flat
var = var/flat_flux**2 + fvar * flux**2/flat_flux**4
#Now normalise the flux.
flux /= flat_flux
# Regardless of whether the data has been flat field corrected,
# append to the arrays and continue
fluxes.append(flux[:,:,0])
vars.append(var[:,:,0])
fluxes = np.array(fluxes)
vars = np.array(vars)
bcors = np.array(bcors)
mjds = np.array([d.mjd for d in dates])
return fluxes, vars, bcors, mjds
def calculate_rv_shift(self, wave_ref, ref_spect, fluxes, vars, bcors,
wave,return_fitted_spects=False,bad_threshold=10):
"""Calculates the Radial Velocity of each spectrum
The radial velocity shift of the reference spectrum required
to match the flux in each order in each input spectrum is calculated
The input fluxes to this method are flat-fielded data, which are then fitted with
a barycentrically corrected reference spectrum :math:`R(\lambda)`, according to
the following equation:
.. math::
f(x) = R( \lambda(x) (1 - p_0/c) ) \\times \exp(p_1 x^2 + p_2 + p_3)
The first term in this equation is simply the velocity corrected spectrum, based on a
the arc-lamp derived reference wavelength scale :math:`\lambda(x)` for pixels coordinates x.
The second term in the equation is a continuum normalisation - a shifted Gaussian was
chosen as a function that is non-zero everywhere. The scipy.optimize.leastsq function is used
to find the best fitting set fof parameters :math:`p_0` through to :math`p_3`.
The reference spectrum function :math:`R(\lambda)` is created using a wavelength grid
which is over-sampled with respect to the data by a factor of 2. Individual fitted
wavelengths are then found by cubic spline interpolation on this :math:`R_j(\lambda_j)`
discrete grid.
Parameters
----------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
Returns
-------
rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
rvs = np.zeros( (nf,nm) )
rv_sigs = np.zeros( (nf,nm) )
initp = np.zeros(4)
initp[3]=0.5
initp[0]=0.0
spect_sdev = np.sqrt(vars)
fitted_spects = np.empty(fluxes.shape)
for i in range(nf):
# Start with initial guess of no intrinsic RV for the target.
initp[0] = -bcors[i] #!!! New Change
nbad=0
for j in range(nm):
# This is the *only* non-linear interpolation function that
# doesn't take forever
spl_ref = interp.InterpolatedUnivariateSpline(wave_ref[j,::-1],
ref_spect[j,::-1])
args = (wave[j,:], fluxes[i,j,:], spect_sdev[i,j,:], spl_ref)
# Remove edge effects in a slightly dodgy way.
# 20 pixels is about 30km/s.
args[2][:20] = np.inf
args[2][-20:] = np.inf
the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,diag=[1e3,1,1,1],Dfun=self.rv_shift_jac, full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,diag=[1e3,1e-6,1e-3,1], full_output=True,epsfcn=1e-9)
#The following line also doesn't work "out of the box".
#the_fit = op.minimize(self.rv_shift_chi2,initp,args=args)
#pdb.set_trace()
#Remove bad points...
resid = self.rv_shift_resid( the_fit[0], *args)
wbad = np.where( np.abs(resid) > bad_threshold)[0]
nbad += len(wbad)
#15 bad pixels in a single order is *crazy*
if len(wbad)>20:
fitted_spect = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
plt.clf()
plt.plot(args[0], args[1])
plt.plot(args[0][wbad], args[1][wbad],'o')
plt.plot(args[0], fitted_spect)
plt.xlabel("Wavelength")
plt.ylabel("Flux")
#print("Lots of 'bad' pixels. Type c to continue if not a problem")
#pdb.set_trace()
args[2][wbad] = np.inf
the_fit = op.leastsq(self.rv_shift_resid, initp,args=args, diag=[1e3,1,1,1], Dfun=self.rv_shift_jac, full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp,args=args, diag=[1e3,1e-6,1e-3,1], full_output=True, epsfcn=1e-9)
#Some outputs for testing
fitted_spects[i,j] = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
if ( np.abs(the_fit[0][0] - bcors[i]) < 1e-4 ):
#pdb.set_trace() #This shouldn't happen, and indicates a problem with the fit.
pass
#Save the fit and the uncertainty.
rvs[i,j] = the_fit[0][0]
try:
rv_sigs[i,j] = np.sqrt(the_fit[1][0,0])
except:
rv_sigs[i,j] = np.NaN
print("Done file {0:d}. Bad spectral pixels: {1:d}".format(i,nbad))
if return_fitted_spects:
return rvs, rv_sigs, fitted_spects
else:
return rvs, rv_sigs
def save_fluxes(self, files, fluxes, vars, bcors, wave, mjds, out_path):
"""Method to save the extracted spectra.
TODO:
Might want to remove the dependence on files (to get the headers) as it
will prevent (or complicate) the saving of the reference spectrum.
Parameters
----------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
out_path: String
The directory to save the extracted fluxes.
"""
# Loop through each extracted spectrum
for i, file in enumerate(files):
#try:
# Extract the header information from the file
header = pyfits.getheader(file)
file_name = file.split("/")[-1].split(".")[0] + "_extracted.fits"
full_path = out_path + file_name
# Save to fits
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(fluxes[i], header))
hl.append(pyfits.ImageHDU(vars[i]))
hl.append(pyfits.ImageHDU(wave))
col1 = pyfits.Column(name='bcor', format='D',
array=np.array([bcors[i]]))
col2 = pyfits.Column(name='mjd', format='D',
array=np.array([mjds[i]]))
cols = pyfits.ColDefs([col1, col2])
hl.append(pyfits.new_table(cols))
hl.writeto(full_path, clobber=True)
#except:
#print("Error: Some files may not have been saved.")
#print("Likely due to incompatible array sizes for frames.")
#continue
def save_ref_spect(self, files, ref_spect, vars_ref, wave_ref, bcors, mjds,
out_path, object):
"""Method to save an extracted reference spectrum
Parameters
----------
ref_spect: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars_ref: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation used to create ref_spect
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation used to create
ref_spect
out_path: String
The directory to save the reference spectrum
object: String
The object object observed.
"""
header = pyfits.header.Header()
n = str(len(files))
full_path = out_path + "reference_spectrum_" + n + "_" + object +".fits"
# Record which spectra were used to create the reference
for i, file in enumerate(files):
# Extract the file name of each file and store in the header
file_name = file.split("/")[-1].split(".")[0] + "_extracted.fits"
header_name = "COMB" + str(i)
comment = "Combined spectrum #" + str(i)
header[header_name] = (file_name, comment)
# Save to fits
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(ref_spect, header))
hl.append(pyfits.ImageHDU(vars_ref[0]))
hl.append(pyfits.ImageHDU(wave_ref))
col1 = pyfits.Column(name='bcor', format='D', array=np.array([bcors[0]]))
col2 = pyfits.Column(name='mjd', format='D',
array=np.array([mjds[0]]))
cols = pyfits.ColDefs([col1, col2])
hl.append(pyfits.new_table(cols))
hl.writeto(full_path, clobber=True)
def load_ref_spect(self, path):
"""Method to load a previously saved reference spectrum
Parameters
----------
path: string
The file path to the saved reference spectrum.
Returns
-------
ref_spect: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars_ref: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
bcors_ref: 1D np.array(float)
Barycentric correction for each observation used to create ref_spect
mjds_ref: 1D np.array(float)
Modified Julian Date (MJD) of each observation used to create
ref_spect
"""
hl = pyfits.open(path)
ref_spect = hl[0].data
vars_ref = hl[1].data
wave_ref = hl[2].data
bcors_ref = hl[3].data['bcor'][0]
mjds_ref = hl[3].data['mjd'][0]
hl.close()
return ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref
def load_fluxes(self, files):
"""Loads previously saved fluxes.
Parameters
----------
files: [string]
String list of filepaths of the saved fluxes
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
fluxes = []
vars = []
wave = []
bcors = []
mjds = []
for f in files:
hl = pyfits.open(f)
fluxes.append(hl[0].data)
vars.append(hl[1].data)
wave = hl[2].data # Only need one (assumption of same instrument)
bcors.append(hl[3].data['bcor'][0])
mjds.append(hl[3].data['mjd'][0])
hl.close()
fluxes = np.array(fluxes)
vars = np.array(vars)
#wave = np.array(hl[2].data)
bcors = np.array(bcors)
mjds = np.array(mjds)
return fluxes, vars, wave, bcors, mjds
def plot_rvs(self, rvs, rv_sigs, mjds, dates, bcors, plot_title):
"""Plots the barycentrically corrected Radial Velocities.
Note:
Not complete.
Parameters
----------
rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
bcors: 1D np.array(float)
Barycentric correction for each observation.
plot_title: String
Name of the plot
"""
# Dimensions (Number of observations and orders respectively)
nf = rvs.shape[0]
nm = rvs.shape[1]
# Plot the Barycentric corrected RVs. Note that a median over all orders
# is only a first step - a weighted mean is needed.
plt.clf()
rvs += bcors.repeat(nm).reshape( (nf,nm) )
rv_mn, wt_sum = np.average(rvs,axis=1, weights=1.0/rv_sigs**2,
returned=True)
rv_mn_sig = 1.0/np.sqrt(wt_sum)
rv_med1 = np.median(rvs,1)
rv_med2 = np.median(rvs[:,3:20],1)
#plt.plot_date([dates[i].plot_date for i in range(len(dates))], rv_mn)
#plt.errorbar(mjds, rv_mn, yerr=rv_mn_sig,fmt='o')
plt.errorbar(mjds, rv_med2, yerr=rv_mn_sig,fmt='o')
plt.xlabel('Date (MJD)')
plt.ylabel('Barycentric RV (m/s)')
plt.title(plot_title)
plt.plot_date([dates[i].plot_date for i in range(len(dates))], rv_mn)
plt.show()
def save_rvs(self, rvs, rv_sigs, bcor, mjds, bcor_rvs, base_save_path):
"""Method for saving calculated radial velocities and their errors to
csv files.
Parameters
----------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
base_save_path: string
The base of each of the csv file paths.
"""
# Dimensions (Number of observations and orders respectively)
nf = rvs.shape[0]
nm = rvs.shape[1]
# Setup save paths
rv_file = base_save_path + "_" + str(rvs.shape[0]) + "_rvs.csv"
rv_sig_file = base_save_path + "_" + str(rvs.shape[0]) + "_rv_sig.csv"
bcor_file = base_save_path + "_" + str(rvs.shape[0]) + "_bcor.csv"
bcor_rv_file = base_save_path + "_" + str(rvs.shape[0]) + "_bcor_rv.csv"
# Headers for each csv
rv_h = "RV in m/s for each order, for each MJD epoch"
rv_sig_h = "RV uncertainties in m/s for each order, for each MJD epoch"
bcor_h = "Barycentric correction in m/s"
bcor_rvs_h = "Barycentrically corrected RVs in m/s"
# Save rvs and errors
np.savetxt(rv_file, np.append(mjds.reshape(nf,1), rvs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=rv_h)
np.savetxt(rv_sig_file, np.append(mjds.reshape(nf,1),rv_sigs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=rv_sig_h)
np.savetxt(bcor_file, np.append(mjds.reshape(nf,1),bcor.reshape(nf,1),axis=1),
fmt="%10.4f" + ", %6.1f", header=bcor_h)
np.savetxt(bcor_rv_file, np.append(mjds.reshape(nf,1), bcor_rvs,axis=1),
fmt="%10.4f" + nm*", %6.1f", header=bcor_rvs_h)
def load_rvs(self, rvs_path, rv_sig_path, bcor_path=None):
"""Opens the saved RV, RV sig and bcor csv files and formats the
contents to be easily usable and non-redundant
Parameters
----------
rvs_path: string
File path to the rv csv
rv_sig_path: string
File path to the rv sig csv
bcor_path: string
File path to the bcor csv
Returns
-------
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
raw_rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
raw_rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
raw_bcor: 1D np.array(float)
RV barycentric correction for each observation
bcors_rvs: 2D np.array(float)
Barycentrically corrected radial velocity sigmas of format
(Observation, Order)
"""
# Import
rvs = np.loadtxt(rvs_path, delimiter=",")
rv_sig = np.loadtxt(rv_sig_path, delimiter=",")
# Format to remove mjd values from start of each row
mjds = rvs[:,0]
raw_rvs = rvs[:,1:]
raw_rv_sig = rv_sig[:,1:]
# Number of observations and orders respectively
nf = len(mjds)
nm = raw_rvs.shape[1]
# Only deal with barycentric correction if it is passed in
# (It may not be when dealing with ThAr files)
if bcor_path is not None:
bcors = np.loadtxt(bcor_path, delimiter=",")
raw_bcor = bcors[:,1]
bcor_rvs = raw_rvs + raw_bcor.repeat(nm).reshape( (nf, nm) )
return mjds, raw_rvs, raw_rv_sig, raw_bcor, bcor_rvs
else:
return mjds, raw_rvs, raw_rv_sig
| |
# Copyright (C) 2016-2022 Lightbits Labs Ltd.
# Copyright (C) 2020 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import http.client
import os
import re
import tempfile
import time
import traceback
from oslo_concurrency import lockutils
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from os_brick import exception
from os_brick.i18n import _
from os_brick.initiator.connectors import base
from os_brick.privileged import lightos as priv_lightos
from os_brick import utils
# Default number of rescan attempts when waiting for a volume's device.
DEVICE_SCAN_ATTEMPTS_DEFAULT = 5
# TCP port of the local LightOS discovery-client HTTP endpoint.
DISCOVERY_CLIENT_PORT = 6060
LOG = logging.getLogger(__name__)
# File-lock decorator shared with the other os-brick connectors.
synchronized = lockutils.synchronized_with_prefix('os-brick-')
# Matches per-controller ("slave") nvme device names such as nvme0c0n1,
# which must be skipped in favour of the plain namespace device.
nvmec_pattern = ".*nvme[0-9]+[cp][0-9]+.*"
nvmec_match = re.compile(nvmec_pattern)
class LightOSConnector(base.BaseLinuxConnector):
    """Connector class to attach/detach LightOS volumes using NVMe/TCP.

    Volumes are attached by dropping a per-volume config file into the
    LightOS discovery-client's watched directory; the discovery client then
    establishes the NVMe/TCP connections. Detach removes that file.
    """
    # Seconds to wait for the block device to appear after a connect.
    WAIT_DEVICE_TIMEOUT = 60
    def __init__(self,
                 root_helper,
                 driver=None,
                 execute=None,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 message_queue=None,
                 *args,
                 **kwargs):
        super(LightOSConnector, self).__init__(
            root_helper,
            driver=driver,
            execute=execute,
            device_scan_attempts=device_scan_attempts,
            *args, **kwargs)
        # Optional queue mirroring connect/disconnect events to the
        # bookkeeping loop in lightos_monitor().
        self.message_queue = message_queue
        # Directory watched by the discovery-client; one .conf per volume.
        self.DISCOVERY_DIR_PATH = '/etc/discovery-client/discovery.d/'
    @staticmethod
    def get_connector_properties(root_helper, *args, **kwargs):
        """The LightOS connector properties.

        Returns a dict with the host NQN and whether the local
        discovery-client service responded; empty if no host NQN exists.
        """
        props = {}
        lightos_connector = LightOSConnector(root_helper=root_helper,
                                             message_queue=None,
                                             execute=kwargs.get('execute'))
        hostnqn = utils.get_host_nqn()
        found_dsc = lightos_connector.find_dsc()
        if not found_dsc:
            LOG.debug('LIGHTOS: did not find dsc, continuing anyway.')
        if hostnqn:
            LOG.debug("LIGHTOS: finally hostnqn: %s dsc: %s",
                      hostnqn, found_dsc)
            props['nqn'] = hostnqn
            props['found_dsc'] = found_dsc
        else:
            LOG.debug('LIGHTOS: no hostnqn found.')
        return props
    def dsc_file_name(self, uuid):
        # Discovery-client config file path for the given volume uuid.
        return os.path.join(self.DISCOVERY_DIR_PATH, "%s.conf" % uuid)
    def find_dsc(self):
        """Return 'found' if the local discovery-client answers, else ''."""
        conn = http.client.HTTPConnection("localhost", DISCOVERY_CLIENT_PORT)
        # NOTE(review): the HTTPConnection is never closed on either path;
        # harmless for a one-shot probe but worth confirming upstream.
        try:
            conn.request("HEAD", "/metrics")
            resp = conn.getresponse()
            return 'found' if resp.status == http.client.OK else ''
        except Exception as e:
            # Any failure (service down, refused, timeout) means "not found".
            LOG.debug(f'LIGHTOS: {e}')
            out = ''
        return out
    def dsc_need_connect(self, connection_info):
        # A connect is needed only if no config file exists for the volume.
        return not os.path.isfile(self.dsc_file_name(connection_info['uuid']))
    def dsc_connect_volume(self, connection_info):
        """Write the volume's discovery config file (idempotent)."""
        if not self.dsc_need_connect(connection_info):
            return
        subsysnqn = connection_info['subsysnqn']
        uuid = connection_info['uuid']
        hostnqn = utils.get_host_nqn()
        # Build the file in a temp location, then move it into the watched
        # directory via the privileged helper so it appears atomically.
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as dscfile:
            dscfile.write('# os_brick connector dsc file for LightOS'
                          ' volume: {}\n'.format(uuid))
            # One discovery line per LightOS cluster node.
            for (ip, node) in connection_info['lightos_nodes'].items():
                transport = node['transport_type']
                host = node['target_portal']
                port = node['target_port']
                dscfile.write('-t {} -a {} -s {} -q {} -n {}\n'.format(
                    transport, host, port, hostnqn, subsysnqn))
            dscfile.flush()
            try:
                dest_name = self.dsc_file_name(uuid)
                priv_lightos.move_dsc_file(dscfile.name, dest_name)
            except Exception:
                # NOTE(review): the temp file is not removed on failure;
                # confirm whether the leak is acceptable.
                LOG.warning(
                    "LIGHTOS: Failed to create dsc file for connection with"
                    f" uuid:{uuid}")
                raise
    def dsc_disconnect_volume(self, connection_info):
        """Remove the volume's discovery config file via privileged helper."""
        uuid = connection_info['uuid']
        try:
            priv_lightos.delete_dsc_file(self.dsc_file_name(uuid))
        except Exception:
            LOG.warning("LIGHTOS: Failed delete dsc file uuid:{}".format(uuid))
            raise
    def monitor_db(self, lightos_db):
        # Re-assert every known connection (dsc_connect_volume is a no-op
        # when the config file already exists).
        for connection_info in lightos_db.values():
            self.dsc_connect_volume(connection_info)
    def monitor_message_queue(self, message_queue, lightos_db):
        """Drain pending ('add'|'delete', connection) events into the db."""
        while not message_queue.empty():
            msg = message_queue.get()
            op, connection = msg
            LOG.debug("LIGHTOS: queue got op: %s, connection: %s",
                      op, connection)
            if op == 'delete':
                LOG.info("LIGHTOS: Removing volume: %s from db",
                         connection['uuid'])
                if connection['uuid'] in lightos_db:
                    del lightos_db[connection['uuid']]
                else:
                    LOG.warning("LIGHTOS: No volume: %s found in db",
                                connection['uuid'])
            elif op == 'add':
                LOG.info("LIGHTOS: Adding volume: %s to db",
                         connection['uuid'])
                lightos_db[connection['uuid']] = connection
    def lightos_monitor(self, lightos_db, message_queue):
        '''Bookkeeping loop for lightos connections (never returns).

        This is useful when the connector is coming up on a running node
        that already has connected volumes.
        This is used in the Nova driver to restore connections after reboot.
        '''
        first_time = True
        while True:
            self.monitor_db(lightos_db)
            # give us some time before trying to access the MQ
            # for the first time
            if first_time:
                time.sleep(5)
                first_time = False
            else:
                time.sleep(1)
            self.monitor_message_queue(message_queue, lightos_db)
    # This is part of our abstract interface
    def get_search_path(self):
        # Devices always appear under /dev.
        return '/dev'
    # This is part of our abstract interface
    def get_volume_paths(self, connection_properties):
        # The caller supplies the resolved device path directly.
        path = connection_properties['device_path']
        return [path]
    def _check_device_exists_using_dev_lnk(self, uuid):
        """Resolve /dev/disk/by-id/nvme-uuid.<uuid>; None if absent."""
        lnk_path = f"/dev/disk/by-id/nvme-uuid.{uuid}"
        if os.path.exists(lnk_path):
            devname = os.path.realpath(lnk_path)
            # Only accept links that actually point at an nvme device node.
            if devname.startswith("/dev/nvme"):
                LOG.info("LIGHTOS: devpath %s detected for uuid %s",
                         devname, uuid)
                return devname
        return None
    def _check_device_exists_reading_block_class(self, uuid):
        """Fallback lookup: scan /sys/class/block/*/wwid for the uuid."""
        file_path = "/sys/class/block/*/wwid"
        wwid = "uuid." + uuid
        for match_path in glob.glob(file_path):
            try:
                with open(match_path, "r") as f:
                    match_wwid = f.readline()
            except Exception:
                LOG.warning("LIGHTOS: Failed to read file %s",
                            match_path)
                continue
            if wwid != match_wwid.strip():
                continue
            # skip slave nvme devices, for example: nvme0c0n1
            if nvmec_match.match(match_path.split("/")[-2]):
                continue
            LOG.info("LIGHTOS: matching uuid %s was found"
                     " for device path %s", uuid, match_path)
            # The device name is the directory component of the sysfs path.
            return os.path.join("/dev", match_path.split("/")[-2])
        return None
    @utils.trace
    def _get_device_by_uuid(self, uuid):
        """Poll for the volume's device node until WAIT_DEVICE_TIMEOUT."""
        endtime = time.time() + self.WAIT_DEVICE_TIMEOUT
        while time.time() < endtime:
            try:
                # Fast path: udev by-id symlink.
                device = self._check_device_exists_using_dev_lnk(uuid)
                if device:
                    return device
            except Exception as e:
                LOG.debug(f'LIGHTOS: {e}')
            # Slow path: scan sysfs directly.
            device = self._check_device_exists_reading_block_class(uuid)
            if device:
                return device
            time.sleep(1)
        return None
    def _get_size_by_uuid(self, uuid):
        """Return the volume size in bytes (512-byte sysfs sectors)."""
        devpath = self._get_device_by_uuid(uuid)
        # NOTE(review): devpath may be None here, which raises
        # AttributeError outside the try block — confirm callers.
        devname = devpath.split("/")[-1]
        try:
            size_path_name = os.path.join("/sys/class/block/", devname, "size")
            with open(size_path_name, "r") as f:
                size_blks = f.read().strip()
            bytesize = int(size_blks) * 512
            return bytesize
        except Exception:
            LOG.warning("LIGHTOS: Could not find the size at for"
                        " uuid %s in %s", uuid, devpath)
            return None
    @utils.trace
    @synchronized('volume_op')
    def connect_volume(self, connection_properties):
        """Discover and attach the volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
               connection_properties must include:
               nqn - NVMe subsystem name to the volume to be connected
               target_port - NVMe target port that hosts the nqn sybsystem
               target_portal - NVMe target ip that hosts the nqn sybsystem
        :type connection_properties: dict
        :returns: dict
        """
        device_info = {'type': 'block'}
        uuid = connection_properties['uuid']
        LOG.info("LIGHTOS: connect_volume called for volume %s, connection"
                 " properties: %s",
                 uuid, connection_properties)
        self.dsc_connect_volume(connection_properties)
        device_path = self._get_device_by_uuid(uuid)
        if not device_path:
            # Roll back the dsc file so a retry starts clean.
            msg = _("Device with uuid %s did not show up" % uuid)
            priv_lightos.delete_dsc_file(self.dsc_file_name(uuid))
            raise exception.BrickException(message=msg)
        device_info['path'] = device_path
        # bookkeeping lightos connections - add connection
        if self.message_queue:
            self.message_queue.put(('add', connection_properties))
        return device_info
    @utils.trace
    @synchronized('volume_op')
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Disconnect a volume from the local host.

        The connection_properties are the same as from connect_volume.
        The device_info is returned from connect_volume.
        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        :param force: Whether to forcefully disconnect even if flush fails.
        :type force: bool
        :param ignore_errors: When force is True, this will decide whether to
                              ignore errors or raise an exception once finished
                              the operation.  Default is False.
        :type ignore_errors: bool
        """
        # bookkeeping lightos connections - delete connection
        if self.message_queue:
            self.message_queue.put(('delete', connection_properties))
        uuid = connection_properties['uuid']
        LOG.debug('LIGHTOS: disconnect_volume called for volume %s', uuid)
        device_path = self._get_device_by_uuid(uuid)
        # Collect failures so the dsc file removal is always attempted.
        exc = exception.ExceptionChainer()
        try:
            if device_path:
                # Flush outstanding I/O before tearing down the path.
                self._linuxscsi.flush_device_io(device_path)
        except putils.ProcessExecutionError as e:
            exc.add_exception(type(e), e, traceback.format_exc())
            if not (force or ignore_errors):
                raise
        try:
            self.dsc_disconnect_volume(connection_properties)
        except Exception as e:
            exc.add_exception(type(e), e, traceback.format_exc())
        if exc:
            if not ignore_errors:
                raise exc
    @utils.trace
    @synchronized('volume_op')
    def extend_volume(self, connection_properties):
        # Re-read the (already resized) device's size from sysfs.
        uuid = connection_properties['uuid']
        new_size = self._get_size_by_uuid(uuid)
        return new_size
| |
"""
A test spanning all the capabilities of all the serializers.
This class defines sample data and a dynamically generated
test case that is capable of testing the capabilities of
the serializers. This includes all valid data values, plus
forward, backwards and self references.
"""
from __future__ import unicode_literals
import datetime
import decimal
from unittest import skipUnless
from django.core import serializers
from django.core.serializers import SerializerDoesNotExist
from django.core.serializers.base import DeserializationError
from django.core.serializers.xml_serializer import DTDForbidden
from django.db import connection, models
from django.http import HttpResponse
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.functional import curry
from .models import (
Anchor, AutoNowDateTimeData, BaseModel, BigIntegerData, BinaryData,
BooleanData, BooleanPKData, CharData, CharPKData, ComplexModel, DateData,
DateTimeData, DecimalData, DecimalPKData, EmailData, EmailPKData,
ExplicitInheritBaseModel, FileData, FilePathData, FilePathPKData, FKData,
FKDataNaturalKey, FKDataToField, FKDataToO2O, FKSelfData, FloatData,
FloatPKData, GenericData, GenericIPAddressData, GenericIPAddressPKData,
InheritAbstractModel, InheritBaseModel, IntegerData, IntegerPKData,
Intermediate, LengthModel, M2MData, M2MIntermediateData, M2MSelfData,
ModifyingSaveData, NaturalKeyAnchor, NullBooleanData, O2OData,
PositiveIntegerData, PositiveIntegerPKData, PositiveSmallIntegerData,
PositiveSmallIntegerPKData, ProxyBaseModel, ProxyProxyBaseModel, SlugData,
SlugPKData, SmallData, SmallPKData, Tag, TextData, TimeData, UniqueAnchor,
)
try:
import yaml
except ImportError:
yaml = None
# A set of functions that can be used to recreate
# test data objects of various kinds.
# The save method is a raw base model save, to make
# sure that the data in the database matches the
# exact test case.
def data_create(pk, klass, data):
    """Raw-save a *klass* row whose ``data`` field holds the given value."""
    obj = klass(id=pk)
    obj.data = data
    models.Model.save_base(obj, raw=True)
    return [obj]
def generic_create(pk, klass, data):
    """Raw-save a row whose payload is data[0]; remaining items become tags."""
    obj = klass(id=pk)
    obj.data = data[0]
    models.Model.save_base(obj, raw=True)
    for tag_value in data[1:]:
        obj.tags.create(data=tag_value)
    return [obj]
def fk_create(pk, klass, data):
    """Raw-save a row pointing its FK at the given target pk."""
    obj = klass(id=pk)
    # Direct attribute assignment; the original used setattr for the same.
    obj.data_id = data
    models.Model.save_base(obj, raw=True)
    return [obj]
def m2m_create(pk, klass, data):
    """Raw-save a row, then attach its many-to-many targets."""
    obj = klass(id=pk)
    # The row must exist before the m2m set can be assigned.
    models.Model.save_base(obj, raw=True)
    obj.data = data
    return [obj]
def im2m_create(pk, klass, data):
    """Raw-save a bare row; through-model links are created separately."""
    obj = klass(id=pk)
    models.Model.save_base(obj, raw=True)
    return [obj]
def im_create(pk, klass, data):
    """Raw-save an intermediate (through-model) row from a dict spec."""
    obj = klass(id=pk)
    obj.right_id = data['right']
    obj.left_id = data['left']
    # 'extra' is optional; the model supplies a default when absent.
    if 'extra' in data:
        obj.extra = data['extra']
    models.Model.save_base(obj, raw=True)
    return [obj]
def o2o_create(pk, klass, data):
    """Raw-save a one-to-one row; the pk is implied by the relation."""
    obj = klass()
    obj.data_id = data
    models.Model.save_base(obj, raw=True)
    return [obj]
def pk_create(pk, klass, data):
    """Raw-save a row whose data field IS its primary key."""
    obj = klass()
    obj.data = data
    models.Model.save_base(obj, raw=True)
    return [obj]
def inherited_create(pk, klass, data):
    """Save an inherited-model instance and collect its auto-created parents.

    Returns the child instance followed by one instance per concrete parent.
    """
    instance = klass(id=pk, **data)
    # This isn't a raw save because:
    # 1) we're testing inheritance, not field behavior, so none
    #    of the field values need to be protected.
    # 2) saving the child class and having the parent created
    #    automatically is easier than manually creating both.
    models.Model.save(instance)
    created = [instance]
    # Iterate keys directly: the field half of .items() was unused, and the
    # original loop variable shadowed the ``klass`` parameter.
    for parent_class in instance._meta.parents:
        created.append(parent_class.objects.get(id=pk))
    return created
# A set of functions that can be used to compare
# test data objects of various kinds
def data_compare(testcase, pk, klass, data):
    """Assert the stored ``data`` field round-tripped unchanged."""
    obj = klass.objects.get(id=pk)
    fmt = "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)"
    if klass == BinaryData and data is not None:
        # Binary payloads are compared as bytes to normalise memoryviews.
        expected = bytes(data)
        actual = bytes(obj.data)
        testcase.assertEqual(
            expected, actual,
            fmt % (pk, repr(expected), type(data), repr(actual),
                   type(obj.data)))
    else:
        testcase.assertEqual(
            data, obj.data,
            fmt % (pk, data, type(data), obj, type(obj.data)))
def generic_compare(testcase, pk, klass, data):
    """Check the payload (data[0]) and the ordered tag strings (data[1:])."""
    obj = klass.objects.get(id=pk)
    testcase.assertEqual(data[0], obj.data)
    stored_tags = [tag.data for tag in obj.tags.order_by('id')]
    testcase.assertEqual(data[1:], stored_tags)
def fk_compare(testcase, pk, klass, data):
    """Check the FK target pk survived the round trip."""
    obj = klass.objects.get(id=pk)
    testcase.assertEqual(data, obj.data_id)
def m2m_compare(testcase, pk, klass, data):
    """Check the ordered set of m2m target ids survived the round trip."""
    obj = klass.objects.get(id=pk)
    related_ids = [related.id for related in obj.data.order_by('id')]
    testcase.assertEqual(data, related_ids)
def im2m_compare(testcase, pk, klass, data):
    """Existence is the whole check; get() raises if the row is missing."""
    klass.objects.get(id=pk)
def im_compare(testcase, pk, klass, data):
    """Check both FK halves of the through row, plus the optional extra."""
    obj = klass.objects.get(id=pk)
    testcase.assertEqual(data['left'], obj.left_id)
    testcase.assertEqual(data['right'], obj.right_id)
    # When 'extra' was omitted at create time the model default applies.
    expected_extra = data.get('extra', "doesn't matter")
    testcase.assertEqual(expected_extra, obj.extra)
def o2o_compare(testcase, pk, klass, data):
    """Look the row up by its o2o target and confirm the link id."""
    obj = klass.objects.get(data=data)
    testcase.assertEqual(data, obj.data_id)
def pk_compare(testcase, pk, klass, data):
    """Look the row up by its pk-bearing data field and confirm the value."""
    obj = klass.objects.get(data=data)
    testcase.assertEqual(data, obj.data)
def inherited_compare(testcase, pk, klass, data):
    """Check every field in the spec dict against the stored instance."""
    obj = klass.objects.get(id=pk)
    for field_name, expected in data.items():
        testcase.assertEqual(expected, getattr(obj, field_name))
# Define some data types. Each data type is
# actually a pair of functions; one to create
# and one to compare objects of that type
data_obj = (data_create, data_compare)
generic_obj = (generic_create, generic_compare)
fk_obj = (fk_create, fk_compare)
m2m_obj = (m2m_create, m2m_compare)
im2m_obj = (im2m_create, im2m_compare)
im_obj = (im_create, im_compare)
o2o_obj = (o2o_create, o2o_compare)
pk_obj = (pk_create, pk_compare)
inherited_obj = (inherited_create, inherited_compare)
# Master fixture table; every registered serializer is run over the whole
# list. PK value blocks are grouped by field type; pk 300/500 anchors are
# referenced by the FK/M2M/O2O entries before and after them to exercise
# forward ("Pre") and backward ("Post") references.
test_data = [
    # Format: (data type, PK value, Model Class, data)
    (data_obj, 1, BinaryData, six.memoryview(b"\x05\xFD\x00")),
    (data_obj, 2, BinaryData, None),
    (data_obj, 5, BooleanData, True),
    (data_obj, 6, BooleanData, False),
    (data_obj, 10, CharData, "Test Char Data"),
    (data_obj, 11, CharData, ""),
    (data_obj, 12, CharData, "None"),
    (data_obj, 13, CharData, "null"),
    (data_obj, 14, CharData, "NULL"),
    (data_obj, 15, CharData, None),
    # (We use something that will fit into a latin1 database encoding here,
    # because that is still the default used on many system setups.)
    (data_obj, 16, CharData, '\xa5'),
    (data_obj, 20, DateData, datetime.date(2006, 6, 16)),
    (data_obj, 21, DateData, None),
    (data_obj, 30, DateTimeData, datetime.datetime(2006, 6, 16, 10, 42, 37)),
    (data_obj, 31, DateTimeData, None),
    (data_obj, 40, EmailData, "hovercraft@example.com"),
    (data_obj, 41, EmailData, None),
    (data_obj, 42, EmailData, ""),
    (data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'),
    # (data_obj, 51, FileData, None),
    (data_obj, 52, FileData, ""),
    (data_obj, 60, FilePathData, "/foo/bar/whiz.txt"),
    (data_obj, 61, FilePathData, None),
    (data_obj, 62, FilePathData, ""),
    (data_obj, 70, DecimalData, decimal.Decimal('12.345')),
    (data_obj, 71, DecimalData, decimal.Decimal('-12.345')),
    (data_obj, 72, DecimalData, decimal.Decimal('0.0')),
    (data_obj, 73, DecimalData, None),
    (data_obj, 74, FloatData, 12.345),
    (data_obj, 75, FloatData, -12.345),
    (data_obj, 76, FloatData, 0.0),
    (data_obj, 77, FloatData, None),
    (data_obj, 80, IntegerData, 123456789),
    (data_obj, 81, IntegerData, -123456789),
    (data_obj, 82, IntegerData, 0),
    (data_obj, 83, IntegerData, None),
    # (XX, ImageData
    (data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
    (data_obj, 96, GenericIPAddressData, None),
    (data_obj, 100, NullBooleanData, True),
    (data_obj, 101, NullBooleanData, False),
    (data_obj, 102, NullBooleanData, None),
    (data_obj, 120, PositiveIntegerData, 123456789),
    (data_obj, 121, PositiveIntegerData, None),
    (data_obj, 130, PositiveSmallIntegerData, 12),
    (data_obj, 131, PositiveSmallIntegerData, None),
    (data_obj, 140, SlugData, "this-is-a-slug"),
    (data_obj, 141, SlugData, None),
    (data_obj, 142, SlugData, ""),
    (data_obj, 150, SmallData, 12),
    (data_obj, 151, SmallData, -12),
    (data_obj, 152, SmallData, 0),
    (data_obj, 153, SmallData, None),
    (data_obj, 160, TextData, """This is a long piece of text.
It contains line breaks.
Several of them.
The end."""),
    (data_obj, 161, TextData, ""),
    (data_obj, 162, TextData, None),
    (data_obj, 170, TimeData, datetime.time(10, 42, 37)),
    (data_obj, 171, TimeData, None),
    (generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']),
    (generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']),
    (data_obj, 300, Anchor, "Anchor 1"),
    (data_obj, 301, Anchor, "Anchor 2"),
    (data_obj, 302, UniqueAnchor, "UAnchor 1"),
    (fk_obj, 400, FKData, 300),  # Post reference
    (fk_obj, 401, FKData, 500),  # Pre reference
    (fk_obj, 402, FKData, None),  # Empty reference
    (m2m_obj, 410, M2MData, []),  # Empty set
    (m2m_obj, 411, M2MData, [300, 301]),  # Post reference
    (m2m_obj, 412, M2MData, [500, 501]),  # Pre reference
    (m2m_obj, 413, M2MData, [300, 301, 500, 501]),  # Pre and Post reference
    (o2o_obj, None, O2OData, 300),  # Post reference
    (o2o_obj, None, O2OData, 500),  # Pre reference
    (fk_obj, 430, FKSelfData, 431),  # Pre reference
    (fk_obj, 431, FKSelfData, 430),  # Post reference
    (fk_obj, 432, FKSelfData, None),  # Empty reference
    (m2m_obj, 440, M2MSelfData, []),
    (m2m_obj, 441, M2MSelfData, []),
    (m2m_obj, 442, M2MSelfData, [440, 441]),
    (m2m_obj, 443, M2MSelfData, [445, 446]),
    (m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]),
    (m2m_obj, 445, M2MSelfData, []),
    (m2m_obj, 446, M2MSelfData, []),
    (fk_obj, 450, FKDataToField, "UAnchor 1"),
    (fk_obj, 451, FKDataToField, "UAnchor 2"),
    (fk_obj, 452, FKDataToField, None),
    (fk_obj, 460, FKDataToO2O, 300),
    (im2m_obj, 470, M2MIntermediateData, None),
    # testing post- and prereferences and extra fields
    (im_obj, 480, Intermediate, {'right': 300, 'left': 470}),
    (im_obj, 481, Intermediate, {'right': 300, 'left': 490}),
    (im_obj, 482, Intermediate, {'right': 500, 'left': 470}),
    (im_obj, 483, Intermediate, {'right': 500, 'left': 490}),
    (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}),
    (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}),
    (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}),
    (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}),
    (im2m_obj, 490, M2MIntermediateData, []),
    (data_obj, 500, Anchor, "Anchor 3"),
    (data_obj, 501, Anchor, "Anchor 4"),
    (data_obj, 502, UniqueAnchor, "UAnchor 2"),
    (pk_obj, 601, BooleanPKData, True),
    (pk_obj, 602, BooleanPKData, False),
    (pk_obj, 610, CharPKData, "Test Char PKData"),
    # (pk_obj, 620, DatePKData, datetime.date(2006, 6, 16)),
    # (pk_obj, 630, DateTimePKData, datetime.datetime(2006, 6, 16, 10, 42, 37)),
    (pk_obj, 640, EmailPKData, "hovercraft@example.com"),
    # (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'),
    (pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"),
    (pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')),
    (pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')),
    (pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')),
    (pk_obj, 673, FloatPKData, 12.345),
    (pk_obj, 674, FloatPKData, -12.345),
    (pk_obj, 675, FloatPKData, 0.0),
    (pk_obj, 680, IntegerPKData, 123456789),
    (pk_obj, 681, IntegerPKData, -123456789),
    (pk_obj, 682, IntegerPKData, 0),
    # (XX, ImagePKData
    (pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
    # (pk_obj, 700, NullBooleanPKData, True),
    # (pk_obj, 701, NullBooleanPKData, False),
    (pk_obj, 720, PositiveIntegerPKData, 123456789),
    (pk_obj, 730, PositiveSmallIntegerPKData, 12),
    (pk_obj, 740, SlugPKData, "this-is-a-slug"),
    (pk_obj, 750, SmallPKData, 12),
    (pk_obj, 751, SmallPKData, -12),
    (pk_obj, 752, SmallPKData, 0),
    # (pk_obj, 760, TextPKData, """This is a long piece of text.
    # It contains line breaks.
    # Several of them.
    # The end."""),
    # (pk_obj, 770, TimePKData, datetime.time(10, 42, 37)),
    # (pk_obj, 790, XMLPKData, "<foo></foo>"),
    (data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006, 6, 16, 10, 42, 37)),
    (data_obj, 810, ModifyingSaveData, 42),
    (inherited_obj, 900, InheritAbstractModel, {'child_data': 37, 'parent_data': 42}),
    (inherited_obj, 910, ExplicitInheritBaseModel, {'child_data': 37, 'parent_data': 42}),
    (inherited_obj, 920, InheritBaseModel, {'child_data': 37, 'parent_data': 42}),
    (data_obj, 1000, BigIntegerData, 9223372036854775807),
    (data_obj, 1001, BigIntegerData, -9223372036854775808),
    (data_obj, 1002, BigIntegerData, 0),
    (data_obj, 1003, BigIntegerData, None),
    (data_obj, 1004, LengthModel, 0),
    (data_obj, 1005, LengthModel, 1),
]
# Extra fixtures exercised only by the natural-foreign-key serializer test.
natural_key_test_data = [
    (data_obj, 1100, NaturalKeyAnchor, "Natural Key Anghor"),
    (fk_obj, 1101, FKDataNaturalKey, 1100),
    (fk_obj, 1102, FKDataNaturalKey, None),
]
# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
    # Drop plain-data fixtures whose field allows "" but whose value is None.
    test_data = [data for data in test_data
                 if not (data[0] == data_obj and
                         data[2]._meta.get_field('data').empty_strings_allowed and
                         data[3] is None)]
# Regression test for #8651 -- a FK to an object with PK of 0
# This won't work on MySQL since it won't let you create an object
# with an autoincrement primary key of 0,
if connection.features.allows_auto_pk_0:
    test_data.extend([
        (data_obj, 0, Anchor, "Anchor 0"),
        (fk_obj, 465, FKData, 0),
    ])
# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
@skipUnlessDBFeature('can_defer_constraint_checks')
class SerializerTests(TestCase):
    """Serializer behaviours that are independent of any one fixture table."""
    def test_get_unknown_serializer(self):
        """
        #15889: get_serializer('nonsense') raises a SerializerDoesNotExist
        """
        # SerializerDoesNotExist subclasses KeyError, so both must raise.
        for exc_class in (SerializerDoesNotExist, KeyError):
            with self.assertRaises(exc_class):
                serializers.get_serializer("nonsense")
        # SerializerDoesNotExist is instantiated with the nonexistent format
        with self.assertRaises(SerializerDoesNotExist) as raised:
            serializers.get_serializer("nonsense")
        self.assertEqual(raised.exception.args, ("nonsense",))
    def test_unregister_unknown_serializer(self):
        """Unregistering an unknown format raises SerializerDoesNotExist."""
        with self.assertRaises(SerializerDoesNotExist):
            serializers.unregister_serializer("nonsense")
    def test_get_unknown_deserializer(self):
        """Requesting an unknown deserializer raises SerializerDoesNotExist."""
        with self.assertRaises(SerializerDoesNotExist):
            serializers.get_deserializer("nonsense")
    def test_json_deserializer_exception(self):
        """Malformed JSON surfaces as DeserializationError, not ValueError."""
        with self.assertRaises(DeserializationError):
            # Deserialization is lazy; consume the generator to trigger it.
            for obj in serializers.deserialize("json", """[{"pk":1}"""):
                pass
    @skipUnless(yaml, "PyYAML not installed")
    def test_yaml_deserializer_exception(self):
        """Malformed YAML surfaces as DeserializationError."""
        with self.assertRaises(DeserializationError):
            for obj in serializers.deserialize("yaml", "{"):
                pass
    def test_serialize_proxy_model(self):
        """Proxy models serialize identically to their concrete base."""
        BaseModel.objects.create(parent_data=1)
        base_data = serializers.serialize("json", BaseModel.objects.all())
        proxy_data = serializers.serialize(
            "json", ProxyBaseModel.objects.all())
        proxy_proxy_data = serializers.serialize(
            "json", ProxyProxyBaseModel.objects.all())
        # Apart from the model label, the payloads must match exactly.
        self.assertEqual(base_data, proxy_data.replace('proxy', ''))
        self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))
def serializerTest(format, self):
    """Round-trip ``test_data`` through *format* and verify the result.

    Bound to each registered serializer format via partial application;
    ``self`` is the running SerializerTests instance.
    """
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))
        # Register the class so the count check below has keys to work on;
        # the original left instance_count empty, making both loops over
        # it silent no-ops.
        instance_count[klass] = 0
    # Get a count of the number of objects created for each class
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()
    # Add the generic tagged objects to the object list
    objects.extend(Tag.objects.all())
    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2)
    for obj in serializers.deserialize(format, serialized_data):
        obj.save()
    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in test_data:
        func[1](self, pk, klass, datum)
    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
    for klass, count in instance_count.items():
        self.assertEqual(count, klass.objects.count())
def naturalKeySerializerTest(format, self):
    """Round-trip the natural-key test dataset through ``format``."""
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in natural_key_test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))
        # Record each model class seen so its object count can be checked
        # below.  (Previously nothing was ever added to instance_count, so
        # the count-verification loops iterated an empty dict and asserted
        # nothing.)
        instance_count[klass] = 0

    # Get a count of the number of objects created for each class
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()

    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2,
        use_natural_foreign_keys=True)

    for obj in serializers.deserialize(format, serialized_data):
        obj.save()

    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in natural_key_test_data:
        func[1](self, pk, klass, datum)

    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
    for klass, count in instance_count.items():
        self.assertEqual(count, klass.objects.count())
def fieldsTest(format, self):
    """Serializing with a ``fields`` whitelist omits the other fields."""
    obj = ComplexModel(field1='first', field2='second', field3='third')
    obj.save_base(raw=True)

    # Serialize only two of the three fields, then load the result back.
    payload = serializers.serialize(format, [obj], indent=2, fields=('field1', 'field3'))
    result = next(serializers.deserialize(format, payload))

    # Whitelisted fields round-trip; the omitted one comes back blank.
    self.assertEqual(result.object.field1, 'first')
    self.assertEqual(result.object.field2, '')
    self.assertEqual(result.object.field3, 'third')
def streamTest(format, self):
    """Serializing to a stream writes the same content as returning a string."""
    obj = ComplexModel(field1='first', field2='second', field3='third')
    obj.save_base(raw=True)

    for stream in (six.StringIO(), HttpResponse()):
        # Serialize into the stream, and serialize normally for comparison.
        serializers.serialize(format, [obj], indent=2, stream=stream)
        string_data = serializers.serialize(format, [obj], indent=2)

        # The stream's accumulated content must match the returned string.
        if isinstance(stream, six.StringIO):
            self.assertEqual(string_data, stream.getvalue())
        else:
            self.assertEqual(string_data, stream.content.decode('utf-8'))
def naturalKeyTest(format, self):
    """Natural primary keys only resolve for rows still in the database."""
    book1 = {'data': '978-1590597255', 'title': 'The Definitive Guide to '
             'Django: Web Development Done Right'}
    book2 = {'data': '978-1590599969', 'title': 'Practical Django Projects'}

    # Create the books.
    adrian = NaturalKeyAnchor.objects.create(**book1)
    james = NaturalKeyAnchor.objects.create(**book2)

    # Serialize the books.
    string_data = serializers.serialize(
        format, NaturalKeyAnchor.objects.all(), indent=2,
        use_natural_foreign_keys=True, use_natural_primary_keys=True)

    # Delete one book (to prove that the natural key generation will only
    # restore the primary keys of books found in the database via the
    # get_natural_key manager method).
    james.delete()

    # Deserialize and test.
    books = list(serializers.deserialize(format, string_data))
    self.assertEqual(len(books), 2)

    # The surviving book resolves back to its original pk; the deleted
    # one deserializes with no pk at all.
    self.assertEqual(books[0].object.title, book1['title'])
    self.assertEqual(books[0].object.pk, adrian.pk)
    self.assertEqual(books[1].object.title, book2['title'])
    self.assertEqual(books[1].object.pk, None)
# Dynamically attach one test method per registered serializer format.
# Formats whose serializer failed to import (BadSerializer placeholders)
# and the 'geojson' format are skipped.  curry() binds the format name so
# each generated method exercises exactly one serializer.
for format in [f for f in serializers.get_serializer_formats()
        if not isinstance(serializers.get_serializer(f), serializers.BadSerializer) and not f == 'geojson']:
    setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format))
    setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format))
    setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format))
    setattr(SerializerTests, 'test_' + format + '_serializer_natural_keys', curry(naturalKeyTest, format))
    # The 'python' format gets no stream test — presumably because it has
    # no string/stream representation; confirm before changing.
    if format != 'python':
        setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
class XmlDeserializerSecurityTests(TestCase):

    def test_no_dtd(self):
        """
        The XML deserializer shouldn't allow a DTD.

        This is the most straightforward way to prevent all entity definitions
        and avoid both external entities and entity-expansion attacks.
        """
        payload = (
            '<?xml version="1.0" standalone="no"?>'
            '<!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
        )
        with self.assertRaises(DTDForbidden):
            next(serializers.deserialize('xml', payload))
| |
import json
from collections import defaultdict
from datetime import datetime
from django.conf import settings
import langcodes
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
from couchdbkit.exceptions import ResourceNotFound
from crispy_forms.utils import render_crispy_form
from corehq.apps.registry.utils import get_data_registry_dropdown_options
from corehq.apps.sso.models import IdentityProvider
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.http import (
Http404,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
HttpResponseBadRequest,
)
from django.http.response import HttpResponseServerError
from django.shortcuts import render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ngettext, ugettext_lazy, ugettext_noop
from corehq.apps.users.analytics import get_role_user_count
from dimagi.utils.couch import CriticalSection
from soil.exceptions import TaskFailedError
from soil.util import expose_cached_download, get_download_context
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_GET, require_POST
from django_digest.decorators import httpdigest
from django_otp.plugins.otp_static.models import StaticToken
from django_prbac.utils import has_privilege
from memoized import memoized
from corehq import privileges, toggles
from corehq.apps.accounting.decorators import always_allow_project_access, requires_privilege_with_fallback
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.analytics.tasks import (
HUBSPOT_INVITATION_SENT_FORM,
send_hubspot_form,
track_workflow,
)
from corehq.apps.app_manager.dbaccessors import get_app_languages
from corehq.apps.cloudcare.esaccessors import login_as_user_filter
from corehq.apps.custom_data_fields.models import PROFILE_SLUG
from corehq.apps.domain.decorators import (
domain_admin_required,
login_and_domain_required,
require_superuser,
)
from corehq.apps.domain.forms import clean_password
from corehq.apps.domain.models import Domain
from corehq.apps.domain.views.base import BaseDomainView
from corehq.apps.enterprise.models import EnterprisePermissions
from corehq.apps.es import UserES, queries
from corehq.apps.hqwebapp.crispy import make_form_readonly
from corehq.apps.locations.permissions import (
location_safe,
user_can_access_other_user,
)
from corehq.apps.registration.forms import (
AdminInvitesUserForm,
)
from corehq.apps.reports.util import get_possible_reports
from corehq.apps.sms.mixin import BadSMSConfigException
from corehq.apps.sms.verify import (
VERIFICATION__ALREADY_IN_USE,
VERIFICATION__ALREADY_VERIFIED,
VERIFICATION__RESENT_PENDING,
VERIFICATION__WORKFLOW_STARTED,
initiate_sms_verification_workflow,
)
from corehq.apps.translations.models import SMSTranslations
from corehq.apps.userreports.util import has_report_builder_access
from corehq.apps.users.audit.change_messages import UserChangeMessage
from corehq.apps.users.decorators import (
can_use_filtered_user_download,
require_can_edit_or_view_web_users,
require_can_edit_web_users,
require_can_view_roles,
require_permission_to_edit_user,
)
from corehq.apps.users.exceptions import MissingRoleException
from corehq.apps.users.forms import (
BaseUserInfoForm,
CommtrackUserForm,
SetUserPasswordForm,
UpdateUserRoleForm,
)
from corehq.apps.users.landing_pages import get_allowed_landing_pages, validate_landing_page
from corehq.apps.users.models import (
CommCareUser,
CouchUser,
DomainMembershipError,
DomainRemovalRecord,
DomainRequest,
Invitation,
StaticRole,
WebUser,
Permissions,
UserRole,
)
from corehq.apps.users.util import log_user_change
from corehq.apps.users.views.utils import get_editable_role_choices, BulkUploadResponseWrapper
from corehq.apps.user_importer.importer import UserUploadError
from corehq.apps.user_importer.models import UserUploadRecord
from corehq.apps.user_importer.tasks import import_users_and_groups, parallel_user_import
from corehq.const import USER_CHANGE_VIA_WEB
from corehq.pillows.utils import WEB_USER_TYPE
from corehq.toggles import PARALLEL_USER_IMPORTS
from corehq.util.couch import get_document_or_404
from corehq.util.view_utils import json_error
from corehq.util.workbook_json.excel import (
WorkbookJSONError,
WorksheetNotFound,
get_workbook,
)
def _users_context(request, domain):
    """Template context shared by user-management pages for ``domain``."""
    couch_user = request.couch_user
    web_users = WebUser.by_domain(domain)

    # Scope the requester and every listed user to this domain so later
    # attribute lookups resolve against the right membership.
    for member in [couch_user] + list(web_users):
        member.current_domain = domain

    return {
        'web_users': web_users,
        'domain': domain,
        'couch_user': couch_user,
    }
class BaseUserSettingsView(BaseDomainView):
    """Common base for every view under the Users section."""

    section_name = ugettext_noop("Users")

    @property
    @memoized
    def section_url(self):
        # Landing page of the Users section for this domain.
        return reverse(DefaultProjectUserSettingsView.urlname, args=[self.domain])

    @property
    @memoized
    def couch_user(self):
        """The requesting user, scoped to the current domain (may be None)."""
        requester = self.request.couch_user
        if requester:
            requester.current_domain = self.domain
        return requester

    @property
    def main_context(self):
        base = super(BaseUserSettingsView, self).main_context
        base.update({'couch_user': self.couch_user})
        return base
@method_decorator(always_allow_project_access, name='dispatch')
@location_safe
class DefaultProjectUserSettingsView(BaseUserSettingsView):
    """Entry point for the Users section.

    Redirects to the first user-management page the requesting user is
    allowed to see; raises 404 when they can see none of them.
    """
    urlname = "users_default"

    @property
    @memoized
    def redirect(self):
        """URL of the best landing page for this user, or None.

        Candidate pages are tried in a fixed priority order: mobile
        workers, groups, web users, roles, locations.  All but the web
        users page additionally require the PROJECT_ACCESS privilege.
        """
        redirect = None
        has_project_access = has_privilege(self.request, privileges.PROJECT_ACCESS)
        # Look the user up fresh by id rather than using the
        # request-attached object.
        user = CouchUser.get_by_user_id(self.couch_user._id)
        if user:
            if ((user.has_permission(self.domain, 'edit_commcare_users')
                    or user.has_permission(self.domain, 'view_commcare_users'))
                    and has_project_access):
                from corehq.apps.users.views.mobile import MobileWorkerListView
                redirect = reverse(
                    MobileWorkerListView.urlname,
                    args=[self.domain]
                )
            elif ((user.has_permission(self.domain, 'edit_groups')
                    or user.has_permission(self.domain, 'view_groups'))
                    and has_project_access):
                from corehq.apps.users.views.mobile import GroupsListView
                redirect = reverse(
                    GroupsListView.urlname,
                    args=[self.domain]
                )
            elif (user.has_permission(self.domain, 'edit_web_users')
                    or user.has_permission(self.domain, 'view_web_users')):
                redirect = reverse(
                    ListWebUsersView.urlname,
                    args=[self.domain]
                )
            elif (user.has_permission(self.domain, 'view_roles')
                    and has_project_access):
                from corehq.apps.users.views import ListRolesView
                redirect = reverse(
                    ListRolesView.urlname,
                    args=[self.domain]
                )
            elif ((user.has_permission(self.domain, 'edit_locations')
                    or user.has_permission(self.domain, 'view_locations'))
                    and has_project_access):
                from corehq.apps.locations.views import LocationsListView
                redirect = reverse(
                    LocationsListView.urlname,
                    args=[self.domain]
                )
        return redirect

    def get(self, request, *args, **kwargs):
        # No visible page for this user -> hide the section entirely.
        if not self.redirect:
            raise Http404()
        return HttpResponseRedirect(self.redirect)
class BaseEditUserView(BaseUserSettingsView):
    """Shared plumbing for pages that edit a single user.

    Subclasses set ``urlname`` and supply ``form_user_update``; this base
    resolves the user being edited, decides role-change permissions,
    builds the commtrack (location/program) form, and dispatches POSTs.
    """

    @property
    @memoized
    def page_url(self):
        if self.urlname:
            return reverse(self.urlname, args=[self.domain, self.editable_user_id])

    @property
    def parent_pages(self):
        return [{
            'title': ListWebUsersView.page_title,
            'url': reverse(ListWebUsersView.urlname, args=[self.domain]),
        }]

    @property
    def editable_user_id(self):
        # The id of the user being edited (not the requesting user).
        return self.kwargs.get('couch_user_id')

    @property
    @memoized
    def editable_user(self):
        """The WebUser being edited; 404s when missing or the wrong type."""
        try:
            return get_document_or_404(WebUser, self.domain, self.editable_user_id)
        except (ResourceNotFound, CouchUser.AccountTypeError):
            raise Http404()

    @property
    def existing_role(self):
        """Qualified id of the edited user's current role in this domain.

        Raises Http404 when the user is not a member of the domain, and
        MissingRoleException when a WebUser has no role at all.
        """
        try:
            role = self.editable_user.get_role(self.domain)
        except DomainMembershipError:
            raise Http404()

        if role is None:
            if isinstance(self.editable_user, WebUser):
                raise MissingRoleException()
            return None
        else:
            return role.get_qualified_id()

    @property
    @memoized
    def editable_role_choices(self):
        # Roles the *requesting* user may assign (admin role excluded).
        return get_editable_role_choices(self.domain, self.request.couch_user, allow_admin_role=False)

    @property
    def can_change_user_roles(self):
        """Whether the requester may change the edited user's role.

        Requires assignable roles, not editing oneself, and either domain
        admin rights or authority over the user's current role.
        """
        return (
            bool(self.editable_role_choices) and
            self.request.couch_user.user_id != self.editable_user_id and
            (
                self.request.couch_user.is_domain_admin(self.domain) or
                not self.existing_role or
                self.existing_role in [choice[0] for choice in self.editable_role_choices]
            )
        )

    def form_user_update(self):
        # Subclasses must provide the main user-update form.
        raise NotImplementedError()

    @property
    def main_context(self):
        context = super(BaseEditUserView, self).main_context
        context.update({
            'couch_user': self.editable_user,
            'form_user_update': self.form_user_update,
            'phonenumbers': self.editable_user.phone_numbers_extended(self.request.couch_user),
        })
        return context

    @property
    def backup_token(self):
        """The edited user's static backup token, creating one if needed.

        Returns None when the project does not use two-factor auth.
        """
        if Domain.get_by_name(self.request.domain).two_factor_auth:
            # CriticalSection guards the get-or-create sequence against
            # concurrent requests creating duplicate tokens.
            with CriticalSection([f"backup-token-{self.editable_user._id}"]):
                device = (self.editable_user.get_django_user()
                          .staticdevice_set
                          .get_or_create(name='backup')[0])
                token = device.token_set.first()
                if token:
                    # Reuse the token already fetched instead of issuing a
                    # second token_set.first() query for the same row.
                    return token.token
                else:
                    return device.token_set.create(token=StaticToken.random_token()).token
        return None

    @property
    @memoized
    def commtrack_form(self):
        """Location/program form, bound to POST data when submitted."""
        if self.request.method == "POST" and self.request.POST['form_type'] == "commtrack":
            return CommtrackUserForm(self.request.POST, request=self.request, domain=self.domain)

        user_domain_membership = self.editable_user.get_domain_membership(self.domain)
        return CommtrackUserForm(
            domain=self.domain,
            request=self.request,
            initial={
                'primary_location': user_domain_membership.location_id,
                'program_id': user_domain_membership.program_id,
                'assigned_locations': user_domain_membership.assigned_location_ids,
            },
        )

    def update_user(self):
        """Validate and save the user-update form; True on success."""
        if self.form_user_update.is_valid():
            old_lang = self.request.couch_user.language
            if self.form_user_update.update_user():
                # if editing our own account we should also update the language in the session
                if self.editable_user._id == self.request.couch_user._id:
                    new_lang = self.request.couch_user.language
                    if new_lang != old_lang:
                        self.request.session['django_language'] = new_lang
                return True

    def post(self, request, *args, **kwargs):
        """Dispatch to the commtrack or user-update form by 'form_type'."""
        saved = False
        if self.request.POST['form_type'] == "commtrack":
            if self.commtrack_form.is_valid():
                self.commtrack_form.save(self.editable_user)
                saved = True
        elif self.request.POST['form_type'] == "update-user":
            if self.update_user():
                messages.success(self.request, _('Changes saved for user "%s"') % self.editable_user.raw_username)
                saved = True
        if saved:
            return HttpResponseRedirect(self.page_url)
        else:
            return self.get(request, *args, **kwargs)
class EditWebUserView(BaseEditUserView):
    """Edit page for a single web user (role, info, commtrack, SSO trust)."""
    template_name = "users/edit_web_user.html"
    urlname = "user_account"
    page_title = ugettext_noop("Edit Web User")

    @property
    def page_name(self):
        if self.request.is_view_only:
            return _("Edit Web User (View Only)")
        return self.page_title

    @property
    @memoized
    def form_user_update(self):
        """Role-update form, bound to POST data when this form was submitted.

        The role field is removed entirely when the requester may not
        change this user's role.
        """
        if self.request.method == "POST" and self.request.POST['form_type'] == "update-user":
            data = self.request.POST
        else:
            data = None
        form = UpdateUserRoleForm(data=data, domain=self.domain, existing_user=self.editable_user,
                                  request=self.request)

        if self.can_change_user_roles:
            try:
                existing_role = self.existing_role
            except MissingRoleException:
                # Web user with no role: surface a prompt and let the
                # form be saved with a newly chosen role.
                existing_role = None
                messages.error(self.request, _("""
                    This user has no role. Please assign this user a role and save.
                """))
            form.load_roles(current_role=existing_role, role_choices=self.user_role_choices)
        else:
            del form.fields['role']

        return form

    @property
    def user_role_choices(self):
        """Assignable roles, prefixed with '(none)' when the user has no role."""
        role_choices = get_editable_role_choices(self.domain, self.request.couch_user, allow_admin_role=True)
        try:
            self.existing_role
        except MissingRoleException:
            role_choices = [('none', _('(none)'))] + role_choices
        return role_choices

    @property
    @memoized
    def can_grant_superuser_access(self):
        # Only superusers with the SUPPORT toggle see the permissions panel.
        return self.request.couch_user.is_superuser and toggles.SUPPORT.enabled(self.request.couch_user.username)

    @property
    def page_context(self):
        ctx = {
            'form_uneditable': BaseUserInfoForm(),
            'can_edit_role': self.can_change_user_roles,
        }
        if self.request.is_view_only:
            make_form_readonly(self.commtrack_form)
        # The commtrack (location/program) form only appears for projects
        # that use commtrack or locations.
        if (self.request.project.commtrack_enabled or
                self.request.project.uses_locations):
            ctx.update({'update_form': self.commtrack_form})
        if self.can_grant_superuser_access:
            ctx.update({'update_permissions': True})

        ctx.update({'token': self.backup_token})

        idp = IdentityProvider.get_active_identity_provider_by_username(
            self.editable_user.username
        )
        ctx.update({
            'has_untrusted_identity_provider': (
                not IdentityProvider.does_domain_trust_user(
                    self.domain,
                    self.editable_user.username
                )
            ),
            'idp_name': idp.name if idp else '',
        })
        return ctx

    @method_decorator(always_allow_project_access)
    @method_decorator(require_can_edit_or_view_web_users)
    def dispatch(self, request, *args, **kwargs):
        return super(EditWebUserView, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return super(EditWebUserView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # View-only users may not submit anything; render the page instead.
        if self.request.is_view_only:
            return self.get(request, *args, **kwargs)

        # Establishing SSO trust is handled here; all other form types are
        # dispatched by the base class's post().
        if self.request.POST['form_type'] == 'trust-identity-provider':
            idp = IdentityProvider.get_active_identity_provider_by_username(
                self.editable_user.username
            )
            if idp:
                idp.create_trust_with_domain(
                    self.domain,
                    self.request.user.username
                )
                messages.success(
                    self.request,
                    _('Your project space "{domain}" now trusts the SSO '
                      'Identity Provider "{idp_name}".').format(
                        domain=self.domain,
                        idp_name=idp.name,
                    )
                )
        return super(EditWebUserView, self).post(request, *args, **kwargs)
def get_domain_languages(domain):
    """(code, label) choices for every language the domain uses.

    Combines app languages with SMS translation languages; falls back to
    the full language list when the domain has none of its own.
    """
    app_langs = get_app_languages(domain)
    translations = SMSTranslations.objects.filter(domain=domain).first()
    sms_langs = translations.langs if translations else []

    choices = []
    for code in app_langs.union(sms_langs):
        name = langcodes.get_name(code)
        # Show "code (Name)" when the code is recognized, bare code otherwise.
        label = "{} ({})".format(code, name) if name else code
        choices.append((code, label))
    return sorted(choices) or langcodes.get_all_langs_for_select()
class BaseRoleAccessView(BaseUserSettingsView):
    """Base for views that display or depend on the domain's user roles."""

    @property
    @memoized
    def can_restrict_access_by_location(self):
        # Whether the plan allows location-based data-access restrictions.
        return self.domain_object.has_privilege(
            privileges.RESTRICT_ACCESS_BY_LOCATION)

    @property
    @memoized
    def web_apps_privilege(self):
        return self.domain_object.has_privilege(
            privileges.CLOUDCARE
        )

    @property
    @memoized
    def non_admin_roles(self):
        # All custom roles for the domain, sorted by name; roles without a
        # name sort last ('\uFFFF' is a near-maximal sort key).
        return list(sorted(
            UserRole.objects.get_by_domain(self.domain),
            key=lambda role: role.name if role.name else '\uFFFF'
        ))

    def get_roles_for_display(self):
        """JSON-ready role list: the admin role followed by custom roles.

        Each custom role is annotated with whether users are assigned to
        it and whether it carries a location restriction the current plan
        no longer permits.  An ES failure while counting users is reported
        once via messages rather than aborting the page.
        """
        show_es_issue = False
        role_view_data = [StaticRole.domain_admin(self.domain).to_json()]
        for role in self.non_admin_roles:
            role_data = role.to_json()
            role_view_data.append(role_data)
            try:
                user_count = get_role_user_count(role.domain, role.couch_id)
                role_data["hasUsersAssigned"] = bool(user_count)
            except TypeError:
                # when query_result['hits'] returns None due to an ES issue
                show_es_issue = True
            role_data["has_unpermitted_location_restriction"] = (
                not self.can_restrict_access_by_location
                and not role.permissions.access_all_locations
            )
        if show_es_issue:
            messages.error(
                self.request,
                mark_safe(_(  # nosec: no user input
                    "We might be experiencing issues fetching the entire list "
                    "of user roles right now. This issue is likely temporary and "
                    "nothing to worry about, but if you keep seeing this for "
                    "more than a day, please <a href='#modalReportIssue' "
                    "data-toggle='modal'>Report an Issue</a>."
                ))
            )
        return role_view_data
@method_decorator(always_allow_project_access, name='dispatch')
@method_decorator(toggles.ENTERPRISE_USER_MANAGEMENT.required_decorator(), name='dispatch')
class EnterpriseUsersView(BaseRoleAccessView):
    """Listing page for users across enterprise-linked domains."""

    template_name = 'users/enterprise_users.html'
    page_title = ugettext_lazy("Enterprise Users")
    urlname = 'enterprise_users'

    @property
    def page_context(self):
        # The profile column is only meaningful on plans with user profiles.
        show_profiles = domain_has_privilege(self.domain, privileges.APP_USER_PROFILES)
        return {"show_profile_column": show_profiles}
@method_decorator(always_allow_project_access, name='dispatch')
@method_decorator(require_can_edit_or_view_web_users, name='dispatch')
class ListWebUsersView(BaseRoleAccessView):
    """The Web Users list page: current users, invitations, and requests."""
    template_name = 'users/web_users.html'
    page_title = ugettext_lazy("Web Users")
    urlname = 'web_users'

    @property
    @memoized
    def role_labels(self):
        # Map of qualified role id -> display name, including the admin role.
        return {
            r.get_qualified_id(): r.name
            for r in [StaticRole.domain_admin(self.domain)] + self.non_admin_roles
        }

    @property
    @memoized
    def invitations(self):
        """JSON-ready rows for the domain's outstanding invitations."""
        return [
            {
                "uuid": str(invitation.uuid),
                "email": invitation.email,
                "email_marked_as_bounced": bool(invitation.email_marked_as_bounced),
                "invited_on": invitation.invited_on,
                "role_label": self.role_labels.get(invitation.role, ""),
                "email_status": invitation.email_status,
            }
            for invitation in Invitation.by_domain(self.domain)
        ]

    @property
    def page_context(self):
        # Imported here, not at module level — NOTE(review): presumably to
        # avoid a circular import; confirm before hoisting.
        from corehq.apps.users.views.mobile.users import FilteredWebUserDownload
        if can_use_filtered_user_download(self.domain):
            bulk_download_url = reverse(FilteredWebUserDownload.urlname, args=[self.domain])
        else:
            bulk_download_url = reverse("download_web_users", args=[self.domain])
        return {
            'invitations': self.invitations,
            # Access requests are only shown to domain admins.
            'requests': DomainRequest.by_domain(self.domain) if self.request.couch_user.is_domain_admin else [],
            'admins': WebUser.get_admins_by_domain(self.domain),
            'domain_object': self.domain_object,
            'bulk_download_url': bulk_download_url,
            'from_address': settings.DEFAULT_FROM_EMAIL
        }
@require_can_edit_or_view_web_users
def download_web_users(request, domain):
    """Kick off a bulk download of the domain's web users."""
    from corehq.apps.users.views.mobile.users import download_users

    # Analytics: record that a bulk web-user download was requested.
    track_workflow(request.couch_user.get_email(), 'Bulk download web users selected')
    return download_users(request, domain, user_type=WEB_USER_TYPE)
class DownloadWebUsersStatusView(BaseUserSettingsView):
    """Progress page polled while a web-user download is being prepared."""

    urlname = 'download_web_users_status'
    page_title = ugettext_noop('Download Web Users Status')

    @method_decorator(require_can_edit_or_view_web_users)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    @property
    def parent_pages(self):
        return [{
            'title': ListWebUsersView.page_title,
            'url': reverse(ListWebUsersView.urlname, args=[self.domain]),
        }]

    def get(self, request, *args, **kwargs):
        download_id = kwargs['download_id']
        context = super(DownloadWebUsersStatusView, self).main_context
        context.update({
            'domain': self.domain,
            'download_id': download_id,
            'poll_url': reverse('user_download_job_poll', args=[self.domain, download_id]),
            'title': _("Download Web Users Status"),
            'progress_text': _("Preparing web user download."),
            'error_text': _("There was an unexpected error! Please try again or report an issue."),
            'next_url': reverse(ListWebUsersView.urlname, args=[self.domain]),
            'next_url_text': _("Go back to Web Users"),
        })
        return render(request, 'hqwebapp/soil_status_full.html', context)
# NOTE(review): this function looks orphaned — it takes ``self`` but is
# defined at module level, so it is never usable as written.  It likely
# belongs on a class (e.g. DownloadWebUsersStatusView, as a @property).
# Confirm with callers before moving or deleting it.
def page_url(self):
    return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
class ListRolesView(BaseRoleAccessView):
    """The Roles & Permissions management page."""
    template_name = 'users/roles_and_permissions.html'
    page_title = ugettext_lazy("Roles & Permissions")
    urlname = 'roles_and_permissions'

    @method_decorator(require_can_view_roles)
    def dispatch(self, request, *args, **kwargs):
        return super(ListRolesView, self).dispatch(request, *args, **kwargs)

    @property
    def can_edit_roles(self):
        # Editing requires both the plan privilege and domain admin rights.
        return (has_privilege(self.request, privileges.ROLE_BASED_ACCESS)
                and self.couch_user.is_domain_admin)

    @property
    def landing_page_choices(self):
        """Dropdown options for a role's default landing page."""
        return [
            {'id': None, 'name': _('Use Default')}
        ] + [
            {'id': page.id, 'name': _(page.name)}
            for page in get_allowed_landing_pages(self.domain)
        ]

    @property
    def page_context(self):
        # Warn when existing roles restrict access by location but the
        # current plan no longer supports that feature.
        if (not self.can_restrict_access_by_location
                and any(not role.permissions.access_all_locations
                        for role in self.non_admin_roles)):
            messages.warning(self.request, _(
                "This project has user roles that restrict data access by "
                "organization, but the software plan no longer supports that. "
                "Any users assigned to roles that are restricted in data access "
                "by organization can no longer access this project. Please "
                "update the existing roles."))
        return {
            'user_roles': self.get_roles_for_display(),
            'non_admin_roles': self.non_admin_roles,
            'can_edit_roles': self.can_edit_roles,
            'default_role': StaticRole.domain_default(self.domain),
            'report_list': get_possible_reports(self.domain),
            'is_domain_admin': self.couch_user.is_domain_admin,
            'domain_object': self.domain_object,
            'uses_locations': self.domain_object.uses_locations,
            'can_restrict_access_by_location': self.can_restrict_access_by_location,
            'landing_page_choices': self.landing_page_choices,
            'show_integration': (
                toggles.OPENMRS_INTEGRATION.enabled(self.domain) or
                toggles.DHIS2_INTEGRATION.enabled(self.domain)
            ),
            'web_apps_privilege': self.web_apps_privilege,
            'has_report_builder_access': has_report_builder_access(self.request),
            'data_file_download_enabled': toggles.DATA_FILE_DOWNLOAD.enabled(self.domain),
            'export_ownership_enabled': toggles.EXPORT_OWNERSHIP.enabled(self.domain),
            'data_registry_choices': get_data_registry_dropdown_options(self.domain),
        }
@always_allow_project_access
@require_can_edit_or_view_web_users
@require_GET
def paginate_enterprise_users(request, domain):
    """Paged JSON listing of web users across enterprise-linked domains.

    Each web user row is followed by rows for the mobile workers linked to
    it via login-as.  Returns 'users' plus pagination metadata.
    """
    # Web users in this domain plus any domain it has enterprise access to.
    domains = [domain] + EnterprisePermissions.get_domains(domain)
    web_users, pagination = _get_web_users(request, domains)

    # Mobile workers linked (via the login_as_user user-data field) to any
    # web user on this page, grouped by that web user's username.
    web_user_usernames = [u.username for u in web_users]
    mobile_result = (
        UserES().show_inactive().domains(domains).mobile_users().sort('username.exact')
        .filter(
            queries.nested(
                'user_data_es',
                login_as_user_filter(web_user_usernames)
            )
        )
        .run()
    )
    mobile_users = defaultdict(list)
    for hit in mobile_result.hits:
        login_as_user = {data['key']: data['value'] for data in hit['user_data_es']}.get('login_as_user')
        mobile_users[login_as_user].append(CommCareUser.wrap(hit))

    users = []
    allowed_domains = set(domains) - {domain}
    for web_user in web_users:
        # Hoist the per-web-user list and count actives with sum() instead
        # of the previous len(list(filter(lambda ...))) construction.
        linked_mobile = mobile_users[web_user.username]
        active_count = sum(1 for m in linked_mobile if m['is_active'])
        other_domains = [m.domain for m in web_user.domain_memberships if m.domain in allowed_domains]
        users.append({
            **_format_enterprise_user(domain, web_user),
            'otherDomains': other_domains,
            'loginAsUserCount': active_count,
            'inactiveMobileCount': len(linked_mobile) - active_count,
        })
        for mobile_user in sorted(linked_mobile, key=lambda x: x.username):
            profile = mobile_user.get_user_data_profile(mobile_user.metadata.get(PROFILE_SLUG))
            users.append({
                **_format_enterprise_user(mobile_user.domain, mobile_user),
                'profile': profile.name if profile else None,
                'otherDomains': [mobile_user.domain] if domain != mobile_user.domain else [],
                'loginAsUser': web_user.username,
                'is_active': mobile_user.is_active,
            })

    return JsonResponse({
        'users': users,
        **pagination,
    })
def _format_enterprise_user(domain, user):
    """Common row data for the enterprise user listing.

    ``user`` may be either a WebUser or a CommCareUser.
    """
    role_name = None
    membership = user.get_domain_membership(domain)
    if membership and membership.role:
        role_name = membership.role.name
    return {
        'username': user.raw_username,
        'name': user.full_name,
        'id': user.get_id,
        'role': role_name,
    }
@always_allow_project_access
@require_can_edit_or_view_web_users
@require_GET
def paginate_web_users(request, domain):
    """Paged JSON listing of the domain's web users for the list page."""
    web_users, pagination = _get_web_users(request, [domain])

    rows = []
    for u in web_users:
        # Users cannot remove themselves, so their own row has no link.
        can_remove = request.user.username != u.username
        rows.append({
            'email': u.get_email(),
            'domain': domain,
            'name': u.full_name,
            'role': u.role_label(domain),
            'phoneNumbers': u.phone_numbers,
            'id': u.get_id,
            'editUrl': reverse('user_account', args=[domain, u.get_id]),
            'removeUrl': (
                reverse('remove_web_user', args=[domain, u.user_id])
                if can_remove else None
            ),
            'isUntrustedIdentityProvider': not IdentityProvider.does_domain_trust_user(
                domain, u.username
            ),
        })

    return JsonResponse({
        'users': rows,
        **pagination,
    })
def _get_web_users(request, domains):
    """Query ES for web users in ``domains``.

    Reads 'limit', 'page', and 'query' from the request's GET parameters
    and returns (list of WebUser, pagination dict).
    """
    limit = int(request.GET.get('limit', 10))
    page = int(request.GET.get('page', 1))
    query = request.GET.get('query')
    offset = limit * (page - 1)

    es_query = (
        UserES().domains(domains).web_users().sort('username.exact')
        .search_string_query(query, ["username", "last_name", "first_name"])
        .start(offset).size(limit)
    )
    result = es_query.run()

    pagination = {
        'total': result.total,
        'page': page,
        'query': query,
    }
    return [WebUser.wrap(hit) for hit in result.hits], pagination
@always_allow_project_access
@require_can_edit_web_users
@require_POST
def remove_web_user(request, domain, couch_user_id):
    """Remove a web user's membership in ``domain``, undoably."""
    user = WebUser.get_by_user_id(couch_user_id, domain)
    # if no user, very likely they just pressed delete twice in rapid succession so
    # don't bother doing anything.
    if user:
        record = user.delete_domain_membership(domain, create_record=True)
        user.save()
        # web user's membership is bound to the domain, so log as a change for that domain
        log_user_change(
            by_domain=request.domain, for_domain=domain, couch_user=user,
            changed_by_user=request.couch_user, changed_via=USER_CHANGE_VIA_WEB,
            change_messages=UserChangeMessage.domain_removal(domain))
        if record:
            message = _('You have successfully removed {username} from your '
                        'project space. <a href="{url}" class="post-link">Undo</a>')
            undo_url = reverse('undo_remove_web_user', args=[domain, record.get_id])
            messages.success(
                request,
                message.format(username=user.username, url=undo_url),
                extra_tags="html")
        else:
            message = _('It appears {username} has already been removed from your project space.')
            messages.success(request, message.format(username=user.username))
    return HttpResponseRedirect(
        reverse(ListWebUsersView.urlname, args=[domain]))
@always_allow_project_access
@require_can_edit_web_users
def undo_remove_web_user(request, domain, record_id):
    """Restore a web user's membership removed by ``remove_web_user``.

    ``record_id`` identifies the DomainRemovalRecord created at removal time.
    """
    record = DomainRemovalRecord.get(record_id)
    record.undo()
    # Wrapped in _() for translation, consistent with every other
    # user-facing message in this module.
    messages.success(request, _('You have successfully restored {username}.').format(
        username=WebUser.get_by_user_id(record.user_id).username
    ))
    return HttpResponseRedirect(
        reverse(ListWebUsersView.urlname, args=[domain]))
# If any permission less than domain admin were allowed here, having that permission would give you the permission
# to change the permissions of your own role such that you could do anything, and would thus be equivalent to having
# domain admin permissions.
@json_error
@domain_admin_required
@require_POST
def post_user_role(request, domain):
    """Create or update a user role from the Roles & Permissions UI."""
    if not domain_has_privilege(domain, privileges.ROLE_BASED_ACCESS):
        return JsonResponse({})

    payload = json.loads(request.body.decode('utf-8'))
    try:
        role = _update_role_from_view(domain, payload)
    except ValueError as e:
        # e.g. duplicate role name; surfaced to the UI as a 400.
        return JsonResponse({"message": str(e)}, status=400)

    response_data = role.to_json()
    response_data['hasUsersAssigned'] = get_role_user_count(domain, role.couch_id) > 0
    return JsonResponse(response_data)
def _update_role_from_view(domain, role_data):
    """Create or update a UserRole for ``domain`` from posted JSON.

    Validates the landing page and name, normalizes location permissions
    the plan doesn't allow, then saves the role, its permissions, and the
    roles allowed to assign it.  Raises ValueError on a duplicate name and
    Http404 when the role id belongs to another domain.
    """
    landing_page = role_data["default_landing_page"]
    if landing_page:
        validate_landing_page(domain, landing_page)

    if (
        not domain_has_privilege(domain, privileges.RESTRICT_ACCESS_BY_LOCATION)
        and not role_data['permissions']['access_all_locations']
    ):
        # This shouldn't be possible through the UI, but as a safeguard...
        role_data['permissions']['access_all_locations'] = True

    # Resolve an existing role by couch id, or start a fresh one.
    if "_id" in role_data:
        try:
            role = UserRole.objects.by_couch_id(role_data["_id"])
        except UserRole.DoesNotExist:
            role = UserRole()
        else:
            # Never allow editing another domain's role through this view.
            if role.domain != domain:
                raise Http404()
    else:
        role = UserRole()

    name = role_data["name"]
    # Name uniqueness (and the reserved 'admin' name) is only enforced for
    # newly created roles.
    if not role.id:
        if name.lower() == 'admin' or UserRole.objects.filter(domain=domain, name__iexact=name).exists():
            raise ValueError(_("A role with the same name already exists"))

    role.domain = domain
    role.name = name
    role.default_landing_page = landing_page
    role.is_non_admin_editable = role_data["is_non_admin_editable"]
    # The role must be saved before permissions/assignability can be set.
    role.save()
    permissions = Permissions.wrap(role_data["permissions"])
    permissions.normalize()
    role.set_permissions(permissions.to_list())
    assignable_by = role_data["assignable_by"]
    role.set_assignable_by_couch(assignable_by)
    return role
@domain_admin_required
@require_POST
def delete_user_role(request, domain):
    """Delete a user role, refusing while users are still assigned to it."""
    if not domain_has_privilege(domain, privileges.ROLE_BASED_ACCESS):
        return JsonResponse({})

    role_data = json.loads(request.body.decode('utf-8'))
    user_count = get_role_user_count(domain, role_data["_id"])
    if user_count:
        message = ngettext(
            "Unable to delete role '{role}'. It has one user still assigned to it. "
            "Remove all users assigned to the role before deleting it.",
            "Unable to delete role '{role}'. It has {user_count} users still assigned to it. "
            "Remove all users assigned to the role before deleting it.",
            user_count
        ).format(role=role_data["name"], user_count=user_count)
        return JsonResponse({"message": message}, status=400)

    try:
        role = UserRole.objects.by_couch_id(role_data["_id"], domain=domain)
    except UserRole.DoesNotExist:
        return JsonResponse({})

    # Capture the couch id before deleting so the UI can drop the row.
    deleted_id = role.couch_id
    role.delete()
    return JsonResponse({"_id": deleted_id})
@always_allow_project_access
@require_POST
@require_can_edit_web_users
def delete_request(request, domain):
    """Delete the DomainRequest identified by the POSTed ``id``."""
    domain_request = DomainRequest.objects.get(id=request.POST['id'])
    domain_request.delete()
    return JsonResponse({'status': 'ok'})
@always_allow_project_access
@require_POST
@require_can_edit_web_users
def check_sso_trust(request, domain):
    """Report whether *domain* trusts the POSTed username's identity provider.

    When untrusted, also returns the username's email domain and the name
    of the active identity provider, for display in the UI.
    """
    username = request.POST['username']
    if IdentityProvider.does_domain_trust_user(domain, username):
        return JsonResponse({'is_trusted': True})
    idp = IdentityProvider.get_active_identity_provider_by_username(username)
    return JsonResponse({
        'is_trusted': False,
        'email_domain': get_email_domain_from_username(username),
        'idp_name': idp.name,
    })
class BaseManageWebUserView(BaseUserSettingsView):
    """Shared base for web-user management views: access control + breadcrumbs."""
    @method_decorator(always_allow_project_access)
    @method_decorator(require_can_edit_web_users)
    def dispatch(self, request, *args, **kwargs):
        return super(BaseManageWebUserView, self).dispatch(request, *args, **kwargs)
    @property
    def parent_pages(self):
        # Breadcrumb back to the web-user list for this domain.
        return [{
            'title': ListWebUsersView.page_title,
            'url': reverse(ListWebUsersView.urlname, args=[self.domain]),
        }]
class InviteWebUserView(BaseManageWebUserView):
    """Invite a web user to the project, or approve a pending DomainRequest."""
    template_name = "users/invite_web_user.html"
    urlname = 'invite_web_user'
    page_title = ugettext_lazy("Invite Web User to Project")
    @property
    @memoized
    def invite_web_user_form(self):
        """Build the invite form; bound to POST data when submitting."""
        role_choices = get_editable_role_choices(self.domain, self.request.couch_user, allow_admin_role=True)
        loc = None
        domain_request = DomainRequest.objects.get(id=self.request_id) if self.request_id else None
        is_add_user = self.request_id is not None
        initial = {
            'email': domain_request.email if domain_request else None,
        }
        if 'location_id' in self.request.GET:
            from corehq.apps.locations.models import SQLLocation
            loc = SQLLocation.objects.get(location_id=self.request.GET.get('location_id'))
        if self.request.method == 'POST':
            # Current members and pending invitees may not be invited again.
            current_users = [user.username for user in WebUser.by_domain(self.domain)]
            pending_invites = [di.email for di in Invitation.by_domain(self.domain)]
            return AdminInvitesUserForm(
                self.request.POST,
                excluded_emails=current_users + pending_invites,
                role_choices=role_choices,
                domain=self.domain,
                is_add_user=is_add_user,
            )
        return AdminInvitesUserForm(
            initial=initial,
            role_choices=role_choices,
            domain=self.domain,
            location=loc,
            is_add_user=is_add_user,
        )
    @property
    @memoized
    def request_id(self):
        # Present when this invite approves an existing DomainRequest.
        if 'request_id' in self.request.GET:
            return self.request.GET.get('request_id')
        return None
    @property
    def page_context(self):
        return {
            'registration_form': self.invite_web_user_form,
        }
    def post(self, request, *args, **kwargs):
        """Handle submission: approve an access request or send an invitation."""
        if self.invite_web_user_form.is_valid():
            # If user exists and has already requested access, just add them to the project
            # Otherwise, send an invitation
            create_invitation = True
            data = self.invite_web_user_form.cleaned_data
            domain_request = DomainRequest.by_email(self.domain, data["email"])
            if domain_request is not None:
                domain_request.is_approved = True
                domain_request.save()
                user = CouchUser.get_by_username(domain_request.email)
                if user is not None:
                    # Existing account: add directly, skip the invitation.
                    domain_request.send_approval_email()
                    create_invitation = False
                    user.add_as_web_user(self.domain, role=data["role"],
                                         location_id=data.get("supply_point", None),
                                         program_id=data.get("program", None))
                messages.success(request, "%s added." % data["email"])
            else:
                track_workflow(request.couch_user.get_email(),
                               "Sent a project invitation",
                               {"Sent a project invitation": "yes"})
                send_hubspot_form(HUBSPOT_INVITATION_SENT_FORM, request)
                messages.success(request, "Invitation sent to %s" % data["email"])
            if create_invitation:
                data["invited_by"] = request.couch_user.user_id
                data["invited_on"] = datetime.utcnow()
                data["domain"] = self.domain
                invite = Invitation(**data)
                invite.save()
                invite.send_activation_email()
            # Ensure trust is established with Invited User's Identity Provider
            if not IdentityProvider.does_domain_trust_user(self.domain, data["email"]):
                idp = IdentityProvider.get_active_identity_provider_by_username(data["email"])
                idp.create_trust_with_domain(self.domain, self.request.user.username)
            return HttpResponseRedirect(reverse(
                ListWebUsersView.urlname,
                args=[self.domain]
            ))
        return self.get(request, *args, **kwargs)
class BaseUploadUser(BaseUserSettingsView):
    """Shared bulk-upload POST handler.

    Subclasses are expected to set ``is_web_upload`` (see UploadWebUsers);
    mobile flavors presumably set it False — confirm against subclasses.
    """
    def post(self, request, *args, **kwargs):
        """View's dispatch method automatically calls this"""
        try:
            self.workbook = get_workbook(request.FILES.get('bulk_upload_file'))
        except WorkbookJSONError as e:
            messages.error(request, str(e))
            return self.get(request, *args, **kwargs)
        try:
            self.user_specs = self.workbook.get_worksheet(title='users')
        except WorksheetNotFound:
            # Fall back to the first worksheet when none is titled 'users'.
            try:
                self.user_specs = self.workbook.get_worksheet()
            except WorksheetNotFound:
                return HttpResponseBadRequest("Workbook has no worksheets")
        try:
            self.group_specs = self.workbook.get_worksheet(title='groups')
        except WorksheetNotFound:
            self.group_specs = []
        try:
            from corehq.apps.user_importer.importer import check_headers
            check_headers(self.user_specs, self.domain, is_web_upload=self.is_web_upload)
        except UserUploadError as e:
            messages.error(request, _(str(e)))
            return HttpResponseRedirect(reverse(self.urlname, args=[self.domain]))
        # Placeholder download tracker; the async task id is attached below.
        task_ref = expose_cached_download(payload=None, expiry=1 * 60 * 60, file_extension=None)
        if PARALLEL_USER_IMPORTS.enabled(self.domain) and not self.is_web_upload:
            # The parallel import path cannot handle a groups sheet.
            if list(self.group_specs):
                messages.error(
                    request,
                    _("Groups are not allowed with parallel user import. Please upload them separately")
                )
                return HttpResponseRedirect(reverse(self.urlname, args=[self.domain]))
            task = parallel_user_import.delay(
                self.domain,
                list(self.user_specs),
                request.couch_user
            )
        else:
            upload_record = UserUploadRecord(
                domain=self.domain,
                user_id=request.couch_user.user_id
            )
            upload_record.save()
            task = import_users_and_groups.delay(
                self.domain,
                list(self.user_specs),
                list(self.group_specs),
                request.couch_user,
                upload_record.pk,
                self.is_web_upload
            )
        task_ref.set_task(task)
        if self.is_web_upload:
            return HttpResponseRedirect(
                reverse(
                    WebUserUploadStatusView.urlname,
                    args=[self.domain, task_ref.download_id]
                )
            )
        else:
            from corehq.apps.users.views.mobile import UserUploadStatusView
            return HttpResponseRedirect(
                reverse(
                    UserUploadStatusView.urlname,
                    args=[self.domain, task_ref.download_id]
                )
            )
class UploadWebUsers(BaseUploadUser):
    """Bulk-upload web users from a workbook (web flavor of BaseUploadUser)."""
    template_name = 'hqwebapp/bulk_upload.html'
    urlname = 'upload_web_users'
    page_title = ugettext_noop("Bulk Upload Web Users")
    is_web_upload = True
    @method_decorator(always_allow_project_access)
    @method_decorator(require_can_edit_web_users)
    @method_decorator(requires_privilege_with_fallback(privileges.BULK_USER_MANAGEMENT))
    def dispatch(self, request, *args, **kwargs):
        return super(UploadWebUsers, self).dispatch(request, *args, **kwargs)
    @property
    def page_context(self):
        # POST re-renders need the upload context too, so pick params by method.
        request_params = self.request.GET if self.request.method == 'GET' else self.request.POST
        from corehq.apps.users.views.mobile import get_user_upload_context
        return get_user_upload_context(self.domain, request_params, "download_web_users", "web user", "web users")
    def post(self, request, *args, **kwargs):
        track_workflow(request.couch_user.get_email(), 'Bulk upload web users selected')
        return super(UploadWebUsers, self).post(request, *args, **kwargs)
class WebUserUploadStatusView(BaseManageWebUserView):
    """Progress page for a running web-user bulk upload task."""
    urlname = 'web_user_upload_status'
    page_title = ugettext_noop('Web User Upload Status')
    def get(self, request, *args, **kwargs):
        context = super(WebUserUploadStatusView, self).main_context
        context.update({
            'domain': self.domain,
            'download_id': kwargs['download_id'],
            # The page polls this URL for task progress.
            'poll_url': reverse(WebUserUploadJobPollView.urlname, args=[self.domain, kwargs['download_id']]),
            'title': _("Web User Upload Status"),
            'progress_text': _("Importing your data. This may take some time..."),
            'error_text': _("Problem importing data! Please try again or report an issue."),
            'next_url': reverse(ListWebUsersView.urlname, args=[self.domain]),
            'next_url_text': _("Return to manage web users"),
        })
        return render(request, 'hqwebapp/soil_status_full.html', context)
    def page_url(self):
        return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
class UserUploadJobPollView(BaseUserSettingsView):
    """AJAX poll endpoint for an upload task.

    Subclasses provide ``on_complete_long`` and ``user_type`` (see
    WebUserUploadJobPollView below).
    """
    def get(self, request, domain, download_id):
        try:
            context = get_download_context(download_id)
        except TaskFailedError:
            return HttpResponseServerError()
        context.update({
            'on_complete_short': _('Bulk upload complete.'),
            'on_complete_long': _(self.on_complete_long),
            'user_type': _(self.user_type),
        })
        context['result'] = BulkUploadResponseWrapper(context)
        return render(request, 'users/mobile/partials/user_upload_status.html', context)
class WebUserUploadJobPollView(UserUploadJobPollView, BaseManageWebUserView):
    """Web-user flavor of the upload poll view."""
    urlname = "web_user_upload_job_poll"
    on_complete_long = 'Web Worker upload has finished'
    user_type = 'web users'
    @method_decorator(require_can_edit_web_users)
    def dispatch(self, request, *args, **kwargs):
        return super(WebUserUploadJobPollView, self).dispatch(request, *args, **kwargs)
@require_POST
@always_allow_project_access
@require_permission_to_edit_user
def make_phone_number_default(request, domain, couch_user_id):
    """Mark the POSTed phone number as the user's default number."""
    user = CouchUser.get_by_user_id(couch_user_id, domain)
    # Only the requesting web user themselves, or a mobile worker, may be edited.
    if not user.is_current_web_user(request) and not user.is_commcare_user():
        raise Http404()
    phone_number = request.POST['phone_number']
    if not phone_number:
        raise Http404('Must include phone number in request.')
    user.set_default_phone_number(phone_number)
    from corehq.apps.users.views.mobile import EditCommCareUserView
    redirect = reverse(EditCommCareUserView.urlname, args=[domain, couch_user_id])
    return HttpResponseRedirect(redirect)
@require_POST
@always_allow_project_access
@require_permission_to_edit_user
def delete_phone_number(request, domain, couch_user_id):
    """Remove the POSTed phone number from the user and audit-log the change."""
    user = CouchUser.get_by_user_id(couch_user_id, domain)
    # Only the requesting web user themselves, or a mobile worker, may be edited.
    if not user.is_current_web_user(request) and not user.is_commcare_user():
        raise Http404()
    phone_number = request.POST['phone_number']
    if not phone_number:
        raise Http404('Must include phone number in request.')
    user.delete_phone_number(phone_number)
    log_user_change(
        by_domain=request.domain,
        for_domain=user.domain,
        couch_user=user,
        changed_by_user=request.couch_user,
        changed_via=USER_CHANGE_VIA_WEB,
        change_messages=UserChangeMessage.phone_numbers_removed([phone_number])
    )
    from corehq.apps.users.views.mobile import EditCommCareUserView
    redirect = reverse(EditCommCareUserView.urlname, args=[domain, couch_user_id])
    return HttpResponseRedirect(redirect)
@always_allow_project_access
@require_permission_to_edit_user
def verify_phone_number(request, domain, couch_user_id):
    """
    phone_number cannot be passed in the url due to special characters
    but it can be passed as %-encoded GET parameters
    """
    if 'phone_number' not in request.GET:
        raise Http404('Must include phone number in request.')
    phone_number = six.moves.urllib.parse.unquote(request.GET['phone_number'])
    user = CouchUser.get_by_user_id(couch_user_id, domain)
    try:
        result = initiate_sms_verification_workflow(user, phone_number)
    except BadSMSConfigException as error:
        messages.error(request, _('Bad SMS configuration: {error}').format(error=error))
    else:
        # Translate the workflow result into a user-facing message.
        if result == VERIFICATION__ALREADY_IN_USE:
            messages.error(request, _('Cannot start verification workflow. Phone number is already in use.'))
        elif result == VERIFICATION__ALREADY_VERIFIED:
            messages.error(request, _('Phone number is already verified.'))
        elif result == VERIFICATION__RESENT_PENDING:
            messages.success(request, _('Verification message resent.'))
        elif result == VERIFICATION__WORKFLOW_STARTED:
            messages.success(request, _('Verification workflow started.'))
    from corehq.apps.users.views.mobile import EditCommCareUserView
    redirect = reverse(EditCommCareUserView.urlname, args=[domain, couch_user_id])
    return HttpResponseRedirect(redirect)
@always_allow_project_access
@require_superuser
@login_and_domain_required
def domain_accounts(request, domain, couch_user_id, template="users/domain_accounts.html"):
    """Superuser page to add domain memberships to a web user."""
    context = _users_context(request, domain)
    couch_user = WebUser.get_by_user_id(couch_user_id, domain)
    if request.method == "POST" and 'domain' in request.POST:
        # NOTE(review): rebinds ``domain`` to the POSTed value; the membership
        # is added to that domain, not the URL's — confirm this is intended.
        domain = request.POST['domain']
        couch_user.add_domain_membership(domain)
        couch_user.save()
        messages.success(request, 'Domain added')
    context.update({"user": request.user})
    return render(request, template, context)
@always_allow_project_access
@require_POST
@require_superuser
def add_domain_membership(request, domain, couch_user_id, domain_name):
    """Superuser action: add ``domain_name`` membership to a web user."""
    web_user = WebUser.get_by_user_id(couch_user_id, domain)
    if domain_name:
        web_user.add_domain_membership(domain_name)
        web_user.save()
    account_url = reverse("user_account", args=(domain, couch_user_id))
    return HttpResponseRedirect(account_url)
@always_allow_project_access
@sensitive_post_parameters('new_password1', 'new_password2')
@login_and_domain_required
@location_safe
def change_password(request, domain, login_id):
    """Set a new password for a mobile worker; returns a JSON status payload.

    Statuses: 'weak' (failed strength check), 'different' (confirmation
    mismatch), 'OK' (saved). Always includes 'formHTML' for re-rendering.
    Raises Http404 when the user is missing or the requester lacks access.
    """
    # copied from auth's password_change
    commcare_user = CommCareUser.get_by_user_id(login_id, domain)
    json_dump = {}
    if not commcare_user or not user_can_access_other_user(domain, request.couch_user, commcare_user):
        raise Http404()
    django_user = commcare_user.get_django_user()
    if request.method == "POST":
        form = SetUserPasswordForm(request.project, login_id, user=django_user, data=request.POST)
        # Renamed from ``input`` to avoid shadowing the builtin.
        new_password = request.POST['new_password1']
        if new_password == request.POST['new_password2']:
            if form.project.strong_mobile_passwords:
                try:
                    clean_password(new_password)
                except ValidationError:
                    json_dump['status'] = 'weak'
            if form.is_valid():
                form.save()
                log_user_change(
                    by_domain=domain,
                    for_domain=commcare_user.domain,
                    couch_user=commcare_user,
                    changed_by_user=request.couch_user,
                    changed_via=USER_CHANGE_VIA_WEB,
                    change_messages=UserChangeMessage.password_reset()
                )
                # NOTE(review): 'OK' may overwrite a previously-set 'weak'
                # status when the form still validates — preserved as-is.
                json_dump['status'] = 'OK'
                form = SetUserPasswordForm(request.project, login_id, user='')
        else:
            json_dump['status'] = 'different'
    else:
        form = SetUserPasswordForm(request.project, login_id, user=django_user)
    json_dump['formHTML'] = render_crispy_form(form)
    return HttpResponse(json.dumps(json_dump))
@httpdigest
@login_and_domain_required
def test_httpdigest(request, domain):
    # Smoke-test endpoint for HTTP digest authentication.
    return HttpResponse("ok")
@always_allow_project_access
@csrf_exempt
@require_POST
@require_superuser
def register_fcm_device_token(request, domain, couch_user_id, device_token):
    """Store a Firebase Cloud Messaging device token on the web user."""
    # NOTE(review): looked up without a domain filter — superuser-only endpoint.
    user = WebUser.get_by_user_id(couch_user_id)
    user.fcm_device_token = device_token
    user.save()
    return HttpResponse()
| |
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA if it has not been accepted yet.

    FIX: the original called ``con.get_eula_status()`` twice — once outside
    the try block with the result discarded. A single guarded call suffices.
    """
    try:
        if con.get_eula_status() is True:
            print('EULA display needed')
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credential dict.

    FIX: narrowed the bare ``except:`` to ``except Exception`` so that
    KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        con.login(credential)
    except Exception:
        print('Login failed')
def adduplinkset(con, net, name, ligname, networks, utype, etype,
                 native, icports, lacp, connection):
    """Append a new uplink set *name* to the Logical Interconnect Group *ligname*.

    networks: list of network names (Ethernet or FibreChannel per *utype*)
    native:   name of the network to mark native, or None
    icports:  list of 'BAY:PORT' strings for the uplink ports
    Exits the process when the LIG cannot be found or a BAY/PORT is invalid.
    """
    net_uris = []
    native_uri = []
    port_uris = []
    # FIX: initialize so the "not found" check below reports the error and
    # exits cleanly instead of raising NameError when no LIG matches.
    lig = None
    ligs = net.get_ligs()
    # Locate the user supplied LIG
    for tlig in ligs:
        if tlig['name'] == ligname:
            lig = tlig
            print('Using Logical Interconnect Group: ', ligname)
    if not lig:
        print('Error can not locate Logical Interconnect Group: ', ligname)
        sys.exit()
    # Locate the user supplied networks
    if networks:
        if utype == 'FibreChannel':
            fcnets = net.get_fc_networks()
            for fcnet in fcnets:
                if fcnet['name'] in networks:
                    print('Adding FC Network: ', fcnet['name'])
                    net_uris.append(fcnet['uri'])
        if utype == 'Ethernet':
            enets = net.get_enet_networks()
            for enet in enets:
                if enet['name'] in networks:
                    print('Adding Ethernet Network: ', enet['name'])
                    net_uris.append(enet['uri'])
                if native:
                    if enet['name'] in native:
                        native_uri = enet['uri']
    if native and not native_uri:
        print('Error can not locate the native network: ', native)
    if not native_uri:
        native_uri = None
    # Validate the user supplied Bay and Port options
    bay_list = set()
    ics = {}
    if icports:
        for items in icports:
            bay, port = items.split(':')
            bay = int(bay)
            bay_list.add(bay)
            port = int(port)
            if bay < 1 or bay > 8:
                print('Error, invalid BAY specified: ', items)
                sys.exit()
            if port < 1 or port > 10:
                print('Error, invalid PORT specified: ', items)
                sys.exit()
        # Find the interconnect modules that are installed in the bays
        # and store that in a dictionary of {Bay, Interconnect URI}
        icmap = lig['interconnectMapTemplate']['interconnectMapEntryTemplates']
        for interconnects in icmap:
            for item in interconnects['logicalLocation']['locationEntries']:
                if item['type'] == 'Bay' and item['relativeValue'] in bay_list:
                    ics[int(item['relativeValue'])] = interconnects['permittedInterconnectTypeUri']
        # Iterate through the bay and ports supplied by the user and lookup
        # the corresponding portConfigInfos port number for each port in
        # each bay to create the list of portConfigInfo URI's
        for items in icports:
            bay, pn = items.split(':')
            ictype = con.get_by_uri(ics[int(bay)])
            for port in ictype['portInfos']:
                if port['portName'] == 'X' + pn:
                    print('Adding Interconnect Bay: %s Port: %s (%s)' %
                          (bay, pn, port['portNumber']))
                    port_uris.append(hpov.common.make_port_config_info(1,
                                     int(bay), port['portNumber']))
    # Create a new uplink set to append to the logical interconnect group
    uset = hpov.common.make_UplinkSetGroupV2(name,
                                             ethernetNetworkType=etype,
                                             lacpTimer=lacp,
                                             logicalPortConfigInfos=port_uris,
                                             mode=connection,
                                             nativeNetworkUri=native_uri,
                                             networkType=utype,
                                             networkUris=net_uris)
    lig['uplinkSets'].append(uset)
    lig = net.update_lig(lig)
def main():
    """Parse command-line options and add the requested uplink set to a LIG."""
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
    Define new Uplink Set
    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    parser.add_argument('-j', dest='domain', required=False,
                        default='Local',
                        help='''
    HP OneView Authorized Login Domain''')
    parser.add_argument('-n', dest='uplink_set_name', required=True,
                        help='''
    Name of the uplink set''')
    parser.add_argument('-i', dest='logical_interconnect_group_name',
                        required=True,
                        help='''
    Name of the associated Logical Interconnect Group''')
    parser.add_argument('-l', dest='list_of_networks', required=False,
                        nargs='+',
                        help='''
    List of network names to add to the uplink set, encapsulated with quotes
    and seperated by spaces. For example:
    -l "Net One" "Net Two" "Net Three"''')
    parser.add_argument('-t', dest='uplink_type', choices=['Ethernet',
                        'FibreChannel'], required=True,
                        help='''
    Uplink Type''')
    parser.add_argument('-e', dest='ethernet_type', choices=['Tagged',
                        'Tunnel', 'Untagged'], required=False,
                        default='Tagged',
                        help='''
    Ethernet Type''')
    parser.add_argument('-x', dest='native_network', required=False,
                        help='''
    Name of the network to be marked as native''')
    parser.add_argument('-o', dest='uplink_ports', required=False,
                        nargs='+',
                        help='''
    List of uplink ports connected to the uplink sets specified as BAY:PORT
    and seperated by spaces. For example BAY 1 PORT X2 and BAY 1 PORT X3
    would be specified as:
    -o 1:2 1:3''')
    parser.add_argument('-m', dest='lacp_mode', required=False,
                        choices=['Long', 'Short'], default='Long',
                        help='''
    LACP mode on ETHERNET uplink ports''')
    parser.add_argument('-g', dest='connection_mode', choices=['Auto',
                        'FailOver'], required=False, default='Auto',
                        help='''
    Ethernet connection mode''')
    args = parser.parse_args()
    credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
    # Set up the connection (optionally via proxy / trusted certs) and log in.
    con = hpov.connection(args.host)
    net = hpov.networking(con)
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)
    login(con, credential)
    acceptEULA(con)
    adduplinkset(con, net, args.uplink_set_name,
                 args.logical_interconnect_group_name, args.list_of_networks,
                 args.uplink_type, args.ethernet_type, args.native_network,
                 args.uplink_ports, args.lacp_mode, args.connection_mode)
if __name__ == '__main__':
    # NOTE(review): sys and argparse are imported inside this guard, yet
    # main() uses argparse at call time — this only works because main()
    # is invoked exclusively from here. Confirm before importing this file
    # as a module.
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| |
#!/usr/bin/python
import optparse, os, sys
from biokbase.probabilistic_annotation.DataExtractor import *
from biokbase.probabilistic_annotation.DataParser import *
# Common variables...
from biokbase.probabilistic_annotation.PYTHON_GLOBALS import *
# Command-line interface. Per the description, everything generated by this
# driver is query independent — the same for any organism.
usage="%prog [options]"
description="""Main driver to get data needed out of the KBase and store it locally.
Data will be stored in a local database autoReconInfo
All of this data is QUERY INDEPENDENT. It should all be the same
for any organism for which you want to do a reconstruction..."""
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option("-r", "--regenerate", help="Regenerate database if it already exists (NOTE - takes a long time)", action="store_true", dest="regenerate", default=False)
parser.add_option("-d", "--deleteonly", help="Delete data files but do not regenerate (WARNING - this is not reversible)", action="store_true", dest="delete", default=False)
parser.add_option("-v", "--verbose", help="Display all WARNINGS (D: Only display messages related to completeness)", action="store_true", dest="verbose", default=False)
parser.add_option("-f", "--folder", help="Base directory (folder) in which all of the data files are to be stored", action="store", dest="folder", default=None)
(options, args) = parser.parse_args()
# The output folder is mandatory; bail out early with a usage error.
if options.folder is None:
    sys.stderr.write("ERROR: In ExtractorDriver.py - folder (-f) is a required argument\n")
    exit(2)
def safeRemove(fname, dirname):
    """Delete ``dirname/fname`` if it exists; a missing file is not an error.

    FIX: the original probed for existence by opening the file, which is
    racy and needlessly touches the file. Attempt the removal directly and
    ignore only "no such file"; any other OSError (e.g. permissions) still
    propagates, since it would break subsequent writes anyway.
    """
    import errno
    totalfname = os.path.join(dirname, fname)
    try:
        os.remove(totalfname)
    except OSError as e:
        # Missing file is fine; anything else is a real problem.
        if e.errno != errno.ENOENT:
            raise
# Clearing cached files happens for both --regenerate and --deleteonly.
if options.regenerate or options.delete:
    safeRemove(OTU_ID_FILE, options.folder)
    safeRemove(SUBSYSTEM_FID_FILE, options.folder)
    safeRemove(DLIT_FID_FILE, options.folder)
    safeRemove(CONCATINATED_FID_FILE, options.folder)
    safeRemove(SUBSYSTEM_OTU_FIDS_FILE, options.folder)
    safeRemove(SUBSYSTEM_OTU_FID_ROLES_FILE, options.folder)
    safeRemove(SUBSYSTEM_OTU_FASTA_FILE, options.folder)
    # BLAST database artifacts derived from the FASTA file.
    safeRemove(SUBSYSTEM_OTU_FASTA_FILE + ".psq", options.folder)
    safeRemove(SUBSYSTEM_OTU_FASTA_FILE + ".pin", options.folder)
    safeRemove(SUBSYSTEM_OTU_FASTA_FILE + ".phr", options.folder)
    safeRemove(OTU_NEIGHBORHOOD_FILE, options.folder)
    safeRemove(COMPLEXES_ROLES_FILE, options.folder)
    safeRemove(REACTION_COMPLEXES_FILE, options.folder)
#    folder = os.path.join("data", "OTU")
#    for the_file in os.listdir(folder):
#        file_path = os.path.join(folder, the_file)
#        if os.path.isfile(file_path):
#            os.unlink(file_path)
# Our job is done if all we want to do is delete files.
if options.delete:
    exit(0)
sys.stderr.write("Generating requested data:....\n")
# Each section below follows the same cache pattern: try to read the data
# from a previously-written file; on IOError, regenerate it from KBase and
# write it out for next time.
############
# Get lists of OTUs
############
sys.stderr.write("OTU data...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    otus, prokotus = readOtuData(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    otus, prokotus = getOtuGenomeIds(MINN, COUNT)
#    otus, prokotus = getOtuGenomeIds(MINN, 1200)
    writeOtuData(otus, prokotus, options.folder)
sys.stderr.write("done\n")
############
# Get a list of subsystem FIDs
############
sys.stderr.write("List of subsystem FIDS...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    sub_fids = readSubsystemFids(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    sub_fids = subsystemFids(MINN, COUNT)
    # NOTE - This is a TEMPORARY workaround for an issue with
    # the KBase subsystem load. This function WILL BE DELETED
    # and reverted to the call above once that issue is fixed...
#    sub_fids = subsystemFids_WORKAROUND(MINN, COUNT)
    writeSubsystemFids(sub_fids, options.folder)
sys.stderr.write("done\n")
###########
# ALso get a list of Dlit FIDs
# We include these because having them
# greatly expands the number of roles for which we
# have representatives.
##########
sys.stderr.write("Getting a list of DLit FIDs...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    dlit_fids = readDlitFids(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    dlit_fids = getDlitFids(MINN, COUNT)
    writeDlitFids(dlit_fids, options.folder)
sys.stderr.write("done\n")
##########
# Concatinate the two FID lists before filtering
# (Note - doing so after would be possible as well but
# can lead to the same kinds of biases as not filtering
# the subsystems... Im not sure the problem would
# be as bad for these though)
##########
sys.stderr.write("Combining lists of subsystem and DLit FIDS...")
fn = os.path.join(options.folder, CONCATINATED_FID_FILE)
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    all_fids = set()
    for line in open(fn, "r"):
        all_fids.add(line.strip("\r\n"))
    all_fids = list(all_fids)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    # De-duplicate the union of the subsystem and DLit FID lists.
    all_fids = list(set(sub_fids + dlit_fids))
    f = open(fn, "w")
    for fid in all_fids:
        f.write("%s\n" %(fid))
    f.close()
sys.stderr.write("done\n")
#############
# Identify roles for the OTU genes in the organism...
#############
sys.stderr.write("Roles for un-filtered list...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    all_fidsToRoles, all_rolesToFids = readAllFidRoles(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    all_fidsToRoles, all_rolesToFids = fidsToRoles(all_fids)
    writeAllFidRoles(all_fidsToRoles, options.folder)
sys.stderr.write("done\n")
#############
# Filter the subsystem FIDs by organism... we only want OTU genes.
# Unlike the neighborhood analysis, we don't want to include only
# prokaryotes here.
#############
sys.stderr.write("Filtered list by OTUs...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    otu_fidsToRoles, otu_rolesToFids = readFilteredOtuRoles(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    otudict = getOtuGenomeDictionary(MINN, COUNT)
    # NOTE(review): missing_roles is computed but never used below.
    otu_fidsToRoles, otu_rolesToFids, missing_roles = filterFidsByOtusBetter(all_fidsToRoles, all_rolesToFids, otudict)
    writeFilteredOtuRoles(otu_fidsToRoles, options.folder)
sys.stderr.write("done\n")
#############
# Generate a FASTA file
# for the fids in fidsToRoles
#############
sys.stderr.write("Subsystem FASTA file...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    readSubsystemFasta(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    fidsToSeqs = fidsToSequences(otu_fidsToRoles.keys())
    writeSubsystemFasta(fidsToSeqs, options.folder)
sys.stderr.write("done\n")
#############
# Get neighborhood info
# for the OTUs (prokaryote only because neighborhoods
# are generally not conserved for eukaryotes)
#############
#sys.stderr.write("OTU neighborhoods...\n")
#try:
#    fid = open(OTU_NEIGHBORHOOD_FILE, "r")
#    fid.close()
#except IOError:
#    tuplist: [ (contig_id, feature_id, start_location, strand) ]
#    Final file has this and then the roles in a delimited list
#    for prokotu in prokotus:
#        This is mostly because I keep running into incredibly stupid errors.
#        Lets see if I can figure out what the hell is causing them.
#        try:
#            fid = open(os.path.join("data", "OTU", prokotu), "r")
#            fid.close()
#        except IOError:
#            tuplist, fidToRoles = getGenomeNeighborhoodsAndRoles([prokotu])
#            writeOtuNeighborhoods(tuplist, fidToRoles, options.verbose, os.path.join("data", "OTU", prokotu))
#    cmd = "cat %s%s* > %s" %(os.path.join("data", "OTU"), os.sep, OTU_NEIGHBORHOOD_FILE)
#    os.system(cmd)
################
# Complexes --> Roles
# Needed to go from annotation likelihoods
# to reaction likelihoods
# Note that it is easier to go in this direction
# Because we need all the roles in a complex to get the probability of that complex.
#
################
sys.stderr.write("Complexes to roles...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    complexToRequiredRoles = readComplexRoles(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    complexToRequiredRoles, requiredRolesToComplexes = complexRoleLinks(MINN, COUNT)
    writeComplexRoles(complexToRequiredRoles, options.folder)
sys.stderr.write("done\n")
########
# reaction --> complex
# Again it is easier to go in this direction since we'll be filtering multiple complexes down to a single reaction.
#######
sys.stderr.write("Reactions to complexes...")
try:
    if options.verbose:
        sys.stderr.write("reading from file...")
    rxnToComplexes = readReactionComplex(options.folder)
except IOError:
    if options.verbose:
        sys.stderr.write("failed...generating file...")
    rxnToComplexes, complexesToReactions = reactionComplexLinks(MINN, COUNT)
    writeReactionComplex(rxnToComplexes, options.folder)
sys.stderr.write("done\n")
sys.stderr.write("Data gathering done...\n")
| |
from .logs import LoggingMixin
class EngineIONamespace(LoggingMixin):
    """Define engine.io client behavior."""
    def __init__(self, io):
        self._io = io
        self._log_name = io._url
        self._callback_by_event = {}
        self.initialize()
    def initialize(self):
        """Hook for subclass setup; override to initialize custom variables."""
    def on(self, event, callback):
        """Register *callback* to handle *event* emitted by the server."""
        self._callback_by_event[event] = callback
    def send(self, data):
        """Send a message."""
        self._io.send(data)
    def on_open(self):
        """Hook called after engine.io connects; override as needed."""
    def on_close(self):
        """Hook called after engine.io disconnects; override as needed."""
    def on_ping(self, data):
        """Hook called after engine.io sends a ping packet; override as needed."""
    def on_pong(self, data):
        """Hook called after engine.io sends a pong packet; override as needed."""
    def on_message(self, data):
        """Hook called after engine.io sends a message packet; override as needed."""
    def on_upgrade(self):
        """Hook called after engine.io sends an upgrade packet; override as needed."""
    def on_noop(self):
        """Hook called after engine.io sends a noop packet; override as needed."""
    def _find_packet_callback(self, event):
        """Return the handler for *event*: on()-registered first, then on_<event>."""
        # Callbacks registered via on() take precedence over the on_* methods.
        if event in self._callback_by_event:
            return self._callback_by_event[event]
        # Fall back to the explicitly defined on_<event> handler.
        return getattr(self, 'on_' + event)
class SocketIONamespace(EngineIONamespace):
    """Base class defining socket.io client behavior for one namespace path."""

    def __init__(self, io, path):
        self.path = path
        super(SocketIONamespace, self).__init__(io)

    def connect(self):
        """Connect this namespace's path on the underlying client."""
        self._io.connect(self.path)

    def disconnect(self):
        """Disconnect this namespace's path on the underlying client."""
        self._io.disconnect(self.path)

    def emit(self, event, *args, **kw):
        """Emit *event* with arguments on this namespace's path."""
        self._io.emit(event, path=self.path, *args, **kw)

    def send(self, data='', callback=None):
        """Send a message, optionally with an acknowledgement callback."""
        self._io.send(data, callback)

    def on_connect(self):
        """Called after socket.io connects.
        You can override this method."""

    def on_reconnect(self):
        """Called after socket.io reconnects.
        You can override this method."""

    def on_disconnect(self):
        """Called after socket.io disconnects.
        You can override this method."""

    def on_event(self, event, *args):
        """Fallback handler, called when no matching event handler exists.
        You can override this method.

        There are three ways to define an event handler:

        - Call socketIO.on()

            socketIO = SocketIO('localhost', 8000)
            socketIO.on('my_event', my_function)

        - Call namespace.on()

            namespace = socketIO.get_namespace()
            namespace.on('my_event', my_function)

        - Define namespace.on_xxx

            class Namespace(SocketIONamespace):
                def on_my_event(self, *args):
                    my_function(*args)
            socketIO.define(Namespace)"""

    def on_error(self, data):
        """Called after socket.io sends an error packet.
        You can override this method."""

    def _find_packet_callback(self, event):
        """Resolve the handler for *event*."""
        # The very first 'connect' stays 'connect'; every later one is
        # reported as 'reconnect'
        if event == 'connect':
            if hasattr(self, '_was_connected'):
                event = 'reconnect'
            else:
                self._was_connected = True
        # Callbacks registered through on() win over method definitions
        if event in self._callback_by_event:
            return self._callback_by_event[event]
        # Then explicitly defined handlers, with on_event() as the final
        # fallback for unknown events
        return getattr(
            self, 'on_' + event.replace(' ', '_'),
            lambda *args: self.on_event(event, *args))
class LoggingEngineIONamespace(EngineIONamespace):
    """EngineIONamespace that logs each engine.io packet it handles, then
    defers to the parent hook so user overrides still run.

    The _debug/_info helpers come from LoggingMixin via EngineIONamespace.
    """
    def on_open(self):
        self._debug('[engine.io open]')
        super(LoggingEngineIONamespace, self).on_open()
    def on_close(self):
        self._debug('[engine.io close]')
        super(LoggingEngineIONamespace, self).on_close()
    def on_ping(self, data):
        self._debug('[engine.io ping] %s', data)
        super(LoggingEngineIONamespace, self).on_ping(data)
    def on_pong(self, data):
        self._debug('[engine.io pong] %s', data)
        super(LoggingEngineIONamespace, self).on_pong(data)
    def on_message(self, data):
        self._debug('[engine.io message] %s', data)
        super(LoggingEngineIONamespace, self).on_message(data)
    def on_upgrade(self):
        self._debug('[engine.io upgrade]')
        super(LoggingEngineIONamespace, self).on_upgrade()
    def on_noop(self):
        self._debug('[engine.io noop]')
        super(LoggingEngineIONamespace, self).on_noop()
    def on_event(self, event, *args):
        # Log the event, its arguments, and whether an ack callback was
        # supplied, before dispatching normally.
        callback, args = find_callback(args)
        arguments = [repr(_) for _ in args]
        if callback:
            arguments.append('callback(*args)')
        self._info('[engine.io event] %s(%s)', event, ', '.join(arguments))
        # NOTE(review): EngineIONamespace itself defines no on_event(); this
        # super call appears to rely on the MRO of a subclass that also
        # inherits SocketIONamespace (see LoggingSocketIONamespace) -- confirm.
        super(LoggingEngineIONamespace, self).on_event(event, *args)
class LoggingSocketIONamespace(SocketIONamespace, LoggingEngineIONamespace):
    """SocketIONamespace that logs each socket.io packet it handles, prefixed
    with the namespace path, then defers to the parent hook so user
    overrides still run."""
    def on_connect(self):
        self._debug(
            '%s[socket.io connect]', _make_logging_header(self.path))
        super(LoggingSocketIONamespace, self).on_connect()
    def on_reconnect(self):
        self._debug(
            '%s[socket.io reconnect]', _make_logging_header(self.path))
        super(LoggingSocketIONamespace, self).on_reconnect()
    def on_disconnect(self):
        self._debug(
            '%s[socket.io disconnect]', _make_logging_header(self.path))
        super(LoggingSocketIONamespace, self).on_disconnect()
    def on_event(self, event, *args):
        # Log the event, its arguments, and whether an ack callback was
        # supplied, before dispatching normally.
        callback, args = find_callback(args)
        arguments = [repr(_) for _ in args]
        if callback:
            arguments.append('callback(*args)')
        self._info(
            '%s[socket.io event] %s(%s)', _make_logging_header(self.path),
            event, ', '.join(arguments))
        super(LoggingSocketIONamespace, self).on_event(event, *args)
    def on_error(self, data):
        self._debug(
            '%s[socket.io error] %s', _make_logging_header(self.path), data)
        super(LoggingSocketIONamespace, self).on_error(data)
def find_callback(args, kw=None):
    """Return (callback, remaining_args).

    The callback may be passed either as a trailing callable in *args* or
    under the 'callback' key of *kw*; (None, args) when neither is present.
    """
    if not (args and callable(args[-1])):
        try:
            return kw['callback'], args
        except (KeyError, TypeError):
            return None, args
    return args[-1], args[:-1]
def _make_logging_header(path):
return path + ' ' if path else ''
| |
# Targets not used, not possible to condition generated sequences
import data_utils_2
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
import math
from data_utils_2 import get_data
import tensorflow as tf
import numpy as np
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import matplotlib
# change backend so that it can plot figures without an X-server running.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math, time, json, random
import glob
import copy
import argparse
# Hyper-parameters are supplied on the command line; -experiment_id names
# the run and becomes the output directory for checkpoints, plots and
# result json files.
parser = argparse.ArgumentParser()
parser.add_argument('-learning_rate', type=float)
parser.add_argument('-optimizer_str', type=str)
parser.add_argument('-hidden_units_dec', type=int)
parser.add_argument('-hidden_units_enc', type=int)
parser.add_argument('-emb_dim', type=int)
parser.add_argument('-mult', type=float)
parser.add_argument('-experiment_id', type=str)
args = parser.parse_args()
experiment_id = './' + args.experiment_id
# directory where the data will be saved
if not os.path.isdir(experiment_id):
    os.mkdir(experiment_id)
# function for getting one mini batch
def get_batch(samples, batch_idx, batch_size):
    """Return the batch_idx-th consecutive slice of *samples*.

    The slice is batch_size items long, except possibly the last one,
    which may be shorter (or empty) when len(samples) is not a multiple
    of batch_size.
    """
    lo = batch_idx * batch_size
    return samples[lo:lo + batch_size]
def save_plot_sample(samples, idx, identifier, n_samples=6, num_epochs=None, ncol=2, path='./'):
    """Plot the first channel of n_samples sequences on a grid and save a PNG.

    The file is written to <path>/<identifier>_sig<idx, zero-padded>.png.

    :param samples: array of shape (num_samples, seq_length, num_signals)
    :param idx: index used in the figure title and the file name suffix
        (callers in this script also pass strings)
    :param identifier: experiment name embedded in the file name
    :param n_samples: number of sequences to draw; must divide evenly by ncol
    :param num_epochs: when given, the line color encodes progress idx/num_epochs
    :param ncol: number of grid columns
    :param path: directory the PNG is written to
    """
    assert n_samples <= samples.shape[0]
    assert n_samples % ncol == 0
    sample_length = samples.shape[1]
    if num_epochs is not None:
        # BUG FIX: hsv_to_rgb was called without ever being imported, so this
        # branch raised NameError whenever num_epochs was supplied.
        from matplotlib.colors import hsv_to_rgb
        col = hsv_to_rgb((1, 1.0 * idx / num_epochs, 0.8))
    else:
        col = 'grey'
    x_points = np.arange(sample_length)
    nrow = int(n_samples / ncol)
    fig, axarr = plt.subplots(nrow, ncol, sharex=True, figsize=(6, 6))
    for m in range(nrow):
        for n in range(ncol):
            # Only the first signal channel is plotted.
            sample = samples[n * nrow + m, :, 0]
            axarr[m, n].plot(x_points, sample, color=col)
            axarr[m, n].set_ylim(-1, 1)
    for n in range(ncol):
        axarr[-1, n].xaxis.set_ticks(range(0, sample_length, int(sample_length / 4)))
    fig.suptitle(idx)
    fig.subplots_adjust(hspace=0.15)
    out_name = path + "/" + identifier + "_sig" + str(idx).zfill(4) + ".png"
    fig.savefig(out_name)
    print(out_name)
    # Release the figure so long runs do not accumulate open figures.
    plt.clf()
    plt.close()
    return
def sine_wave(seq_length=30, num_samples=28*5*100, num_signals=1,
              freq_low=1, freq_high=5, amplitude_low=0.1, amplitude_high=0.9,
              random_seed=None, **kwargs):
    """Generate random sine-wave sequences.

    Each sample holds num_signals sinusoids with frequency drawn uniformly
    from [freq_low, freq_high], amplitude from [amplitude_low,
    amplitude_high], and a random phase offset in [-pi, pi).

    :param seq_length: number of time steps per sample
    :param num_samples: number of samples to generate
    :param num_signals: sinusoids (channels) per sample
    :param random_seed: when given, seeds np.random for reproducible output
        (previously this parameter was accepted but ignored)
    :returns: array of shape (num_samples, seq_length, num_signals)
    """
    if random_seed is not None:
        np.random.seed(random_seed)
    ix = np.arange(seq_length) + 1
    samples = []
    for _ in range(num_samples):
        signals = []
        for _ in range(num_signals):
            # BUG FIX: low/high were passed reversed (low=freq_high,
            # high=freq_low), which is outside np.random.uniform's contract
            # (high should be >= low); same for the amplitude draw.
            f = np.random.uniform(low=freq_low, high=freq_high)        # frequency
            A = np.random.uniform(low=amplitude_low, high=amplitude_high)  # amplitude
            offset = np.random.uniform(low=-np.pi, high=np.pi)         # phase
            signals.append(A * np.sin(2 * np.pi * f * ix / float(seq_length) + offset))
        samples.append(np.array(signals).T)
    # the shape of the samples is num_samples x seq_length x num_signals
    return np.array(samples)
########################
# DATA LOADING
########################
print ("loading data...")
# Synthetic sine-wave dataset, shape (num_samples, seq_length, num_signals).
samples = sine_wave()
# 60/20/20 train/validation/test split (data_utils_2 is a project module).
inputs_train, inputs_validation, inputs_test = data_utils_2.split(samples, [0.6, 0.2, 0.2])
save_plot_sample(samples[0:7], '0', 'test_RVAE', path=experiment_id)
print ("data loaded.")
# runs the experiment 5 times
#identifiers = ['eICU_RVAE_synthetic_dataset_VAE_r' + str(i) for i in range(1)]
#for identifier in identifiers:
#identifier = identifiers[0]
#training config
batch_size = 32
print ("data loaded.")
# Sequence length / feature count are taken from the training data and used
# as module-level globals by encoder() and decoder() below.
seq_length = inputs_train.shape[1]
num_features = inputs_train.shape[2]
# not used
random_seed = 0
def encoder(hidden_units_enc, emb_dim, mult):
    """Build the recurrent VAE encoder graph (TensorFlow 1.x).

    Encodes a batch of sequences with an LSTM and maps the final hidden
    state to a diagonal Gaussian posterior over a latent code of size
    emb_dim, from which one sample is drawn via the reparameterization
    trick. Uses the module-level globals batch_size, seq_length and
    num_features.

    :param hidden_units_enc: number of LSTM units in the encoder
    :param emb_dim: dimensionality of the latent embedding
    :param mult: weight applied to the KL-divergence term of the loss
    :returns: (input placeholder, rnn outputs, rnn states, latent sample,
        batch-averaged weighted KL loss)
    """
    with tf.variable_scope("encoder") as scope:
        input_seq_enc = tf.placeholder(tf.float32, [batch_size, seq_length, num_features])
        cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_enc, state_is_tuple=True)
        enc_rnn_outputs, enc_rnn_states = tf.nn.dynamic_rnn(
            cell=cell,
            dtype=tf.float32,
            #sequence_length=[seq_length]*batch_size,
            inputs=input_seq_enc)
        # enc_rnn_states is an LSTMStateTuple: [0] is c, [1] is the final
        # hidden state h, which parameterizes the posterior.
        z_mean = tf.layers.dense(enc_rnn_states[1], emb_dim)
        z_log_sigma_sq = tf.layers.dense(enc_rnn_states[1], emb_dim)
        # Draw one sample z from Gaussian distribution with mean 0 and std 1
        eps = tf.random_normal((batch_size, emb_dim), 0, 1, dtype=tf.float32)
        # z = mu + sigma*epsilon
        latent_emb = tf.add(z_mean, tf.multiply(tf.exp(tf.multiply(z_log_sigma_sq,0.5)), eps))
        # KL divergence between the posterior and a standard normal prior,
        # scaled by mult.
        latent_loss = mult * (-0.5) * tf.reduce_sum(1 + z_log_sigma_sq
                                           - tf.square(z_mean)
                                           - tf.exp(z_log_sigma_sq), 1)
        latent_loss = tf.reduce_mean(latent_loss)
        return input_seq_enc, enc_rnn_outputs, enc_rnn_states, latent_emb, latent_loss
########################
# DECODER
########################
def decoder(hidden_units_dec, latent_emb, input_seq_enc):
    """Build the recurrent VAE decoder graph (TensorFlow 1.x).

    A vanilla RNN is driven by all-zero inputs; the only information from
    the latent code enters through the initial state (a tanh projection
    of latent_emb). Per-timestep hidden states are linearly projected to
    the feature dimension. Uses the module-level globals seq_length and
    num_features.

    :param hidden_units_dec: number of RNN units in the decoder
    :param latent_emb: latent sample tensor produced by encoder()
    :param input_seq_enc: encoder input placeholder (reconstruction target)
    :returns: (mean-squared reconstruction loss, reconstructed sequences)
    """
    with tf.variable_scope("decoder") as scope:
        W_out_dec = tf.Variable(tf.truncated_normal([hidden_units_dec,num_features]))
        b_out_dec = tf.Variable(tf.truncated_normal([num_features]))
        # Zero inputs: the RNN unrolls from the latent-derived state alone.
        dec_inputs = tf.zeros(tf.shape(input_seq_enc))
        #use latent embedding as inputs
        #dec_inputs = tf.layers.dense(latent_emb, latent_emb.shape[1].value, activation=tf.nn.tanh)
        #dec_inputs = tf.tile(dec_inputs, [1, seq_length])
        #dec_inputs = tf.reshape(dec_inputs, [batch_size, seq_length, latent_emb.shape[1].value])
        dec_initial_state = tf.layers.dense(latent_emb, hidden_units_dec, activation=tf.nn.tanh)
        #init_state = tf.contrib.rnn.LSTMStateTuple(tf.zeros([batch_size, hidden_units_dec]),tf.zeros([batch_size, hidden_units_dec]))
        #init_state = tf.contrib.rnn.LSTMStateTuple(dec_initial_state, dec_initial_state)
        cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_units_dec)
        dec_rnn_outputs, dec_rnn_states = tf.nn.dynamic_rnn(
            cell=cell,
            dtype=tf.float32,
            #sequence_length=[seq_length]*batch_size,
            initial_state=dec_initial_state,
            inputs=dec_inputs)
        # Project every timestep's hidden state to the feature dimension.
        rnn_outputs_2d = tf.reshape(dec_rnn_outputs, [-1, hidden_units_dec])
        logits_2d = tf.matmul(rnn_outputs_2d, W_out_dec) + b_out_dec
        output_3d = tf.reshape(logits_2d, [-1, seq_length, num_features])
        #output_3d = tf.tanh(output_3d)
        # Mean squared error between reconstruction and input.
        reconstruction_loss = tf.reduce_mean(tf.square(tf.subtract(output_3d,input_seq_enc)))
        #reconstruction_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_3d, labels=input_seq_enc))
        return reconstruction_loss, output_3d
########################
# TRAINING
########################
# Each hyper-parameter is wrapped in a one-element list so that
# itertools.product below yields exactly one configuration per invocation
# (the list machinery is left over from grid-search runs).
learning_rate = [args.learning_rate]
delta_error = [0]
optimizer_str = [args.optimizer_str]
hidden_units_dec = [args.hidden_units_dec]
hidden_units_enc = [args.hidden_units_enc]
emb_dim = [args.emb_dim]
mult = [args.mult]
configs = itertools.product(learning_rate, delta_error, optimizer_str, hidden_units_dec, hidden_units_enc, emb_dim, mult)
# config_keys maps a field name to its position inside each config tuple.
config_keys = ['learning_rate', 'delta_error', 'optimizer_str', 'hidden_units_dec', 'hidden_units_enc', 'emb_dim', 'mult']
verbose = 3
max_epochs=10000
patience=5
minibatch_size_train=batch_size
minibatch_size_validation=batch_size
minibatch_size_test=batch_size
# One full train/evaluate/generate cycle per hyper-parameter configuration.
for config in configs:
    num_mini_batches_train = int(math.ceil(len(inputs_train) / float(minibatch_size_train)))
    num_mini_batches_validation = int(math.ceil(len(inputs_validation) / float(minibatch_size_validation)))
    num_mini_batches_test = int(math.ceil(len(inputs_test) / float(minibatch_size_test)))
    # Random suffix keeps output files of repeated runs from colliding.
    experiment_random_id = str(int(np.random.rand(1)*1000000))
    config_id = str(config).replace(", ", "_").replace("'", "")[1:-1] + "_" + experiment_random_id
    if verbose > 0:
        print(config_id)
    tf.reset_default_graph()
    learning_rate = config[config_keys.index('learning_rate')]
    delta_error = config[config_keys.index('delta_error')]
    optimizer_str = config[config_keys.index('optimizer_str')]
    with tf.variable_scope("trainer"):
        hidden_units_enc = config[config_keys.index('hidden_units_enc')]
        hidden_units_dec = config[config_keys.index('hidden_units_dec')]
        emb_dim = config[config_keys.index('emb_dim')]
        mult = config[config_keys.index('mult')]
        # Build the VAE graph: encoder -> latent sample -> decoder.
        input_seq_enc, enc_rnn_outputs, enc_rnn_states, latent_emb, latent_loss = encoder(hidden_units_enc, emb_dim, mult)
        # when the network has the same length enc_rnn_states[1] == enc_rnn_outputs[:,-1,:]
        #in LSTM enc_rnn_states[0] is c; enc_rnn_states[1] is h
        #latent_emb = enc_rnn_states[1]
        reconstruction_loss, output_3d_pred = decoder(hidden_units_dec, latent_emb, input_seq_enc)
        # Total VAE objective: reconstruction error + weighted KL term.
        cost = reconstruction_loss + latent_loss
        global_step = tf.Variable(np.int64(0), name='global_step', trainable=False)
        if (optimizer_str == "adagrad_epochs") or (optimizer_str == "adagrad_minibatch_iterations"):
            train = tf.train.AdagradDAOptimizer(learning_rate, global_step).minimize(cost)
        elif optimizer_str == "rmsprop":
            train = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
        elif optimizer_str == "adam":
            train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
        elif optimizer_str == "sgd":
            # NOTE(review): "sgd" actually builds an RMSProp optimizer, not
            # tf.train.GradientDescentOptimizer -- confirm whether intended.
            train = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    #train
    global_steps_count = 0
    keep_training = True
    epoch_counter = 0
    patience_counter = 0
    best_val_cost = 9999999999
    costs_train = []
    costs_val = []
    costs_test = []
    saved = False
    #Initial costs
    # Evaluate train/validation/test cost once before any weight update, so
    # the learning curves include an epoch-0 point.
    # start training
    weighted_cost_sum = 0
    for mbi in range(num_mini_batches_train):
        input_ = get_batch(inputs_train, mbi, minibatch_size_train)
        # FIX THIS! deal with last samples available in the set
        if len(input_) == batch_size:
            feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
            res = sess.run([cost], feed_dict=feed_dict)
            weighted_cost_sum += res[0]*len(input_)
    cost_train = weighted_cost_sum / len(inputs_train)
    costs_train.append(cost_train)
    if verbose > 1:
        print(cost_train)
    # validation cost
    weighted_cost_sum = 0
    for mbi in range(num_mini_batches_validation):
        input_ = get_batch(inputs_validation, mbi, minibatch_size_validation)
        if len(input_) == batch_size:
            feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
            res = sess.run([cost], feed_dict=feed_dict)
            weighted_cost_sum += res[0]*len(input_)
    cost_val = weighted_cost_sum / len(inputs_validation)
    costs_val.append(cost_val)
    if verbose > 1:
        print(cost_val)
    # compute test cost
    # this should be optional since it is not needed in every epoch
    # I am doing it to get the learning curve
    weighted_cost_sum = 0
    for mbi in range(num_mini_batches_test):
        input_ = get_batch(inputs_test, mbi, minibatch_size_test)
        if len(input_) == batch_size:
            feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
            res = sess.run([cost], feed_dict=feed_dict)
            weighted_cost_sum += res[0]*len(input_)
    cost_test = weighted_cost_sum / len(inputs_test)
    costs_test.append(cost_test)
    if verbose > 1:
        print(cost_test)
        print("++++++++++++++++++++++++++++++++++")
    # start training
    # Main epoch loop with early stopping: stop after `patience` epochs
    # without validation improvement of at least delta_error, or at
    # max_epochs, whichever comes first.
    while keep_training:
        time_start = time.time()
        #shuffle data
        np.random.shuffle(inputs_train)
        weighted_cost_sum = 0
        for mbi in range(num_mini_batches_train):
            input_ = get_batch(inputs_train, mbi, minibatch_size_train)
            # FIX THIS! deal with last samples available in the set
            if len(input_) == batch_size:
                feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
                res = sess.run([train, cost, reconstruction_loss, latent_loss], feed_dict=feed_dict)
                if config[2] == "adagrad_epochs":
                    global_steps_count += 1
                # since last minibatch can have different lenth, we compute the mean cost as a
                # weighted mean
                weighted_cost_sum += res[1]*len(input_)
        cost_train = weighted_cost_sum / len(inputs_train)
        costs_train.append(cost_train)
        if verbose > 1:
            print(res[2])
            print(res[3])
            print(cost_train)
            print("-")
        # validation cost
        weighted_cost_sum = 0
        for mbi in range(num_mini_batches_validation):
            input_ = get_batch(inputs_validation, mbi, minibatch_size_validation)
            if len(input_) == batch_size:
                feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
                res = sess.run([cost], feed_dict=feed_dict)
                weighted_cost_sum += res[0]*len(input_)
        cost_val = weighted_cost_sum / len(inputs_validation)
        costs_val.append(cost_val)
        if verbose > 1:
            print(cost_val)
        # compute test cost
        # this should be optional since it is not needed in every epoch
        # I am doing it to get the learning curve
        weighted_cost_sum = 0
        for mbi in range(num_mini_batches_test):
            input_ = get_batch(inputs_test, mbi, minibatch_size_test)
            if len(input_) == batch_size:
                feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
                res = sess.run([cost], feed_dict=feed_dict)
                weighted_cost_sum += res[0]*len(input_)
        cost_test = weighted_cost_sum / len(inputs_test)
        costs_test.append(cost_test)
        # check patience
        if cost_val <= best_val_cost - delta_error:
            # Improvement: checkpoint the model and reset the patience counter.
            best_val_cost = cost_val
            patience_counter = 0
            save_path = saver.save(sess, "./" + experiment_id + "/" + config_id + "_model.ckpt")
            saved = True
        else:
            patience_counter += 1
            if patience_counter > patience:
                # Out of patience: restore the best checkpoint and stop.
                keep_training = False
                if saved:
                    saver.restore(sess, "./" + experiment_id + "/" + config_id + "_model.ckpt")
        epoch_counter += 1
        if config[2] == "adagrad_minibatch_iterations":
            global_steps_count = epoch_counter
        if epoch_counter >= max_epochs:
            keep_training = False
        if verbose > 1:
            print(time.time() - time_start)
            print('--------------------')
            print((inputs_train[0] == inputs_train[1]).all())
    # save learning curve plots
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Cost')
    plt.title('Learning curves')
    plt.plot(range(len(costs_train)), costs_train, label='training')
    plt.plot(range(len(costs_val)), costs_val, label='validation')
    plt.plot(range(len(costs_test)), costs_test, label='test')
    plt.legend(loc="upper right")
    plt.savefig("./" + experiment_id + "/" + config_id + "_learning_curves.png", dpi=300)
    # validation costs
    # Final evaluation with the restored best model.
    predicted_values = []
    predicted_values_not_flatten = []
    true_values = []
    true_values_not_flatten = []
    other_scores_validation = []
    weighted_cost_sum = 0
    for mbi in range(num_mini_batches_validation):
        input_ = get_batch(inputs_validation, mbi, minibatch_size_validation)
        if len(input_) == batch_size:
            feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
            res = sess.run([cost], feed_dict=feed_dict)
            weighted_cost_sum += res[0]*len(input_)
    cost_val = weighted_cost_sum / len(inputs_validation)
    costs_val.append(cost_val)
    other_scores_validation = {}
    # test costs
    predicted_values = []
    predicted_values_not_flatten = []
    true_values = []
    true_values_not_flatten = []
    other_scores_test = []
    weighted_cost_sum = 0
    for mbi in range(num_mini_batches_test):
        input_ = get_batch(inputs_test, mbi, minibatch_size_test)
        if len(input_) == batch_size:
            feed_dict = {input_seq_enc:input_, global_step:global_steps_count}
            res = sess.run([cost], feed_dict=feed_dict)
            weighted_cost_sum += res[0]*len(input_)
    cost_test = weighted_cost_sum / len(inputs_test)
    costs_test.append(cost_test)
    other_scores_test = {}
    # NOTE(review): time_start is reset every epoch, so total_time measures
    # only the final epoch plus the evaluation above -- confirm intent.
    total_time = time.time() - time_start
    # need to convert values to float64 to be able to serialize them
    to_store = {'config': config, 'costs_train': costs_train, 'costs_val': costs_val, 'costs_test': costs_test,
                'best_val_cost': best_val_cost, 'total_time': total_time, 'random_seed': random_seed,
                'experiment_random_id': experiment_random_id, 'other_scores_validation': other_scores_validation,
                'other_scores_test': other_scores_test}
    with open("./" + experiment_id + "/" + config_id + ".json", "w") as f:
        json.dump(to_store, f)
    if verbose > 0:
        print("==========================")
        print(cost_val)
        print(best_val_cost)
        print(cost_test)
        print("==========================")
        print((inputs_train[0] == inputs_train[1]).all())
    ########################
    # SYNTHETIC SAMPLES GENERATION
    ########################
    # Generate new samples
    # Decode draws from the standard-normal prior into synthetic sequences.
    generated_samples = []
    for i in range(num_mini_batches_train):
        # NOTE(review): the hard-coded 32 matches batch_size above; the feed
        # would break if batch_size were changed -- confirm.
        feed_dict = {latent_emb:np.random.normal(size=(32,emb_dim))}
        res = sess.run([output_3d_pred], feed_dict=feed_dict)
        generated_samples.append(res[0])
    generated_samples = np.vstack(generated_samples)
    save_plot_sample(generated_samples[0:7], '_' + config_id + '_generated', 'test_RVAE', path=experiment_id)
    # Plot one real minibatch and its reconstruction for visual comparison.
    input_ = get_batch(inputs_train, 0, minibatch_size_train)
    save_plot_sample(input_[0:7], '_' + config_id + '_input', 'test_RVAE', path=experiment_id)
    feed_dict = {input_seq_enc:input_}
    res = sess.run([output_3d_pred], feed_dict=feed_dict)
    save_plot_sample(res[0][0:7], '_' + config_id + '_reconstuction', 'test_RVAE', path=experiment_id)
| |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import datasets
import datasets.pascal_voc
import os
import datasets.imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
class pascal_voc(datasets.imdb):
def __init__(self, image_set, year, devkit_path=None):
datasets.imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_IJCV_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
'{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
format(self.name, self.config['top_k']))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _load_selective_search_IJCV_roidb(self, gt_roidb):
IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_IJCV_data',
'voc_' + self._year))
assert os.path.exists(IJCV_path), \
'Selective search IJCV data not found at: {}'.format(IJCV_path)
top_k = self.config['top_k']
box_list = []
for i in xrange(self.num_images):
filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
raw_data = sio.loadmat(filename)
box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = self._class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
def _write_voc_results_file(self, all_boxes):
use_salt = self.config['use_salt']
comp_id = 'comp4'
if use_salt:
comp_id += '-{}'.format(os.getpid())
# VOCdevkit/results/VOC2007/Main/comp4-44503_det_test_aeroplane.txt
path = os.path.join(self._devkit_path, 'results', 'VOC' + self._year,
'Main', comp_id + '_')
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = path + 'det_' + self._image_set + '_' + cls + '.txt'
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
return comp_id
def _do_matlab_eval(self, comp_id, output_dir='output'):
rm_results = self.config['cleanup']
path = os.path.join(os.path.dirname(__file__),
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
.format(self._devkit_path, comp_id,
self._image_set, output_dir, int(rm_results))
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
comp_id = self._write_voc_results_file(all_boxes)
self._do_matlab_eval(comp_id, output_dir)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
    # Manual smoke test: build the VOC 2007 trainval imdb, force roidb
    # construction, then drop into an interactive shell for inspection.
    d = datasets.pascal_voc('trainval', '2007')
    res = d.roidb
    from IPython import embed; embed()
| |
#!/usr/bin/env python3
import os
from distutils.version import LooseVersion
import argparse
import base64
import collections
import copy
import itertools
import json
import jsonschema
import pathlib
import shutil
import sys
import tempfile
import re
import zipfile
def main():
    """Generate every universe artifact from the package repository into
    --out-dir: the full universe.json, an empty repo, per-DC/OS-version
    repos, and their content_type companion files."""
    parser = argparse.ArgumentParser(
        description='This script generates all of the universe objects from '
        'the universe repository. The files created in --out-dir are: '
        'universe.json.')
    parser.add_argument(
        '--repository',
        required=True,
        type=pathlib.Path,
        help='Path to the top level package directory. E.g. repo/packages')
    parser.add_argument(
        '--out-dir',
        dest='outdir',
        required=True,
        type=pathlib.Path,
        help='Path to the directory to use to store all universe objects')
    args = parser.parse_args()
    # Both directories must already exist; bail out early otherwise.
    if not args.outdir.is_dir():
        print('The path in --out-dir [{}] is not a directory. Please create it'
              ' before running this script.'.format(args.outdir))
        return
    if not args.repository.is_dir():
        print('The path in --repository [{}] is not a directory.'.format(
            args.repository))
        return
    # Build one package object per (name, release version) found in the repo.
    packages = [
        generate_package_from_path(
            args.repository,
            package_name,
            release_version)
        for package_name, release_version
        in enumerate_dcos_packages(args.repository)
    ]
    # Render entire universe
    universe_path = args.outdir / 'universe.json'
    with universe_path.open('w', encoding='utf-8') as universe_file:
        json.dump({'packages': packages}, universe_file)
    ct_universe_path = args.outdir / 'universe.content_type'
    create_content_type_file(ct_universe_path, "v4")
    # Render empty json
    empty_path = args.outdir / 'repo-empty-v3.json'
    with empty_path.open('w', encoding='utf-8') as universe_file:
        json.dump({'packages': []}, universe_file)
    ct_empty_path = args.outdir / 'repo-empty-v3.content_type'
    create_content_type_file(ct_empty_path, "v3")
    # DC/OS < 1.8 consumes zipped universes; >= 1.8 consumes json repos.
    zip_file_dcos_versions = ["1.6.1", "1.7"]
    json_file_dcos_versions = ["1.8", "1.9", "1.10", "1.11", "1.12"]
    # create universe-by-version files for `dcos_versions`
    # (packages are deep-copied because rendering mutates them)
    dcos_versions = zip_file_dcos_versions + json_file_dcos_versions
    [render_universe_by_version(
        args.outdir, copy.deepcopy(packages), version) for version in dcos_versions]
    for dcos_version in json_file_dcos_versions:
        _populate_dcos_version_json_to_folder(dcos_version, args.outdir)
def render_universe_by_version(outdir, packages, version):
    """Write the universe repo for a single DC/OS `version`.

    Versions before 1.8 get a legacy zip file; later versions get a
    validated JSON repo plus its content-type marker file.

    :param outdir: Path to the directory to use to store all universe objects
    :type outdir: str
    :param packages: package dictionary
    :type packages: dict
    :param version: DC/OS version
    :type version: str
    :rtype: None
    """
    wants_zip = LooseVersion(version) < LooseVersion("1.8")
    if wants_zip:
        render_zip_universe_by_version(outdir, packages, version)
    else:
        json_path = render_json_by_version(outdir, packages, version)
        _validate_repo(json_path, version)
        render_content_type_file_by_version(outdir, version)
def json_escape_compatibility(schema: collections.OrderedDict) -> collections.OrderedDict:
    """Recursively re-escape stringified JSON inside a config schema.

    Descriptions and string defaults are run through escape_json_string;
    nested object properties are processed recursively. Mutates and
    returns `schema`.
    """
    for prop in schema.values():
        if "description" in prop:
            prop["description"] = escape_json_string(prop["description"])
        prop_type = prop.get("type")
        if prop_type == "string" and "default" in prop:
            prop["default"] = escape_json_string(prop["default"])
        elif prop_type == "object" and "properties" in prop:
            prop["properties"] = json_escape_compatibility(prop["properties"])
    return schema
def escape_json_string(string: str) -> str:
    """Escape every double quote that is not already backslash-escaped.

    Turns `"` into `\\"` while leaving an existing `\\"` untouched.

    Bug fix: the previous pattern `([^\\\\])\\\"` required a preceding
    character, so a quote at position 0 of the string was never escaped.
    A negative lookbehind has no such requirement.

    :param string: text possibly containing singly escaped JSON
    :return: text with all unescaped double quotes escaped
    """
    # (?<!\\)" matches a double quote NOT preceded by a backslash.
    return re.sub(r'(?<!\\)"', r'\\"', string)
def render_content_type_file_by_version(outdir, version):
    """Write the content-type marker file for DC/OS `version`.

    :param outdir: Path to the directory to use to store all universe objects
    :type outdir: str
    :param version: DC/OS version
    :type version: str
    :rtype: None
    """
    # Repo format v4 applies from DC/OS 1.10 onward; older releases use v3.
    if LooseVersion(version) < LooseVersion("1.10"):
        universe_version = "v3"
    else:
        universe_version = "v4"
    marker_path = outdir / 'repo-up-to-{}.content_type'.format(version)
    create_content_type_file(marker_path, universe_version)
def create_content_type_file(path, universe_version):
    """Write the universe repo content-type string into `path`.

    :param path: the name of the content-type file
    :type path: pathlib.Path
    :param universe_version: Universe content type version: "v3" or "v4"
    :type universe_version: str
    :rtype: None
    """
    content_type = format_universe_repo_content_type(universe_version)
    with path.open('w', encoding='utf-8') as ct_file:
        ct_file.write(content_type)
def format_universe_repo_content_type(universe_version):
    """Build the MIME content-type string for a universe repo version.

    :param universe_version: Universe content type version: "v3" or "v4"
    :type universe_version: str
    :return: content-type of the universe repo version `universe_version`
    :rtype: str
    """
    return (
        "application/vnd.dcos.universe.repo+json;"
        "charset=utf-8;version=" + universe_version
    )
def render_json_by_version(outdir, packages, version):
    """Write the filtered repo JSON for DC/OS `version`; return its path.

    :param outdir: Path to the directory to use to store all universe objects
    :type outdir: str
    :param packages: package dictionary
    :type packages: dict
    :param version: DC/OS version
    :type version: str
    :return: the path where the universe was stored
    :rtype: pathlib.Path
    """
    eligible = filter_and_downgrade_packages_by_version(packages, version)
    out_path = outdir / 'repo-up-to-{}.json'.format(version)
    with out_path.open('w', encoding='utf-8') as repo_file:
        json.dump({'packages': eligible}, repo_file)
    return out_path
def filter_and_downgrade_packages_by_version(packages, version):
    """Keep only packages valid for `version`, downgrading them if needed.

    :param packages: package dictionaries
    :type packages: list
    :param version: DC/OS version
    :type version: str
    :return: packages filtered (and possibly downgraded) for `version`
    :rtype: list
    """
    kept = [pkg for pkg in packages if filter_by_version(pkg, version)]
    if LooseVersion(version) < LooseVersion('1.10'):
        # Prior to 1.10, Cosmos had a rendering bug that required
        # stringified JSON to be doubly escaped. This was corrected
        # in 1.10, so packages bridging versions must be re-escaped here.
        #
        # < 1.9 style escaping:
        #   \\\"field\\\": \\\"value\\\"
        # >= 1.10 style escaping:
        #   \"field\": \"value\"
        for pkg in kept:
            if "config" in pkg and "properties" in pkg["config"]:
                # A config file is roughly {"schema": ..., "properties": {}};
                # json_escape_compatibility recurses from the top-level
                # properties downwards.
                pkg["config"]["properties"] = json_escape_compatibility(
                    pkg["config"]["properties"])
        kept = [downgrade_package_to_v3(pkg) for pkg in kept]
    return kept
def render_zip_universe_by_version(outdir, packages, version):
    """Write the legacy zip universe for DC/OS `version`.

    :param outdir: Path to the directory to use to store all universe objects
    :type outdir: str
    :param packages: package dictionaries
    :type packages: list
    :param version: DC/OS version
    :type version: str
    :rtype: None
    """
    eligible = [pkg for pkg in packages if filter_by_version(pkg, version)]
    with tempfile.NamedTemporaryFile() as temp_file:
        with zipfile.ZipFile(temp_file, mode='w') as zip_file:
            render_universe_zip(zip_file, eligible)
        # Copy out of the temp location before it vanishes on close.
        zip_name = 'repo-up-to-{}.zip'.format(version)
        shutil.copy(temp_file.name, str(outdir / zip_name))
def filter_by_version(package, version):
    """Predicate: True when `package` supports DC/OS `version` or older.

    :param package: package dictionary
    :type package: dict
    :param version: DC/OS version
    :type version: str
    :rtype: bool
    """
    # Packages without minDcosReleaseVersion are assumed to run anywhere.
    min_version = package.get('minDcosReleaseVersion', '0.0')
    return LooseVersion(min_version) <= LooseVersion(version)
def package_path(root, package_name, release_version):
    """Return the directory that holds one release of a package.

    Layout: <root>/<first letter, uppercased>/<name>/<release version>.

    :param root: path to the root of the repository
    :type root: pathlib.Path
    :param package_name: name of the package
    :type package_name: str
    :param release_version: package release version
    :type release_version: int
    :rtype: pathlib.Path
    """
    letter = package_name[:1].upper()
    return root / letter / package_name / str(release_version)
def read_package(path):
    """Load <path>/package.json as a dict.

    :param path: path to the package
    :type path: pathlib.Path
    :rtype: dict
    """
    package_file = path / 'package.json'
    with package_file.open(encoding='utf-8') as file_object:
        return json.load(file_object)
def read_resource(path):
    """Load <path>/resource.json as a dict, or None when the file is absent.

    :param path: path to the package
    :type path: pathlib.Path
    :rtype: dict | None
    """
    resource_file = path / 'resource.json'
    if not resource_file.is_file():
        return None
    with resource_file.open(encoding='utf-8') as file_object:
        return json.load(file_object)
def read_marathon_template(path):
    """Load <path>/marathon.json.mustache base64-encoded, or None if absent.

    :param path: path to the package
    :type path: pathlib.Path
    :rtype: str | None
    """
    template_file = path / 'marathon.json.mustache'
    if not template_file.is_file():
        return None
    raw = template_file.read_bytes()
    return base64.standard_b64encode(raw).decode()
def read_config(path):
    """Load <path>/config.json as an OrderedDict, or None when absent.

    :param path: path to the package
    :type path: pathlib.Path
    :rtype: collections.OrderedDict | None
    """
    config_file = path / 'config.json'
    if not config_file.is_file():
        return None
    with config_file.open(encoding='utf-8') as file_object:
        # OrderedDict preserves the property order from the file.
        return json.load(
            file_object, object_pairs_hook=collections.OrderedDict)
def read_command(path):
    """Load <path>/command.json as a dict, or None when the file is absent.

    :param path: path to the package
    :type path: pathlib.Path
    :rtype: dict | None
    """
    command_file = path / 'command.json'
    if not command_file.is_file():
        return None
    with command_file.open(encoding='utf-8') as file_object:
        return json.load(file_object)
def generate_package_from_path(root, package_name, release_version):
    """Assemble v3 package metadata from the files in a package directory.

    :param root: path to the root of the repository
    :type root: pathlib.Path
    :param package_name: name of the package
    :type package_name: str
    :param release_version: package release version
    :type release_version: int
    :rtype: dict
    """
    path = package_path(root, package_name, release_version)
    package_json = read_package(path)
    # Optional files come back as None when missing and are skipped later.
    return generate_package(
        release_version,
        package_json,
        resource=read_resource(path),
        marathon_template=read_marathon_template(path),
        config=read_config(path),
        command=read_command(path)
    )
def generate_package(
        release_version,
        package,
        resource,
        marathon_template,
        config,
        command):
    """Build the v3 package object for a package. See
    repo/meta/schema/v3-repo-schema.json

    :param release_version: package release version
    :type release_version: int
    :param package: content of package.json
    :type package: dict
    :param resource: content of resource.json
    :type resource: dict | None
    :param marathon_template: content of marathon.json.template as base64
    :type marathon_template: str | None
    :param config: content of config.json
    :type config: dict | None
    :param command: content of command.json
    :type command: dict | None
    :rtype: dict
    """
    # Copy so the caller's dict is never mutated.
    result = package.copy()
    result['releaseVersion'] = release_version
    # Optional sections are only attached when present (truthy).
    if resource:
        result['resource'] = resource
    if marathon_template:
        result['marathon'] = {'v2AppMustacheTemplate': marathon_template}
    if config:
        result['config'] = config
    if command:
        result['command'] = command
    return result
def enumerate_dcos_packages(packages_path):
    """Yield every (package name, release version) pair in the repository.

    Walks the <letter>/<package>/<release> directory layout.

    :param packages_path: the path to the root of the packages
    :type packages_path: pathlib.Path
    :returns: generator of package name and release version
    :rtype: gen((str, int))
    """
    for letter_dir in packages_path.iterdir():
        for package_dir in letter_dir.iterdir():
            for release_dir in package_dir.iterdir():
                yield package_dir.name, int(release_dir.name)
def render_universe_zip(zip_file, packages):
    """Populate `zip_file` with packages in the legacy Cosmos layout.

    Creates the universe/repo/... directory tree, the index and version
    metadata files, and one directory per package release.

    :param zip_file: zipfile where we need to write the packages
    :type zip_file: zipfile.ZipFile
    :param packages: list of packages
    :type packages: [dict]
    :rtype: None
    """
    ordered = sorted(
        packages,
        key=lambda package: (package['name'], package['releaseVersion']))
    root = pathlib.Path('universe')
    meta_dir = root / 'repo' / 'meta'
    create_dir_in_zip(zip_file, root)
    create_dir_in_zip(zip_file, root / 'repo')
    create_dir_in_zip(zip_file, meta_dir)
    zip_file.writestr(
        str(meta_dir / 'index.json'),
        json.dumps(create_index(ordered)))
    zip_file.writestr(
        str(meta_dir / 'version.json'),
        json.dumps({'version': '2.0.0'}))
    packages_dir = root / 'repo' / 'packages'
    create_dir_in_zip(zip_file, packages_dir)
    # Sorted input means each letter/name directory is created exactly once.
    seen_letter = ''
    seen_name = ''
    for package in ordered:
        letter = package['name'][:1].upper()
        if seen_letter != letter:
            seen_letter = letter
            create_dir_in_zip(zip_file, packages_dir / letter)
        if seen_name != package['name']:
            seen_name = package['name']
            create_dir_in_zip(zip_file, packages_dir / letter / seen_name)
        release_dir = (
            packages_dir /
            letter /
            seen_name /
            str(package['releaseVersion'])
        )
        create_dir_in_zip(zip_file, release_dir)
        write_package_in_zip(zip_file, release_dir, package)
def create_dir_in_zip(zip_file, directory):
    """Add an empty directory entry to a zip file.

    :param zip_file: zip file where the directory will get created
    :type zip_file: zipfile.ZipFile
    :param directory: path for the directory
    :type directory: pathlib.Path
    :rtype: None
    """
    # Zip archives mark directories with a trailing slash and no payload.
    entry_name = '{}/'.format(directory)
    zip_file.writestr(entry_name, b'')
def write_package_in_zip(zip_file, path, package):
    """Write one package's files under `path` inside the zip.

    :param zip_file: zip file where the files will get created
    :type zip_file: zipfile.ZipFile
    :param path: path for the package directory. E.g.
                 universe/repo/packages/M/marathon/0
    :type path: pathlib.Path
    :param package: package information dictionary
    :type package: dict
    :rtype: None
    """
    # Legacy zips are packaging v2; the downgrade returns a fresh copy so the
    # pops below never touch the caller's dict.
    package = downgrade_package_to_v2(package)
    package.pop('releaseVersion')
    resource = package.pop('resource', None)
    if resource:
        zip_file.writestr(
            str(path / 'resource.json'),
            json.dumps(resource))
    marathon_template = package.pop('marathon', {}).get('v2AppMustacheTemplate')
    if marathon_template:
        # Templates are stored base64-encoded in metadata; decode for the zip.
        zip_file.writestr(
            str(path / 'marathon.json.mustache'),
            base64.standard_b64decode(marathon_template))
    config = package.pop('config', None)
    if config:
        zip_file.writestr(
            str(path / 'config.json'),
            json.dumps(config))
    command = package.pop('command', None)
    if command:
        zip_file.writestr(
            str(path / 'command.json'),
            json.dumps(command))
    # Whatever remains after the pops is the package.json payload.
    zip_file.writestr(
        str(path / 'package.json'),
        json.dumps(package))
def create_index(packages):
    """Build the legacy index.json content for `packages`.

    Input must already be sorted by package name for groupby to be correct.

    :param packages: list of packages
    :type packages: [dict]
    :rtype: dict
    """
    grouped = itertools.groupby(packages, key=lambda package: package['name'])
    entries = [create_index_entry(group) for _, group in grouped]
    return {'version': '2.0.0', 'packages': entries}
def create_index_entry(packages):
    """Collapse same-named package releases into a single index entry.

    Scalar fields reflect the last package seen (the highest release
    version when input is sorted); `versions` maps every package version
    to its release version.

    :param packages: list of packages with the same name
    :type packages: [dict]
    :rtype: dict
    """
    entry = {'versions': {}}
    for package in packages:
        entry['name'] = package['name']
        entry['currentVersion'] = package['version']
        entry['description'] = package['description']
        entry['framework'] = package.get('framework', False)
        entry['tags'] = package['tags']
        entry['selected'] = package.get('selected', False)
        entry['versions'][package['version']] = str(package['releaseVersion'])
    return entry
def v3_to_v2_package(v3_package):
    """Downgrade a v3 package dict to packaging version 2.0.

    The input is never mutated; a deep copy is returned.

    :param v3_package: a v3 package
    :type v3_package: dict
    :return: a v2 package
    :rtype: dict
    """
    package = copy.deepcopy(v3_package)
    package.pop('minDcosReleaseVersion', None)
    package['packagingVersion'] = "2.0"
    resource = package.get('resource')
    if resource:
        # v2 has no binary CLI support; drop it and warn when there is no
        # Python CLI ('command') fallback.
        dropped_cli = resource.pop('cli', None)
        if dropped_cli and 'command' not in package:
            print(('WARNING: Removing binary CLI from ({}, {}) without a '
                   'Python CLI').format(package['name'], package['version']))
    return package
def v4_to_v3_package(v4_package):
    """Downgrade a v4 package dict to packaging version 3.0.

    Drops the v4-only upgrade/downgrade relationship fields; the input is
    never mutated.

    :param v4_package: a v4 package
    :type v4_package: dict
    :return: a v3 package
    :rtype: dict
    """
    package = copy.deepcopy(v4_package)
    for v4_only_key in ('upgradesFrom', 'downgradesTo'):
        package.pop(v4_only_key, None)
    package["packagingVersion"] = "3.0"
    return package
def downgrade_package_to_v2(package):
    """Return a v2 copy of a v2, v3 or v4 package.

    The original package is never modified; a v2 input is deep-copied
    unchanged.

    :param package: v4, v3, or v2 package
    :type package: dict
    :return: a v2 package
    :rtype: dict
    """
    packaging_version = package.get("packagingVersion")
    if packaging_version == "2.0":
        return copy.deepcopy(package)
    if packaging_version == "3.0":
        return v3_to_v2_package(package)
    # Anything else is assumed to be v4: step down through v3.
    return v3_to_v2_package(v4_to_v3_package(package))
def downgrade_package_to_v3(package):
    """Return a v3-or-lower copy of a package.

    v2 and v3 inputs come back as unchanged deep copies; v4 inputs are
    downgraded. The original package is never modified.

    :param package: v4, v3, or v2 package
    :type package: dict
    :return: a v3 or v2 package
    :rtype: dict
    """
    if package.get("packagingVersion") in ("2.0", "3.0"):
        return copy.deepcopy(package)
    return v4_to_v3_package(package)
def validate_repo_with_schema(repo_json_data, repo_version):
    """Validate a repo against the schema for its version.

    :param repo_json_data: The json of repo
    :param repo_version: version of the repo (e.g.: v4)
    :return: list of validation errors ( length == zero => No errors)
    """
    validator = jsonschema.Draft4Validator(_load_jsonschema(repo_version))
    errors = []
    for error in validator.iter_errors(repo_json_data):
        if error.context:
            # Composite errors (anyOf/oneOf) carry the real causes in
            # their sub-errors.
            for suberror in sorted(error.context, key=lambda e: e.schema_path):
                errors.append('{}: {}'.format(
                    list(suberror.schema_path), suberror.message))
        else:
            # Bug fix: simple errors have an empty context and were
            # previously dropped entirely, letting invalid repos pass.
            errors.append('{}: {}'.format(
                list(error.schema_path), error.message))
    return errors
def _populate_dcos_version_json_to_folder(dcos_version, outdir):
"""Populate the repo-up-to-<dcos-version>.json file to a folder.
The folder structure would be :
<dcos-version>/
package/
<name-of-package1>.json
<name-of-package2>.json
:param dcos_version: The version of DC/OS file to process.
:type dcos_version: str
:param outdir: Path to the directory to use to store all universe objects
:type outdir: str
:return: None
"""
repo_dir = outdir / dcos_version / 'package'
pathlib.Path(repo_dir).mkdir(parents=True, exist_ok=True)
repo_file = pathlib.Path(outdir / 'repo-up-to-{}.json'.format(dcos_version))
with repo_file.open('r', encoding='utf-8') as f:
data = json.loads(f.read())
packages_dict = {}
for package in data.get('packages'):
package_name = package.get('name')
package_list = packages_dict.get(package_name, [])
package_list.append(package)
packages_dict[package_name] = package_list
for package_name, package_list in packages_dict.items():
with pathlib.Path(repo_dir / '{}.json'.format(package_name))\
.open('w', encoding='utf-8') as f:
json.dump({'packages': package_list}, f)
def _validate_repo(file_path, version):
    """Exit the process when a repo JSON file fails schema validation.

    :param file_path: the path where the universe was stored
    :type file_path: pathlib.Path
    :param version: DC/OS version
    :type version: str
    :rtype: None
    """
    # Schema v4 applies from DC/OS 1.10 onward.
    repo_version = (
        'v4' if LooseVersion(version) >= LooseVersion('1.10') else 'v3')
    with file_path.open(encoding='utf-8') as repo_file:
        repo = json.load(repo_file)
    errors = validate_repo_with_schema(repo, repo_version)
    if errors:
        sys.exit(
            'ERROR\n\nRepo {} version {} validation errors: {}'.format(
                file_path,
                repo_version,
                '\n'.join(errors)
            )
        )
def _load_jsonschema(repo_version):
"""Opens and parses the repo schema based on the version provided.
:param repo_version: repo schema version. E.g. v3 vs v4
:type repo_version: str
:return: the schema dictionary
:rtype: dict
"""
with open(
'repo/meta/schema/{}-repo-schema.json'.format(repo_version),
encoding='utf-8'
) as schema_file:
return json.loads(schema_file.read())
# Script entry point: the process exit status is main()'s return value
# (None, i.e. success, unless an early validation check returned).
if __name__ == '__main__':
    sys.exit(main())
| |
# by amounra 0613 : http://www.aumhaa.com
import Live
#import os, __builtin__, __main__, _ast, _codecs, _functools, _md5, _random, _sha, _sha256, _sha512, _socket, _sre, _ssl, _struct, _symtable, _types, _weakref, binascii, cStringIO, collections, datetime, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, operator, posix, pwd, select, signal, sys, thread, time, unicodedata, xxsubtype, zipimport, zlib
import os, __builtin__, __main__, _ast, _codecs, _functools, _md5, _random, _sha, _sha256, _sha512, _socket, _sre, _ssl, _struct, _symtable, _types, _weakref, binascii, cStringIO, collections, datetime, errno, exceptions, gc, imp, itertools, marshal, math, sys, time
#modules = [__builtin__, __main__, _ast, _codecs, _functools, _md5, _random, _sha, _sha256, _sha512, _socket, _sre, _ssl, _struct, _symtable, _types, _weakref, binascii, cStringIO, collections, datetime, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, operator, posix, pwd, select, signal, sys, thread, time, unicodedata, xxsubtype, zipimport, zlib]
modules = []
DIRS_TO_REBUILD = ['Debug', 'AumPC20_b995_9', 'AumPC40_b995_9', 'AumPush_b995', 'AumTroll_b995_9', 'AumTroll_b995_9_G', 'Base_9_LE', 'BlockMod_b995_9', 'Codec_b995_9', 'Codex', 'LaunchMod_b995_9', 'Lemur256_b995_9', 'LemurPad_b995_9', 'Livid_Alias8', 'Livid_Base', 'Livid_Block', 'Livid_CNTRLR', 'Livid_CodeGriid', 'Livid_CodeRemoteScriptLinked', 'Livid_Ohm64', 'Livid_OhmModes', 'MonOhm_b995_9', 'Monomodular_b995_9']
MODS_TO_REBUILD = ['Debug', 'AumPC20', 'AumPC40', 'AumPush', 'AumTroll', 'AumTroll_G', 'Base', 'BlockMod', 'Codec', 'LaunchMod', 'Lemur256', 'LemurPad', 'Alias8', 'Block', 'CNTRLR', 'CodeGriid', 'Ohm64', 'MonOhm', 'Monomodular']
from _Tools.re import *
from _Framework.ControlSurface import *
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
#if not ("/Users/amounra/monomodular_git/L9 Python Scripts/") in sys.path:
# sys.path.append("/Users/amounra/monomodular_git/L9 Python Scripts/")
def rebuild_sys():
    """Ask the installed Debug surface to flush tracked modules.

    Returns the rebuilt module names, or [] when no Debug surface is
    currently loaded in Live.
    """
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            return surface.rebuild_sys()
    return []
def list_new_modules():
    """Return the modules imported since the Debug surface was installed.

    Returns [] when no Debug surface is currently loaded in Live.
    """
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            return surface.rollbackImporter.newModules
    return []
def rollback_is_enabled():
    """Return the new-module names tracked by the Debug surface's importer.

    Bug fixes: get_control_surfaces() yields surface objects, not a
    name-keyed mapping, so membership-test 'Debug' never matched; and
    `modnames` was unbound (UnboundLocalError) when no Debug surface was
    present. Now mirrors the lookup pattern of the sibling helpers.
    """
    modnames = []
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            modnames = surface.rollbackImporter.newModules.keys()
            break
    return modnames
def log_sys_modules():
    """Have the loaded Debug surface dump sys.modules into Live's log."""
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            surface._log_sys_modules()
            break
class Debug(ControlSurface):
    """Control surface used to hot-reload sibling remote scripts.

    Installs a RollbackImporter import hook so any module imported after
    startup can be flushed from sys.modules and re-imported without
    restarting Live. (Python 2 era code: __builtin__, iteritems.)
    """

    def __init__(self, *a, **k):
        super(Debug, self).__init__(*a, **k)
        # Import hook; (re)installed by _start_importer().
        self.rollbackImporter = None
        self._start_importer()
        self.log_message('_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_ DEBUG ON _^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_')
        self._scripts = []

    def _log_paths(self):
        # Dump the contents of every MIDI Remote Scripts folder on sys.path.
        for path in sys.path:
            if 'MIDI Remote Scripts' in path:
                self.log_message('path: ' + str(path) + ' is: ' + str(os.listdir(path)))

    def _start_importer(self):
        # Uninstall any previously installed hook before adding a new one,
        # so __builtin__.__import__ is not wrapped twice.
        if not self.rollbackImporter is None:
            self.rollbackImporter.uninstall()
        self.rollbackImporter = RollbackImporter()

    def reimport_module(self, name, new_name = None):
        # Placeholder; individual module reimport is not implemented.
        pass

    def _log_dirs(self):
        self.log_message(str(sys.path))

    def _reimport_loaded_modules(self):
        self.log_message('reimporting loaded modules.')
        for module in sys.modules.keys():
            self.log_message('preexisting: ' + str(module))
            # Bug fix: was `module is 'Livid_Base'` -- identity comparison
            # against a string literal is unreliable; compare with ==.
            if module == 'Livid_Base':
                # NOTE(review): relies on a Livid_Base name being in scope at
                # call time -- confirm before exercising this path.
                newBase = Livid_Base
                sys.modules[module] = newBase
                self.log_message('replaced Livid_Base with new version!')

    def _log_version_data(self):
        self.log_message('modules: ' + str(sys.builtin_module_names))
        self.log_message('version: ' + str(sys.version))
        self.log_message('sys.path: ' + str(sys.path))

    def _log_sys_modules(self):
        # Sort (value, key) pairs so the log output has a stable order.
        pairs = ((v, k) for (v, k) in sys.modules.iteritems())
        for module in sorted(pairs):
            self.log_message('---' + str(module))

    def disconnect(self):
        # Restore the original __import__ before Live tears the script down.
        if self.rollbackImporter:
            self.rollbackImporter.uninstall()
        super(Debug, self).disconnect()

    def _clean_sys(self):
        # Drop dead entries, then remove every module that belongs to one of
        # the remote-script folders (plus _Mono_Framework) so they reload.
        for key, value in sys.modules.items():
            if value == None:
                del sys.modules[key]
        for path in sys.path:
            if 'MIDI Remote Scripts' in path:
                name_list = os.listdir(path)
                for name in name_list:
                    if name[0] != '_' or '_Mono_Framework' == name[:15]:
                        for key in sys.modules.keys():
                            if name == key[:len(name)]:
                                del sys.modules[key]

    def _log_builtins(self):
        # Bug fix: previously iterated dir() of an unbound local name and
        # logged an undefined `item`; log __builtin__'s attributes instead.
        for item in dir(__builtin__):
            self.log_message('--- %s' % (item))

    def _log_C_modules(self):
        # `modules` is the module-level list of C modules to describe.
        for item in modules:
            self.log_message('Module Name: %s' % (item.__name__))
            self.log_message('--- %s' % (item.__doc__))

    def rebuild_sys(self):
        # Flush tracked modules; report when no importer is active.
        if self.rollbackImporter:
            return self.rollbackImporter._rebuild()
        else:
            return ['no rollbackImporter installed']

    def connect_script_instances(self, instanciated_scripts):
        # Flag every sibling script so they know a debugger is attached.
        self._scripts = instanciated_scripts
        for script in self._scripts:
            script._debug = True
class ModuleRemover(ControlSurfaceComponent):
    """Component that removes its own module from sys.modules on disconnect,
    forcing a fresh import the next time the script loads."""

    def __init__(self, *a, **k):
        "Creates an instance of the ModuleRemover for the module that will be removed"
        super(ModuleRemover, self).__init__(*a, **k)

    def _remove_old_modules(self):
        # Drop dead (None) entries first.
        for key, value in sys.modules.items():
            if value == None:
                del sys.modules[key]
        # Bug fix: instances have no __name__ attribute, so the old
        # `self.__name__` check raised AttributeError; use the module-level
        # __name__ that the delete below already relied on (and `in` instead
        # of the removed-in-py3 has_key).
        if __name__ in sys.modules:
            self.log_message('deleting key---' + str(__name__))
            del sys.modules[__name__]

    def disconnect(self):
        super(ModuleRemover, self).disconnect()
        self._remove_old_modules()
class RollbackImporter:
    # Import hook that records every module imported after installation so
    # they can later be removed from sys.modules, forcing a fresh import.
    # NOTE: Python 2 era code (apply, has_key, __builtin__) -- keep as-is.
    def __init__(self):
        "Creates an instance and installs as the global importer"
        # Snapshot of sys.modules taken at install time; anything imported
        # later that is not in here is considered "new".
        self.previousModules = sys.modules.copy()
        # Keep the real import so it can be delegated to and restored.
        self.realImport = __builtin__.__import__
        __builtin__.__import__ = self._import
        self.newModules = {}
    def _import(self, name, globals=None, locals=None, fromlist=[], *a):
        # Delegate to the real import, then record the module name.
        result = apply(self.realImport, (name, globals, locals, fromlist))
        self.newModules[name] = 1
        return result
    def _rebuild(self):
        # Remove every tracked module from sys.modules so the next import
        # reloads it from disk. Returns [removed names, unresolved names].
        modnames = []
        nonames = []
        # Purge dead (None) entries first.
        for key, value in sys.modules.items():
            if value == None:
                del sys.modules[key]
        for modname in self.newModules.keys():
            if not self.previousModules.has_key(modname):
                if modname in sys.modules.keys():
                    # Force reload when modname next imported
                    del(sys.modules[modname])
                    modnames.append(modname)
                else:
                    # Not registered top-level; look for it as a submodule of
                    # one of the remote-script packages on sys.path.
                    found = False
                    for path in sys.path:
                        if 'MIDI Remote Scripts' in path:
                            name_list = os.listdir(path)
                            for name in name_list:
                                if name[0] != '_' or '_Mono_Framework' == name[:15]:
                                    fullname = name + '.' + modname
                                    if fullname in sys.modules.keys():
                                        del sys.modules[fullname]
                                        found = True
                                        modnames.append(fullname)
                    if not found:
                        nonames.append(modname)
        # Re-baseline so subsequent rebuilds only touch newer imports.
        self.previousModules = sys.modules.copy()
        return [modnames, nonames]
    def uninstall(self):
        # Flush tracked modules and restore the original import hook.
        modnames = self._rebuild()
        __builtin__.__import__ = self.realImport
        return modnames
| |
import socket
import argparse
class Proxy:
    """A minimal blocking HTTP proxy: requests whose URL starts with a
    configured prefix are forwarded to the target host and the reply is
    streamed back to the client."""

    def __init__(self, host, port, prefix="", max_connections=5,
                 request_size_limit=4096, reuseaddr=True, verbosity=0):
        """
        Initialize a new Proxy class.

        :param host: address to bind the proxy server to
        :param port: port to bind the proxy server to
        :param prefix: only accept proxy requests containing this prefix
        :param max_connections: max connections the proxy can have at a time
        :param request_size_limit: how much to read from a socket
        :param reuseaddr: whether to reuse and address if it is occupied
        :param verbosity: -1 no messages, 0 regular messages,
                          1 connection messages, 2 socket messages
        :type host: str
        :type port: int
        :type prefix: str
        :type max_connections: int
        :type request_size_limit: int
        :type reuseaddr: bool
        :type verbosity: int
        """
        self.host = host
        self.port = port
        self.prefix = prefix
        self.max_connections = max_connections
        self.request_size_limit = request_size_limit
        self.reuseaddr = reuseaddr
        self.verbosity = verbosity
        # Created lazily in _bind().
        self.server_socket = None

    def run(self):
        """
        Run the proxy server: bind, listen, then serve clients forever.
        """
        self._bind()
        self._listen()
        self._accept()

    def _bind(self):
        """
        Run the bind step for setting up a server. Bind at the host and port
        that was instantiated with the class. Exits the process on failure.
        """
        try:
            if self.verbosity >= 0:
                print("Binding to {}:{}".format(self.host, self.port))
            self.server_socket = \
                socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Set option to reuse address on bind
            if self.reuseaddr:
                self.server_socket.setsockopt(
                    socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.server_socket.bind((self.host, self.port))
        except socket.error:
            if self.verbosity >= 0:
                print("Unable to bind to {}:{}, exiting.".format(
                    self.host, self.port))
            exit(1)

    def _listen(self):
        """
        Setup server socket to listen for incoming connections. Exits the
        process on failure.
        """
        try:
            self.server_socket.listen(self.max_connections)
        except socket.error:
            if self.verbosity >= 0:
                print("Unable to listen, exiting.")
            exit(2)

    def _accept(self):
        """
        Serve forever: block until a client connects, handle its request,
        then close the client socket. Ctrl-C shuts the server down.
        """
        if self.verbosity >= 1:
            print("Only proxying connections "
                  "with prefix: {}".format(self.prefix))
        while True:
            try:
                # Blocking occurs here until a new client connection.
                client_socket, client_address = self.server_socket.accept()
                if self.verbosity >= 1:
                    print("New client connection from : {}".format(
                        client_address))
                self.client_request(client_socket, client_address)
                client_socket.close()
            except KeyboardInterrupt:
                if self.verbosity >= 1:
                    print("Keyboard interruption reached. Closing server.")
                self.server_socket.close()
                exit(0)
            except socket.error:
                print("Unable to get client connection.")

    def client_request(self, client_socket, client_address):
        """
        Handle a client's request. Check if the url contains prefix. If it
        does, proxy the client's request.

        :param client_socket: socket of the client
        :param client_address: address (host, port) of the client
        :type client_socket: socket.socket
        :type client_address: tuple
        """
        request = client_socket.recv(self.request_size_limit)
        url = Proxy.get_url(request)
        # Ignore requests without the proper prefix (self.prefix)
        if url.startswith(self.prefix):
            address, path = Proxy.separate_url_and_prefix(url, self.prefix)
            request = Proxy.prepare_request(request, address, path)
            self.proxy_request(address, 80, request,
                               client_socket, client_address)

    def proxy_request(self, remote_address, remote_port,
                      request, client_socket, client_address):
        """
        Connect to remote_address:remote_port and send the request. Retrieve
        reply and send directly to the client.

        :param remote_address: address to connect to
        :param remote_port: port of the remote address to connect to
        :param request: request of the client
        :param client_socket: socket of the client
        :param client_address: address (host, port) of the client
        :type remote_address: str
        :type remote_port: int
        :type request: bytes
        :type client_socket: socket.socket
        :type client_address: tuple
        """
        remote_connect_socket = \
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Short timeout doubles as our end-of-response signal below.
        remote_connect_socket.settimeout(2)
        remote_connect_socket.connect((remote_address, remote_port))
        remote_connect_socket.sendall(request)
        try:
            while True:
                if self.verbosity >= 2:
                    print("Received data from ('{}' , {})".format(
                        remote_address, remote_port))
                read = remote_connect_socket.recv(self.request_size_limit)
                if len(read) == 0:
                    break
                if self.verbosity >= 2:
                    print("Sending data to {}".format(client_address))
                # Bug fix: send() may transmit only part of the buffer;
                # sendall() guarantees the whole chunk is delivered.
                client_socket.sendall(read)
            remote_connect_socket.close()
        except socket.timeout:
            # Bug fix: previously detected via a fragile string compare on
            # e.args[0]. A recv timeout means the remote is done sending.
            remote_connect_socket.close()
        except socket.error as e:
            if self.verbosity >= 1:
                print(e)
            exit(3)

    @staticmethod
    def get_url(request):
        """
        Decode a request and extract the url from its request line.

        :param request: request of the client
        :type request: bytes
        :return: url found in the request
        :rtype: str
        """
        decoded_request = request.decode("utf-8")
        first_line = decoded_request.split('\n')[0]
        url = first_line.split(' ')[1]
        return url

    @staticmethod
    def separate_url_and_prefix(url, prefix):
        """
        Separate the domain and path from a url.

        :param url: an unseparated url
        :param prefix: prefix to remove from the url
        :type url: str
        :type prefix: str
        :return: domain and path
        :rtype: str, str
        """
        temp = url
        if temp.startswith(prefix):
            temp = temp[len(prefix):]
        if temp.startswith('/'):
            temp = temp[1:]
        # location of http in the url
        http_location = temp.find("http://")
        if http_location != -1:
            temp = temp[http_location + 7:]
        # The slash after the domain separates host from path.
        after_slash = temp.find('/')
        if after_slash != -1:
            return temp[:after_slash], temp[after_slash:]
        else:
            return temp, '/'

    @staticmethod
    def prepare_request(request, address, path):
        """
        Prepare the request by replacing url in the first line with the path
        of the url, replace Host: <localaddress> with Host: <address>.

        :param request: request of the client
        :param address: external address to connect to
        :param path: what to retrieve from address
        :type request: bytes
        :type address: str
        :type path: str
        :return: prepared request
        :rtype: bytes
        """
        rewritten_url = False
        rewritten_host = False
        decoded_request = request.decode("utf-8")
        new_request = ""
        for line in decoded_request.split("\r\n"):
            if not rewritten_url:
                first_line = line.split(' ')
                # First line structured as {GET, POST} address protocol
                new_request = \
                    "{} {} {}".format(first_line[0], path, first_line[2])
                rewritten_url = True
            elif not rewritten_host and line.startswith("Host:"):
                new_request += "\r\nHost: {}".format(address)
                rewritten_host = True
            else:
                new_request += "\r\n{}".format(line)
        return str.encode(new_request)
def main():
    """Parse command-line options and start the proxy server."""
    description = "Run a simple HTTP proxy server."
    parser = argparse.ArgumentParser(description=description)
    # BUG FIX: the original used "" inside a double-quoted literal, which
    # closed and reopened the string, so --help rendered '[Default: ]'.
    # Single quotes keep the "" visible to the user as intended.
    address_help = 'Address to bind to. [Default: ""] (all interfaces)'
    port_help = "Port to bind to. [Default: 8000]"
    prefix_help = "Prefix to look for. [Default: /proxy/]"
    max_conn_help = "Max number of client connections at a time. [Default: 5]"
    size_limit_help = "Max size a network socket can read. [Default: 4096]"
    verbosity_help = "-1 off, 0 normal, 1 connection messages, 2 socket " \
                     "messages. [Default: 0]"
    parser.add_argument("-a", "--address", help=address_help,
                        default="")
    parser.add_argument("-f", "--prefix", type=str, help=prefix_help,
                        default="/proxy/")
    parser.add_argument("-m", "--max_connections", type=int,
                        help=max_conn_help,
                        default=5)
    parser.add_argument("-p", "--port", type=int, help=port_help, default=8000)
    parser.add_argument("-s", "--size_limit", type=int, help=size_limit_help,
                        default=4096)
    parser.add_argument("-v", "--verbosity", type=int, help=verbosity_help,
                        default=0)
    args = parser.parse_args()
    # Hand the parsed options to the Proxy and block in its accept loop.
    proxy = Proxy(args.address, args.port, args.prefix, args.max_connections,
                  args.size_limit, verbosity=args.verbosity)
    proxy.run()
# Start the proxy only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| |
from __future__ import unicode_literals
import tablib
from copy import deepcopy
from datetime import date
from decimal import Decimal
from unittest import skip, skipUnless
from django import VERSION
from django.conf import settings
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.db.models import Count
from django.db.models.fields import FieldDoesNotExist
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from django.utils.html import strip_tags
from import_export import fields, resources, results, widgets
from import_export.instance_loaders import ModelInstanceLoader
from import_export.resources import Diff
from ..models import (
Author, Book, Category, Entry, Profile, WithDefault, WithDynamicDefault,
WithFloatField,
)
# Compatibility shim: newer Django provides force_text, older releases only
# had force_unicode — alias whichever exists to force_text.
try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text
class MyResource(resources.Resource):
    """Minimal non-model Resource used by ResourceTestCase below."""

    name = fields.Field()
    email = fields.Field()
    extra = fields.Field()

    class Meta:
        # Only 'email' and 'name' are ordered explicitly; remaining fields
        # ('extra') are appended after them on export.
        export_order = ('email', 'name')
class ResourceTestCase(TestCase):
    """Tests for the plain (non-model) Resource base class."""

    def setUp(self):
        self.my_resource = MyResource()

    def test_fields(self):
        fields = self.my_resource.fields
        self.assertIn('name', fields)

    def test_field_column_name(self):
        field = self.my_resource.fields['name']
        # BUG FIX: assertEqual, not assertIn — assertIn on a string does a
        # substring test, so e.g. column_name == 'n' would also have passed.
        self.assertEqual(field.column_name, 'name')

    def test_meta(self):
        self.assertIsInstance(self.my_resource._meta,
                              resources.ResourceOptions)

    def test_get_export_order(self):
        # Explicitly ordered fields first, then the remainder ('extra').
        self.assertEqual(self.my_resource.get_export_headers(),
                         ['email', 'name', 'extra'])

    # Issue 140 Attributes aren't inherited by subclasses
    def test_inheritance(self):
        class A(MyResource):
            inherited = fields.Field()

            class Meta:
                import_id_fields = ('email',)

        class B(A):
            local = fields.Field()

            class Meta:
                export_order = ('email', 'extra')

        resource = B()
        self.assertIn('name', resource.fields)
        self.assertIn('inherited', resource.fields)
        self.assertIn('local', resource.fields)
        self.assertEqual(resource.get_export_headers(),
                         ['email', 'extra', 'name', 'inherited', 'local'])
        # Meta options declared on A must survive into B.
        self.assertEqual(resource._meta.import_id_fields, ('email',))

    def test_inheritance_with_custom_attributes(self):
        class A(MyResource):
            inherited = fields.Field()

            class Meta:
                import_id_fields = ('email',)
                custom_attribute = True

        class B(A):
            local = fields.Field()

        resource = B()
        self.assertEqual(resource._meta.custom_attribute, True)
class AuthorResource(resources.ModelResource):
    """ModelResource for Author exposing the reverse FK as a read-only column."""

    # Reverse relation (book_set) exported under the 'books' column; readonly
    # so imports never try to assign to the related manager.
    books = fields.Field(
        column_name='books',
        attribute='book_set',
        readonly=True,
    )

    class Meta:
        model = Author
        export_order = ('name', 'books')
class BookResource(resources.ModelResource):
    """ModelResource for Book used throughout these tests."""

    # The model's 'published' field is exposed under the 'published_date'
    # column name on import/export.
    published = fields.Field(column_name='published_date')

    class Meta:
        model = Book
        exclude = ('imported', )
class ProfileResource(resources.ModelResource):
    """ModelResource for Profile with the required 'user' FK excluded."""

    class Meta:
        model = Profile
        exclude = ('user', )
class WithDefaultResource(resources.ModelResource):
    """ModelResource limited to the 'name' field of WithDefault."""

    class Meta:
        model = WithDefault
        fields = ('name',)
class ModelResourceTest(TestCase):
    """Import/export behaviour of ModelResource against the Book model.

    FIX: the deprecated ``assertEquals`` alias (removed in modern Python)
    is replaced with ``assertEqual`` in test_default and
    test_follow_relationship_for_modelresource.
    """

    def setUp(self):
        self.resource = BookResource()
        self.book = Book.objects.create(name="Some book")
        self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email',
                                               'price'])
        row = [self.book.pk, 'Some book', 'test@example.com', "10.25"]
        self.dataset.append(row)

    def test_default_instance_loader_class(self):
        self.assertIs(self.resource._meta.instance_loader_class,
                      ModelInstanceLoader)

    def test_fields(self):
        fields = self.resource.fields
        self.assertIn('id', fields)
        self.assertIn('name', fields)
        self.assertIn('author_email', fields)
        self.assertIn('price', fields)

    def test_fields_foreign_key(self):
        fields = self.resource.fields
        self.assertIn('author', fields)
        widget = fields['author'].widget
        self.assertIsInstance(widget, widgets.ForeignKeyWidget)
        self.assertEqual(widget.model, Author)

    def test_fields_m2m(self):
        fields = self.resource.fields
        self.assertIn('categories', fields)

    def test_excluded_fields(self):
        self.assertNotIn('imported', self.resource.fields)

    def test_init_instance(self):
        instance = self.resource.init_instance()
        self.assertIsInstance(instance, Book)

    def test_default(self):
        # 'foo_bar' presumably mirrors WithDefault.name's model default —
        # see the WithDefault model definition.
        self.assertEqual(
            WithDefaultResource.fields['name'].clean({'name': ''}), 'foo_bar')

    def test_get_instance(self):
        instance_loader = self.resource._meta.instance_loader_class(
            self.resource)
        self.resource._meta.import_id_fields = ['id']
        instance = self.resource.get_instance(instance_loader,
                                              self.dataset.dict[0])
        self.assertEqual(instance, self.book)

    def test_get_instance_import_id_fields(self):
        class BookResource(resources.ModelResource):
            name = fields.Field(attribute='name', widget=widgets.CharWidget())

            class Meta:
                model = Book
                import_id_fields = ['name']

        resource = BookResource()
        instance_loader = resource._meta.instance_loader_class(resource)
        instance = resource.get_instance(instance_loader, self.dataset.dict[0])
        self.assertEqual(instance, self.book)

    def test_get_instance_with_missing_field_data(self):
        instance_loader = self.resource._meta.instance_loader_class(
            self.resource)
        # construct a dataset with a missing "id" column
        dataset = tablib.Dataset(headers=['name', 'author_email', 'price'])
        dataset.append(['Some book', 'test@example.com', "10.25"])
        with self.assertRaises(KeyError) as cm:
            self.resource.get_instance(instance_loader, dataset.dict[0])
        self.assertEqual(u"Column 'id' not found in dataset. Available columns "
                         "are: %s" % [u'name', u'author_email', u'price'],
                         cm.exception.args[0])

    def test_get_export_headers(self):
        headers = self.resource.get_export_headers()
        self.assertEqual(headers, ['published_date', 'id', 'name', 'author',
                                   'author_email', 'published_time', 'price',
                                   'categories', ])

    def test_export(self):
        dataset = self.resource.export(Book.objects.all())
        self.assertEqual(len(dataset), 1)

    def test_export_iterable(self):
        dataset = self.resource.export(list(Book.objects.all()))
        self.assertEqual(len(dataset), 1)

    def test_get_diff(self):
        diff = Diff(self.resource, self.book, False)
        book2 = Book(name="Some other book")
        diff.compare_with(self.resource, book2)
        html = diff.as_html()
        headers = self.resource.get_export_headers()
        self.assertEqual(html[headers.index('name')],
                         u'<span>Some </span><ins style="background:#e6ffe6;">'
                         u'other </ins><span>book</span>')
        self.assertFalse(html[headers.index('author_email')])

    @skip("See: https://github.com/django-import-export/django-import-export/issues/311")
    def test_get_diff_with_callable_related_manager(self):
        resource = AuthorResource()
        author = Author(name="Some author")
        author.save()
        author2 = Author(name="Some author")
        self.book.author = author
        self.book.save()
        diff = Diff(self.resource, author, False)
        diff.compare_with(self.resource, author2)
        html = diff.as_html()
        headers = resource.get_export_headers()
        self.assertEqual(html[headers.index('books')],
                         '<span>core.Book.None</span>')

    def test_import_data(self):
        result = self.resource.import_data(self.dataset, raise_errors=True)
        self.assertFalse(result.has_errors())
        self.assertEqual(len(result.rows), 1)
        self.assertTrue(result.rows[0].diff)
        self.assertEqual(result.rows[0].import_type,
                         results.RowResult.IMPORT_TYPE_UPDATE)
        instance = Book.objects.get(pk=self.book.pk)
        self.assertEqual(instance.author_email, 'test@example.com')
        self.assertEqual(instance.price, Decimal("10.25"))

    def test_import_data_value_error_includes_field_name(self):
        class AuthorResource(resources.ModelResource):
            class Meta:
                model = Author

        resource = AuthorResource()
        dataset = tablib.Dataset(headers=['id', 'name', 'birthday'])
        dataset.append(['', 'A.A.Milne', '1882test-01-18'])
        result = resource.import_data(dataset, raise_errors=False)
        self.assertTrue(result.has_errors())
        self.assertTrue(result.rows[0].errors)
        msg = ("Column 'birthday': Enter a valid date/time.")
        actual = result.rows[0].errors[0].error
        self.assertIsInstance(actual, ValueError)
        self.assertEqual(msg, str(actual))

    def test_import_data_error_saving_model(self):
        row = list(self.dataset.pop())
        # set pk to something that would yield error
        row[0] = 'foo'
        self.dataset.append(row)
        result = self.resource.import_data(self.dataset, raise_errors=False)
        self.assertTrue(result.has_errors())
        self.assertTrue(result.rows[0].errors)
        actual = result.rows[0].errors[0].error
        self.assertIsInstance(actual, ValueError)
        self.assertIn("Column 'id': could not convert string to float",
                      str(actual))

    def test_import_data_delete(self):
        class B(BookResource):
            delete = fields.Field(widget=widgets.BooleanWidget())

            def for_delete(self, row, instance):
                return self.fields['delete'].clean(row)

        row = [self.book.pk, self.book.name, '1']
        dataset = tablib.Dataset(*[row], headers=['id', 'name', 'delete'])
        result = B().import_data(dataset, raise_errors=True)
        self.assertFalse(result.has_errors())
        self.assertEqual(result.rows[0].import_type,
                         results.RowResult.IMPORT_TYPE_DELETE)
        self.assertFalse(Book.objects.filter(pk=self.book.pk))

    def test_save_instance_with_dry_run_flag(self):
        # Each hook records whether it saw dry_run=True so both code paths
        # can be asserted below.
        class B(BookResource):
            def before_save_instance(self, instance, using_transactions, dry_run):
                super(B, self).before_save_instance(instance, using_transactions, dry_run)
                if dry_run:
                    self.before_save_instance_dry_run = True
                else:
                    self.before_save_instance_dry_run = False

            def save_instance(self, instance, using_transactions=True, dry_run=False):
                super(B, self).save_instance(instance, using_transactions, dry_run)
                if dry_run:
                    self.save_instance_dry_run = True
                else:
                    self.save_instance_dry_run = False

            def after_save_instance(self, instance, using_transactions, dry_run):
                super(B, self).after_save_instance(instance, using_transactions, dry_run)
                if dry_run:
                    self.after_save_instance_dry_run = True
                else:
                    self.after_save_instance_dry_run = False

        resource = B()
        resource.import_data(self.dataset, dry_run=True, raise_errors=True)
        self.assertTrue(resource.before_save_instance_dry_run)
        self.assertTrue(resource.save_instance_dry_run)
        self.assertTrue(resource.after_save_instance_dry_run)

        resource.import_data(self.dataset, dry_run=False, raise_errors=True)
        self.assertFalse(resource.before_save_instance_dry_run)
        self.assertFalse(resource.save_instance_dry_run)
        self.assertFalse(resource.after_save_instance_dry_run)

    def test_delete_instance_with_dry_run_flag(self):
        class B(BookResource):
            delete = fields.Field(widget=widgets.BooleanWidget())

            def for_delete(self, row, instance):
                return self.fields['delete'].clean(row)

            def before_delete_instance(self, instance, dry_run):
                super(B, self).before_delete_instance(instance, dry_run)
                if dry_run:
                    self.before_delete_instance_dry_run = True
                else:
                    self.before_delete_instance_dry_run = False

            def delete_instance(self, instance, using_transactions=True, dry_run=False):
                super(B, self).delete_instance(instance, using_transactions, dry_run)
                if dry_run:
                    self.delete_instance_dry_run = True
                else:
                    self.delete_instance_dry_run = False

            def after_delete_instance(self, instance, dry_run):
                super(B, self).after_delete_instance(instance, dry_run)
                if dry_run:
                    self.after_delete_instance_dry_run = True
                else:
                    self.after_delete_instance_dry_run = False

        resource = B()
        row = [self.book.pk, self.book.name, '1']
        dataset = tablib.Dataset(*[row], headers=['id', 'name', 'delete'])
        resource.import_data(dataset, dry_run=True, raise_errors=True)
        self.assertTrue(resource.before_delete_instance_dry_run)
        self.assertTrue(resource.delete_instance_dry_run)
        self.assertTrue(resource.after_delete_instance_dry_run)

        resource.import_data(dataset, dry_run=False, raise_errors=True)
        self.assertFalse(resource.before_delete_instance_dry_run)
        self.assertFalse(resource.delete_instance_dry_run)
        self.assertFalse(resource.after_delete_instance_dry_run)

    def test_relationships_fields(self):
        class B(resources.ModelResource):
            class Meta:
                model = Book
                fields = ('author__name',)

        author = Author.objects.create(name="Author")
        self.book.author = author
        resource = B()
        result = resource.fields['author__name'].export(self.book)
        self.assertEqual(result, author.name)

    def test_dehydrating_fields(self):
        class B(resources.ModelResource):
            full_title = fields.Field(column_name="Full title")

            class Meta:
                model = Book
                fields = ('author__name', 'full_title')

            def dehydrate_full_title(self, obj):
                return '%s by %s' % (obj.name, obj.author.name)

        author = Author.objects.create(name="Author")
        self.book.author = author
        resource = B()
        full_title = resource.export_field(resource.get_fields()[0], self.book)
        self.assertEqual(full_title, '%s by %s' % (self.book.name,
                                                   self.book.author.name))

    def test_widget_fomat_in_fk_field(self):
        class B(resources.ModelResource):
            class Meta:
                model = Book
                fields = ('author__birthday',)
                widgets = {
                    'author__birthday': {'format': '%Y-%m-%d'},
                }

        author = Author.objects.create(name="Author")
        self.book.author = author
        resource = B()
        result = resource.fields['author__birthday'].export(self.book)
        self.assertEqual(result, str(date.today()))

    def test_widget_kwargs_for_field(self):
        class B(resources.ModelResource):
            class Meta:
                model = Book
                fields = ('published',)
                widgets = {
                    'published': {'format': '%d.%m.%Y'},
                }

        resource = B()
        self.book.published = date(2012, 8, 13)
        result = resource.fields['published'].export(self.book)
        self.assertEqual(result, "13.08.2012")

    def test_foreign_keys_export(self):
        author1 = Author.objects.create(name='Foo')
        self.book.author = author1
        self.book.save()
        dataset = self.resource.export(Book.objects.all())
        self.assertEqual(dataset.dict[0]['author'], author1.pk)

    def test_foreign_keys_import(self):
        author2 = Author.objects.create(name='Bar')
        headers = ['id', 'name', 'author']
        row = [None, 'FooBook', author2.pk]
        dataset = tablib.Dataset(row, headers=headers)
        self.resource.import_data(dataset, raise_errors=True)
        book = Book.objects.get(name='FooBook')
        self.assertEqual(book.author, author2)

    def test_m2m_export(self):
        cat1 = Category.objects.create(name='Cat 1')
        cat2 = Category.objects.create(name='Cat 2')
        self.book.categories.add(cat1)
        self.book.categories.add(cat2)
        dataset = self.resource.export(Book.objects.all())
        self.assertEqual(dataset.dict[0]['categories'],
                         '%d,%d' % (cat1.pk, cat2.pk))

    def test_m2m_import(self):
        cat1 = Category.objects.create(name='Cat 1')
        headers = ['id', 'name', 'categories']
        row = [None, 'FooBook', "%s" % cat1.pk]
        dataset = tablib.Dataset(row, headers=headers)
        self.resource.import_data(dataset, raise_errors=True)
        book = Book.objects.get(name='FooBook')
        self.assertIn(cat1, book.categories.all())

    def test_m2m_options_import(self):
        cat1 = Category.objects.create(name='Cat 1')
        cat2 = Category.objects.create(name='Cat 2')
        headers = ['id', 'name', 'categories']
        row = [None, 'FooBook', "Cat 1|Cat 2"]
        dataset = tablib.Dataset(row, headers=headers)

        class BookM2MResource(resources.ModelResource):
            categories = fields.Field(
                attribute='categories',
                widget=widgets.ManyToManyWidget(Category, field='name',
                                                separator='|')
            )

            class Meta:
                model = Book

        resource = BookM2MResource()
        resource.import_data(dataset, raise_errors=True)
        book = Book.objects.get(name='FooBook')
        self.assertIn(cat1, book.categories.all())
        self.assertIn(cat2, book.categories.all())

    def test_related_one_to_one(self):
        # issue #17 - Exception when attempting access something on the
        # related_name
        user = User.objects.create(username='foo')
        profile = Profile.objects.create(user=user)
        Entry.objects.create(user=user)
        Entry.objects.create(user=User.objects.create(username='bar'))

        class EntryResource(resources.ModelResource):
            class Meta:
                model = Entry
                fields = ('user__profile', 'user__profile__is_private')

        resource = EntryResource()
        dataset = resource.export(Entry.objects.all())
        self.assertEqual(dataset.dict[0]['user__profile'], profile.pk)
        self.assertEqual(dataset.dict[0]['user__profile__is_private'], '1')
        self.assertEqual(dataset.dict[1]['user__profile'], '')
        self.assertEqual(dataset.dict[1]['user__profile__is_private'], '')

    def test_empty_get_queryset(self):
        # issue #25 - Overriding queryset on export() fails when passed
        # queryset has zero elements
        dataset = self.resource.export(Book.objects.none())
        self.assertEqual(len(dataset), 0)

    def test_import_data_skip_unchanged(self):
        def attempted_save(instance, real_dry_run):
            self.fail('Resource attempted to save instead of skipping')

        # Make sure we test with ManyToMany related objects
        cat1 = Category.objects.create(name='Cat 1')
        cat2 = Category.objects.create(name='Cat 2')
        self.book.categories.add(cat1)
        self.book.categories.add(cat2)
        dataset = self.resource.export()

        # Create a new resource that attempts to reimport the data currently
        # in the database while skipping unchanged rows (i.e. all of them)
        resource = deepcopy(self.resource)
        resource._meta.skip_unchanged = True
        # Fail the test if the resource attempts to save the row
        resource.save_instance = attempted_save
        result = resource.import_data(dataset, raise_errors=True)
        self.assertFalse(result.has_errors())
        self.assertEqual(len(result.rows), len(dataset))
        self.assertTrue(result.rows[0].diff)
        self.assertEqual(result.rows[0].import_type,
                         results.RowResult.IMPORT_TYPE_SKIP)

        # Test that we can suppress reporting of skipped rows
        resource._meta.report_skipped = False
        result = resource.import_data(dataset, raise_errors=True)
        self.assertFalse(result.has_errors())
        self.assertEqual(len(result.rows), 0)

    def test_before_import_access_to_kwargs(self):
        class B(BookResource):
            def before_import(self, dataset, using_transactions, dry_run, **kwargs):
                if 'extra_arg' in kwargs:
                    dataset.headers[dataset.headers.index('author_email')] = 'old_email'
                    dataset.insert_col(0,
                                       lambda row: kwargs['extra_arg'],
                                       header='author_email')

        resource = B()
        result = resource.import_data(self.dataset, raise_errors=True,
                                      extra_arg='extra@example.com')
        self.assertFalse(result.has_errors())
        self.assertEqual(len(result.rows), 1)
        instance = Book.objects.get(pk=self.book.pk)
        self.assertEqual(instance.author_email, 'extra@example.com')

    def test_link_to_nonexistent_field(self):
        with self.assertRaises(FieldDoesNotExist) as cm:
            class BrokenBook1(resources.ModelResource):
                class Meta:
                    model = Book
                    fields = ('nonexistent__invalid',)
        self.assertEqual("Book.nonexistent: Book has no field named 'nonexistent'",
                         cm.exception.args[0])

        with self.assertRaises(FieldDoesNotExist) as cm:
            class BrokenBook2(resources.ModelResource):
                class Meta:
                    model = Book
                    fields = ('author__nonexistent',)
        self.assertEqual("Book.author.nonexistent: Author has no field named "
                         "'nonexistent'", cm.exception.args[0])

    def test_link_to_nonrelation_field(self):
        with self.assertRaises(KeyError) as cm:
            class BrokenBook1(resources.ModelResource):
                class Meta:
                    model = Book
                    fields = ('published__invalid',)
        self.assertEqual("Book.published is not a relation",
                         cm.exception.args[0])

        with self.assertRaises(KeyError) as cm:
            class BrokenBook2(resources.ModelResource):
                class Meta:
                    model = Book
                    fields = ('author__name__invalid',)
        self.assertEqual("Book.author.name is not a relation",
                         cm.exception.args[0])

    def test_override_field_construction_in_resource(self):
        class B(resources.ModelResource):
            class Meta:
                model = Book
                fields = ('published',)

            @classmethod
            def field_from_django_field(self, field_name, django_field,
                                        readonly):
                if field_name == 'published':
                    return {'sound': 'quack'}

        B()
        self.assertEqual({'sound': 'quack'}, B.fields['published'])

    def test_readonly_annotated_field_import_and_export(self):
        class B(BookResource):
            total_categories = fields.Field('total_categories', readonly=True)

            class Meta:
                model = Book
                skip_unchanged = True

        cat1 = Category.objects.create(name='Cat 1')
        self.book.categories.add(cat1)
        resource = B()

        # Verify that the annotated field is correctly exported
        dataset = resource.export(
            Book.objects.annotate(total_categories=Count('categories')))
        self.assertEqual(int(dataset.dict[0]['total_categories']), 1)

        # Verify that importing the annotated field raises no errors and that
        # the rows are skipped
        result = resource.import_data(dataset, raise_errors=True)
        self.assertFalse(result.has_errors())
        self.assertEqual(len(result.rows), len(dataset))
        self.assertEqual(
            result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)

    def test_follow_relationship_for_modelresource(self):
        class EntryResource(resources.ModelResource):
            username = fields.Field(attribute='user__username', readonly=False)

            class Meta:
                model = Entry
                fields = ('id', )

            def after_save_instance(self, instance, using_transactions, dry_run):
                if not using_transactions and dry_run:
                    # we don't have transactions and we want to do a dry_run
                    pass
                else:
                    instance.user.save()

        user = User.objects.create(username='foo')
        entry = Entry.objects.create(user=user)
        row = [
            entry.pk,
            'bar',
        ]
        self.dataset = tablib.Dataset(headers=['id', 'username'])
        self.dataset.append(row)
        result = EntryResource().import_data(
            self.dataset, raise_errors=True, dry_run=False)
        self.assertFalse(result.has_errors())
        self.assertEqual(User.objects.get(pk=user.pk).username, 'bar')

    def test_import_data_dynamic_default_callable(self):
        class DynamicDefaultResource(resources.ModelResource):
            class Meta:
                model = WithDynamicDefault
                fields = ('id', 'name',)

        self.assertTrue(callable(DynamicDefaultResource.fields['name'].default))

        resource = DynamicDefaultResource()
        dataset = tablib.Dataset(headers=['id', 'name', ])
        dataset.append([1, None])
        dataset.append([2, None])
        resource.import_data(dataset, raise_errors=False)
        objs = WithDynamicDefault.objects.all()
        # A callable default must be re-evaluated per row, not shared.
        self.assertNotEqual(objs[0].name, objs[1].name)

    def test_float_field(self):
        # 433
        class R(resources.ModelResource):
            class Meta:
                model = WithFloatField

        resource = R()
        dataset = tablib.Dataset(headers=['id', 'f', ])
        dataset.append([None, None])
        dataset.append([None, ''])
        resource.import_data(dataset, raise_errors=True)
        self.assertEqual(WithFloatField.objects.all()[0].f, None)
        self.assertEqual(WithFloatField.objects.all()[1].f, None)
class ModelResourceTransactionTest(TransactionTestCase):
    """Import tests that need real transaction support (dry-run rollback)."""

    @skipUnlessDBFeature('supports_transactions')
    def test_m2m_import_with_transactions(self):
        resource = BookResource()
        cat1 = Category.objects.create(name='Cat 1')
        headers = ['id', 'name', 'categories']
        row = [None, 'FooBook', "%s" % cat1.pk]
        dataset = tablib.Dataset(row, headers=headers)

        # dry_run with use_transactions: the row is written inside a
        # transaction that is rolled back afterwards.
        result = resource.import_data(
            dataset, dry_run=True, use_transactions=True
        )

        row_diff = result.rows[0].diff
        fields = resource.get_fields()

        id_field = resource.fields['id']
        id_diff = row_diff[fields.index(id_field)]
        # id diff should exist because in rollbacked transaction
        # FooBook has been saved
        self.assertTrue(id_diff)

        category_field = resource.fields['categories']
        categories_diff = row_diff[fields.index(category_field)]
        self.assertEqual(strip_tags(categories_diff), force_text(cat1.pk))

        # check that it is really rollbacked
        self.assertFalse(Book.objects.filter(name='FooBook'))

    @skipUnlessDBFeature('supports_transactions')
    def test_m2m_import_with_transactions_error(self):
        resource = ProfileResource()
        headers = ['id', 'user']
        # 'user' is a required field, the database will raise an error.
        row = [None, None]
        dataset = tablib.Dataset(row, headers=headers)

        result = resource.import_data(
            dataset, dry_run=True, use_transactions=True
        )

        # Ensure the error raised by the database has been saved.
        self.assertTrue(result.has_errors())

        # Ensure the rollback has worked properly.
        self.assertEqual(Profile.objects.count(), 0)
class ModelResourceFactoryTest(TestCase):
    """Tests for resources.modelresource_factory."""

    def test_create(self):
        # The factory should build a ModelResource class bound to Book.
        generated = resources.modelresource_factory(Book)
        self.assertIn('id', generated.fields)
        self.assertEqual(generated._meta.model, Book)
@skipUnless(
    'postgresql' in settings.DATABASES['default']['ENGINE'],
    'Run only against Postgres')
class PostgresTests(TransactionTestCase):
    """Behaviour specific to the PostgreSQL backend."""

    # Make sure to start the sequences back at 1
    reset_sequences = True

    def test_create_object_after_importing_dataset_with_id(self):
        dataset = tablib.Dataset(headers=['id', 'name'])
        dataset.append([1, 'Some book'])
        resource = BookResource()
        result = resource.import_data(dataset)
        self.assertFalse(result.has_errors())
        # Importing an explicit pk must not desynchronise the pk sequence:
        # a subsequent plain create() would raise IntegrityError if it did.
        try:
            Book.objects.create(name='Some other book')
        except IntegrityError:
            self.fail('IntegrityError was raised.')

    def test_collect_failed_rows(self):
        resource = ProfileResource()
        headers = ['id', 'user']
        # 'user' is a required field, the database will raise an error.
        row = [None, None]
        dataset = tablib.Dataset(row, headers=headers)
        result = resource.import_data(
            dataset, dry_run=True, use_transactions=True,
            collect_failed_rows=True,
        )
        # Failed rows are collected with an extra trailing 'Error' column.
        self.assertEqual(
            result.failed_dataset.headers,
            [u'id', u'user', u'Error']
        )
        self.assertEqual(len(result.failed_dataset), 1)
        # We can't check the error message because it's package- and version-dependent
# BookWithChapters and its test only exist when running Django >= 1.8 on a
# PostgreSQL backend, since ArrayField is Postgres-only.
if VERSION >= (1, 8) and 'postgresql' in settings.DATABASES['default']['ENGINE']:
    from django.contrib.postgres.fields import ArrayField
    from django.db import models

    class BookWithChapters(models.Model):
        # Model declared inline so it is only registered on Postgres runs.
        name = models.CharField('Book name', max_length=100)
        chapters = ArrayField(models.CharField(max_length=100), default=list)

    class ArrayFieldTest(TestCase):
        fixtures = []

        def setUp(self):
            pass

        def test_arrayfield(self):
            # Importing a comma-joined string should round-trip back into
            # the list stored on the ArrayField.
            dataset_headers = ["id", "name", "chapters"]
            chapters = ["Introduction", "Middle Chapter", "Ending"]
            dataset_row = ["1", "Book With Chapters", ",".join(chapters)]
            dataset = tablib.Dataset(headers=dataset_headers)
            dataset.append(dataset_row)
            book_with_chapters_resource = resources.modelresource_factory(model=BookWithChapters)()
            result = book_with_chapters_resource.import_data(dataset, dry_run=False)

            self.assertFalse(result.has_errors())
            book_with_chapters = list(BookWithChapters.objects.all())[0]
            self.assertListEqual(book_with_chapters.chapters, chapters)
class ManyRelatedManagerDiffTest(TestCase):
    """Diff rendering for many-to-many columns across successive imports."""

    fixtures = ["category"]

    def setUp(self):
        pass

    def test_related_manager_diff(self):
        dataset_headers = ["id", "name", "categories"]
        dataset_row = ["1", "Test Book", "1"]
        original_dataset = tablib.Dataset(headers=dataset_headers)
        original_dataset.append(dataset_row)
        # Same row with the category changed from "1" to "2".
        dataset_row[2] = "2"
        changed_dataset = tablib.Dataset(headers=dataset_headers)
        changed_dataset.append(dataset_row)

        book_resource = BookResource()
        export_headers = book_resource.get_export_headers()

        # First import: the category appears as an insertion in the diff.
        add_result = book_resource.import_data(original_dataset, dry_run=False)
        expected_value = u'<ins style="background:#e6ffe6;">1</ins>'
        self.check_value(add_result, export_headers, expected_value)
        # Second import: old value deleted, new value inserted.
        change_result = book_resource.import_data(changed_dataset, dry_run=False)
        expected_value = u'<del style="background:#ffe6e6;">1</del><ins style="background:#e6ffe6;">2</ins>'
        self.check_value(change_result, export_headers, expected_value)

    def check_value(self, result, export_headers, expected_value):
        # Helper: assert the diff HTML of the 'categories' column.
        self.assertEqual(len(result.rows), 1)
        diff = result.rows[0].diff
        self.assertEqual(diff[export_headers.index("categories")],
                         expected_value)
| |
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, assert_almost_equal)
import skimage
from skimage import data
from skimage import exposure
from skimage.exposure.exposure import intensity_range
from skimage.color import rgb2gray
from skimage.util.dtype import dtype_range
from skimage._shared._warnings import expected_warnings
# Test integer histograms
# =======================
def test_negative_overflow():
    """Histogram of an int8 image spanning [-1, 127] covers every bin once."""
    image = np.array([-1, 127], dtype=np.int8)
    counts, centers = exposure.histogram(image)
    assert_array_equal(centers, np.arange(-1, 128))
    assert counts[0] == 1
    assert counts[-1] == 1
    # All interior bins are empty.
    assert_array_equal(counts[1:-1], 0)
def test_all_negative_image():
    """Histogram of an image whose values are all negative (int8)."""
    image = np.array([-128, -1], dtype=np.int8)
    counts, centers = exposure.histogram(image)
    assert_array_equal(centers, np.arange(-128, 0))
    assert counts[0] == 1
    assert counts[-1] == 1
    # Only the two extreme bins are populated.
    assert_array_equal(counts[1:-1], 0)
# Test histogram equalization
# ===========================
# Deterministic seed so the equalization tests below are reproducible.
np.random.seed(0)

# Shared fixtures: the camera image as integers, and a low-contrast float
# version of it.
test_img_int = data.camera()
# squeeze image intensities to lower image contrast
test_img = skimage.img_as_float(test_img_int)
test_img = exposure.rescale_intensity(test_img / 5. + 100)
def test_equalize_uint8_approx():
    """Check integer bins used for uint8 images."""
    default_bins = exposure.equalize_hist(test_img_int)
    three_bins = exposure.equalize_hist(test_img_int, nbins=3)
    # For uint8 input the results must coincide regardless of nbins.
    np.testing.assert_allclose(default_bins, three_bins)
def test_equalize_ubyte():
    """Equalize a ubyte image; the float->ubyte conversion warns."""
    with expected_warnings(['precision loss']):
        ubyte_img = skimage.img_as_ubyte(test_img)

    equalized = exposure.equalize_hist(ubyte_img)
    cdf, _ = exposure.cumulative_distribution(equalized)
    check_cdf_slope(cdf)
def test_equalize_float():
    """Equalizing a float image should yield an approximately linear CDF."""
    equalized = exposure.equalize_hist(skimage.img_as_float(test_img))
    cdf, _ = exposure.cumulative_distribution(equalized)
    check_cdf_slope(cdf)
def test_equalize_masked():
    """Masked equalization flattens the CDF and differs from unmasked."""
    float_img = skimage.img_as_float(test_img)
    mask = np.zeros(test_img.shape)
    mask[50:150, 50:250] = 1

    masked_eq = exposure.equalize_hist(float_img, mask=mask)
    plain_eq = exposure.equalize_hist(float_img)

    cdf, _ = exposure.cumulative_distribution(masked_eq)
    check_cdf_slope(cdf)
    # The mask must actually influence the result.
    assert not (plain_eq == masked_eq).all()
def check_cdf_slope(cdf):
    """Slope of cdf which should equal 1 for an equalized histogram."""
    xs = np.linspace(0, 1, len(cdf))
    # Linear least-squares fit; first coefficient is the slope.
    slope = np.polyfit(xs, cdf, 1)[0]
    assert 0.9 < slope < 1.1
# Test intensity range
# ====================
def test_intensity_range_uint8():
    """intensity_range on uint8 data for each supported range spec."""
    image = np.array([0, 1], dtype=np.uint8)
    cases = [('image', [0, 1]),
             ('dtype', [0, 255]),
             ((10, 20), [10, 20])]
    # nose-style generated test: one yielded check per case.
    for spec, expected in cases:
        out = intensity_range(image, range_values=spec)
        yield assert_array_equal, out, expected
def test_intensity_range_float():
    """intensity_range on float data for each supported range spec."""
    image = np.array([0.1, 0.2], dtype=np.float64)
    cases = [('image', [0.1, 0.2]),
             ('dtype', [-1, 1]),
             ((0.3, 0.4), [0.3, 0.4])]
    # nose-style generated test: one yielded check per case.
    for spec, expected in cases:
        out = intensity_range(image, range_values=spec)
        yield assert_array_equal, out, expected
def test_intensity_range_clipped_float():
    """clip_negative should clamp the float dtype range at zero."""
    image = np.array([0.1, 0.2], dtype=np.float64)
    clipped = intensity_range(image, range_values='dtype', clip_negative=True)
    assert_array_equal(clipped, (0, 1))
# Test rescale intensity
# ======================
# Maximum representable values for the named unsigned-integer ranges used by
# rescale_intensity (e.g. in_range='uint10').
uint10_max = 2**10 - 1
uint12_max = 2**12 - 1
uint14_max = 2**14 - 1
uint16_max = 2**16 - 1
def test_rescale_stretch():
    """uint8 input stretches to the full [0, 255] range, dtype preserved."""
    rescaled = exposure.rescale_intensity(
        np.array([51, 102, 153], dtype=np.uint8))
    assert rescaled.dtype == np.uint8
    assert_array_almost_equal(rescaled, [0, 127, 255])
def test_rescale_shrink():
    """Float input rescales to the unit interval [0, 1]."""
    rescaled = exposure.rescale_intensity(np.array([51., 102., 153.]))
    assert_array_almost_equal(rescaled, [0, 0.5, 1])
def test_rescale_in_range():
    """Explicit in_range maps values proportionally into [0, 1]."""
    rescaled = exposure.rescale_intensity(np.array([51., 102., 153.]),
                                          in_range=(0, 255))
    assert_array_almost_equal(rescaled, [0.2, 0.4, 0.6])
def test_rescale_in_range_clip():
    """Values above in_range's upper bound clip to 1."""
    rescaled = exposure.rescale_intensity(np.array([51., 102., 153.]),
                                          in_range=(0, 102))
    assert_array_almost_equal(rescaled, [0.5, 1, 1])
def test_rescale_out_range():
    """An explicit out_range is honoured and the input dtype kept."""
    rescaled = exposure.rescale_intensity(
        np.array([-10, 0, 10], dtype=np.int8), out_range=(0, 127))
    assert rescaled.dtype == np.int8
    assert_array_almost_equal(rescaled, [0, 63, 127])
def test_rescale_named_in_range():
    """A named in_range ('uint10') scales and clips against 0..1023."""
    image = np.array([0, uint10_max, uint10_max + 100], dtype=np.uint16)
    out = exposure.rescale_intensity(image, in_range='uint10')
    # Values at or above uint10_max saturate at the uint16 maximum.
    assert_array_almost_equal(out, [0, uint16_max, uint16_max])
def test_rescale_named_out_range():
    """A named out_range ('uint10') maps output onto 0..1023."""
    image = np.array([0, uint16_max], dtype=np.uint16)
    out = exposure.rescale_intensity(image, out_range='uint10')
    assert_array_almost_equal(out, [0, uint10_max])
def test_rescale_uint12_limits():
    """out_range='uint12' maps output onto 0..4095."""
    image = np.array([0, uint16_max], dtype=np.uint16)
    out = exposure.rescale_intensity(image, out_range='uint12')
    assert_array_almost_equal(out, [0, uint12_max])
def test_rescale_uint14_limits():
    """out_range='uint14' maps output onto 0..16383."""
    image = np.array([0, uint16_max], dtype=np.uint16)
    out = exposure.rescale_intensity(image, out_range='uint14')
    assert_array_almost_equal(out, [0, uint14_max])
# Test adaptive histogram equalization
# ====================================
def test_adapthist_scalar():
    """Adaptive equalization of a scalar (grayscale) uint8 image.

    Output must span the full [0, 1] float range and keep the input shape;
    the SNR/brightness figures are regression values for this dataset.
    """
    img = skimage.img_as_ubyte(data.moon())
    adapted = exposure.equalize_adapthist(img, kernel_size=64, clip_limit=0.02)
    assert adapted.min() == 0.0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    # Compare against the input stretched to full scale, not the raw input.
    full_scale = skimage.exposure.rescale_intensity(skimage.img_as_float(img))
    assert_almost_equal(peak_snr(full_scale, adapted), 102.066, 3)
    assert_almost_equal(norm_brightness_err(full_scale, adapted),
                        0.038, 3)
def test_adapthist_grayscale():
    """Adaptive equalization of a 3-channel image built from one gray plane.

    The grayscale plane is stacked three times so the color code path is
    exercised with known-identical channels.
    """
    img = skimage.img_as_float(data.astronaut())
    img = rgb2gray(img)
    img = np.dstack((img, img, img))
    with expected_warnings(['precision loss|non-contiguous input']):
        adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51),
                                              clip_limit=0.01, nbins=128)
    assert img.shape == adapted.shape
    # Regression values for this dataset/parameter combination.
    assert_almost_equal(peak_snr(img, adapted), 102.078, 3)
    assert_almost_equal(norm_brightness_err(img, adapted), 0.0529, 3)
def test_adapthist_color():
    """Adaptive equalization of an RGB uint16 image.

    Fix: drop the stray ``return data, adapted`` at the end — test
    functions must not return values (``data`` here is the skimage data
    module, so the returned tuple was meaningless, and pytest warns on
    non-None returns from tests).
    """
    img = skimage.img_as_uint(data.astronaut())
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # histogram() on a color image should emit at least one warning.
        hist, bin_centers = exposure.histogram(img)
        assert len(w) > 0
    with expected_warnings(['precision loss']):
        adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    full_scale = skimage.exposure.rescale_intensity(img)
    # Regression values for this dataset/parameter combination.
    assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 1)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
def test_adapthist_alpha():
    """Adaptive equalization of an RGBA image drops the alpha channel."""
    img = skimage.img_as_float(data.astronaut())
    alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
    img = np.dstack((img, alpha))
    with expected_warnings(['precision loss']):
        adapted = exposure.equalize_adapthist(img)
    # Alpha is stripped, so the output has one channel fewer than the input.
    assert adapted.shape != img.shape
    img = img[:, :, :3]
    full_scale = skimage.exposure.rescale_intensity(img)
    assert img.shape == adapted.shape
    # Regression values for this dataset/parameter combination.
    assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 2)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0248, 3)
def test_adapthist_ntiles_raises():
    """The removed ntiles_x/ntiles_y keyword arguments raise ValueError."""
    img = skimage.img_as_ubyte(data.moon())
    assert_raises(ValueError, exposure.equalize_adapthist, img, ntiles_x=8)
    assert_raises(ValueError, exposure.equalize_adapthist, img, ntiles_y=8)
    assert_raises(ValueError, exposure.equalize_adapthist, img,
                  ntiles_x=8, ntiles_y=8)
def peak_snr(img1, img2):
    """Peak signal to noise ratio of two images

    Parameters
    ----------
    img1 : array-like
    img2 : array-like

    Returns
    -------
    peak_snr : float
        Peak signal to noise ratio
    """
    # Compare in grayscale so color and gray inputs are treated uniformly.
    if img1.ndim == 3:
        img1, img2 = rgb2gray(img1.copy()), rgb2gray(img2.copy())
    img1 = skimage.img_as_float(img1)
    img2 = skimage.img_as_float(img2)
    mse = 1. / img1.size * np.square(img1 - img2).sum()
    _, max_ = dtype_range[img1.dtype.type]
    # NOTE(review): this is not the textbook PSNR (20*log10(max/sqrt(MSE)));
    # it uses the natural log and the raw MSE.  The regression values the
    # adapthist tests assert were calibrated against this formula, so it
    # must not be "corrected" without updating those expectations.
    return 20 * np.log(max_ / mse)
def norm_brightness_err(img1, img2):
    """Normalized Absolute Mean Brightness Error between two images

    Parameters
    ----------
    img1 : array-like
    img2 : array-like

    Returns
    -------
    norm_brightness_error : float
        Normalized absolute mean brightness error
    """
    # Compare in grayscale so color and gray inputs are treated uniformly.
    if img1.ndim == 3:
        img1, img2 = rgb2gray(img1), rgb2gray(img2)
    brightness_gap = np.abs(img1.mean() - img2.mean())
    return brightness_gap / dtype_range[img1.dtype.type][1]
# Test Gamma Correction
# =====================
def test_adjust_gamma_one():
    """Same image should be returned for gamma equal to one"""
    img = np.random.uniform(0, 255, (8, 8))
    assert_array_equal(exposure.adjust_gamma(img, 1), img)
def test_adjust_gamma_zero():
    """White image should be returned for gamma equal to zero"""
    img = np.random.uniform(0, 255, (8, 8))
    adjusted = exposure.adjust_gamma(img, 0)
    white = dtype_range[img.dtype.type][1]
    assert_array_equal(adjusted, white)
def test_adjust_gamma_less_one():
    """Verifying the output with expected results for gamma
    correction with gamma equal to half"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    # Expected values follow the gamma transfer curve for gamma=0.5
    # (brightening): presumably 255 * (in/255)**0.5, truncated to uint8.
    expected = np.array([
        [ 0, 31, 45, 55, 63, 71, 78, 84],
        [ 90, 95, 100, 105, 110, 115, 119, 123],
        [127, 131, 135, 139, 142, 146, 149, 153],
        [156, 159, 162, 165, 168, 171, 174, 177],
        [180, 183, 186, 188, 191, 194, 196, 199],
        [201, 204, 206, 209, 211, 214, 216, 218],
        [221, 223, 225, 228, 230, 232, 234, 236],
        [238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.uint8)
    result = exposure.adjust_gamma(image, 0.5)
    assert_array_equal(result, expected)
def test_adjust_gamma_greater_one():
    """Verifying the output with expected results for gamma
    correction with gamma equal to two"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    # Expected values follow the gamma transfer curve for gamma=2
    # (darkening): presumably 255 * (in/255)**2, truncated to uint8.
    expected = np.array([
        [ 0, 0, 0, 0, 1, 1, 2, 3],
        [ 4, 5, 6, 7, 9, 10, 12, 14],
        [ 16, 18, 20, 22, 25, 27, 30, 33],
        [ 36, 39, 42, 45, 49, 52, 56, 60],
        [ 64, 68, 72, 76, 81, 85, 90, 95],
        [100, 105, 110, 116, 121, 127, 132, 138],
        [144, 150, 156, 163, 169, 176, 182, 189],
        [196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)
    result = exposure.adjust_gamma(image, 2)
    assert_array_equal(result, expected)
def test_adjust_gamma_neggative():
    """A negative gamma value must raise ValueError."""
    # NOTE(review): "neggative" is a typo for "negative"; the name is kept
    # so the historically discovered test name does not change.
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    assert_raises(ValueError, exposure.adjust_gamma, image, -1)
# Test Logarithmic Correction
# ===========================
def test_adjust_log():
    """Verifying the output with expected results for logarithmic
    correction with the multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [ 0, 5, 11, 16, 22, 27, 33, 38],
        [ 43, 48, 53, 58, 63, 68, 73, 77],
        [ 82, 86, 91, 95, 100, 104, 109, 113],
        [117, 121, 125, 129, 133, 137, 141, 145],
        [149, 153, 157, 160, 164, 168, 172, 175],
        [179, 182, 186, 189, 193, 196, 199, 203],
        [206, 209, 213, 216, 219, 222, 225, 228],
        [231, 234, 238, 241, 244, 246, 249, 252]], dtype=np.uint8)
    result = exposure.adjust_log(image, 1)
    assert_array_equal(result, expected)
def test_adjust_inv_log():
    """Verifying the output with expected results for inverse logarithmic
    correction with the multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [ 0, 2, 5, 8, 11, 14, 17, 20],
        [ 23, 26, 29, 32, 35, 38, 41, 45],
        [ 48, 51, 55, 58, 61, 65, 68, 72],
        [ 76, 79, 83, 87, 90, 94, 98, 102],
        [106, 110, 114, 118, 122, 126, 130, 134],
        [138, 143, 147, 151, 156, 160, 165, 170],
        [174, 179, 184, 188, 193, 198, 203, 208],
        [213, 218, 224, 229, 234, 239, 245, 250]], dtype=np.uint8)
    # The third positional argument selects the inverse transform.
    result = exposure.adjust_log(image, 1, True)
    assert_array_equal(result, expected)
# Test Sigmoid Correction
# =======================
def test_adjust_sigmoid_cutoff_one():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to one and gain of 5"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [ 1, 1, 1, 2, 2, 2, 2, 2],
        [ 3, 3, 3, 4, 4, 4, 5, 5],
        [ 5, 6, 6, 7, 7, 8, 9, 10],
        [ 10, 11, 12, 13, 14, 15, 16, 18],
        [ 19, 20, 22, 24, 25, 27, 29, 32],
        [ 34, 36, 39, 41, 44, 47, 50, 54],
        [ 57, 61, 64, 68, 72, 76, 80, 85],
        [ 89, 94, 99, 104, 108, 113, 118, 123]], dtype=np.uint8)
    result = exposure.adjust_sigmoid(image, 1, 5)
    assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_zero():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to zero and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    # With cutoff=0 the curve saturates quickly toward the maximum.
    expected = np.array([
        [127, 137, 147, 156, 166, 175, 183, 191],
        [198, 205, 211, 216, 221, 225, 229, 232],
        [235, 238, 240, 242, 244, 245, 247, 248],
        [249, 250, 250, 251, 251, 252, 252, 253],
        [253, 253, 253, 253, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254]], dtype=np.uint8)
    result = exposure.adjust_sigmoid(image, 0, 10)
    assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_half():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [ 1, 1, 2, 2, 3, 3, 4, 5],
        [ 5, 6, 7, 9, 10, 12, 14, 16],
        [ 19, 22, 25, 29, 34, 39, 44, 50],
        [ 57, 64, 72, 80, 89, 99, 108, 118],
        [128, 138, 148, 158, 167, 176, 184, 192],
        [199, 205, 211, 217, 221, 226, 229, 233],
        [236, 238, 240, 242, 244, 246, 247, 248],
        [249, 250, 250, 251, 251, 252, 252, 253]], dtype=np.uint8)
    result = exposure.adjust_sigmoid(image, 0.5, 10)
    assert_array_equal(result, expected)
def test_adjust_inv_sigmoid_cutoff_half():
    """Verifying the output with expected results for inverse sigmoid
    correction with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [253, 253, 252, 252, 251, 251, 250, 249],
        [249, 248, 247, 245, 244, 242, 240, 238],
        [235, 232, 229, 225, 220, 215, 210, 204],
        [197, 190, 182, 174, 165, 155, 146, 136],
        [126, 116, 106, 96, 87, 78, 70, 62],
        [ 55, 49, 43, 37, 33, 28, 25, 21],
        [ 18, 16, 14, 12, 10, 8, 7, 6],
        [ 5, 4, 4, 3, 3, 2, 2, 1]], dtype=np.uint8)
    # The fourth positional argument selects the inverse transform.
    result = exposure.adjust_sigmoid(image, 0.5, 10, True)
    assert_array_equal(result, expected)
def test_negative():
    """adjust_gamma must reject images that contain negative values."""
    img = np.arange(-10, 245, 4).reshape((8, 8)).astype(np.double)
    assert_raises(ValueError, exposure.adjust_gamma, img)
def test_is_low_contrast():
    """Low-contrast detection on float, uint8 and uint16 variants."""
    image = np.linspace(0, 0.04, 100)
    assert exposure.is_low_contrast(image)
    image[-1] = 1
    # A single bright outlier is excluded by the default percentile window...
    assert exposure.is_low_contrast(image)
    # ...but is included once the upper percentile covers the maximum.
    assert not exposure.is_low_contrast(image, upper_percentile=100)
    image = (image * 255).astype(np.uint8)
    assert exposure.is_low_contrast(image)
    assert not exposure.is_low_contrast(image, upper_percentile=100)
    image = (image.astype(np.uint16)) * 2**8
    assert exposure.is_low_contrast(image)
    assert not exposure.is_low_contrast(image, upper_percentile=100)
if __name__ == '__main__':
    # Allow running this test module directly via numpy's test runner.
    from numpy import testing
    testing.run_module_suite()
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack.compute import flavors as flavors_v2
from nova.api.openstack.compute.plugins.v3 import flavors as flavors_v3
from nova.api.openstack import xmlutil
import nova.compute.flavors
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"ephemeral_gb": '20',
"swap": '10',
"disabled": False,
"vcpus": '',
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '20',
"ephemeral_gb": '10',
"swap": '5',
"disabled": False,
"vcpus": '',
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
    """Stub flavor lookup backed by the FAKE_FLAVORS fixture."""
    key = 'flavor %s' % flavorid
    return FAKE_FLAVORS[key]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Stub of flavors.get_all_flavors_sorted_list over FAKE_FLAVORS.

    Supports min_memory_mb/min_root_gb filters and marker/limit pagination;
    marker '99999' simulates a stale marker by raising MarkerNotFound.
    """
    if marker in ['99999']:
        raise exception.MarkerNotFound(marker)
    def reject_min(db_attr, filter_attr):
        # NOTE: reads ``flavor`` from the enclosing for-loop below via late
        # binding, so it must only be called inside that loop.
        return (filter_attr in filters and
                int(flavor[db_attr]) < int(filters[filter_attr]))
    filters = filters or {}
    res = []
    for (flavor_name, flavor) in FAKE_FLAVORS.items():
        if reject_min('memory_mb', 'min_memory_mb'):
            continue
        elif reject_min('root_gb', 'min_root_gb'):
            continue
        res.append(flavor)
    res = sorted(res, key=lambda item: item[sort_key])
    output = []
    # Skip items up to and including the marker, then take at most ``limit``.
    marker_found = True if marker is None else False
    for flavor in res:
        if not marker_found and marker == flavor['flavorid']:
            marker_found = True
        elif marker_found:
            if limit is None or len(output) < int(limit):
                output.append(flavor)
    return output
def empty_get_all_flavors_sorted_list(context=None, inactive=False,
                                      filters=None, sort_key='flavorid',
                                      sort_dir='asc', limit=None, marker=None):
    """Stub that returns no flavors regardless of the query parameters."""
    return []
def return_flavor_not_found(flavor_id, ctxt=None):
    # Stub that simulates a missing flavor for the error-path tests.
    raise exception.FlavorNotFound(flavor_id=flavor_id)
class FlavorsTestV21(test.TestCase):
api_version = "2.1"
_prefix = "/v3"
Controller = flavors_v3.FlavorsController
fake_request = fakes.HTTPRequestV3
_rspv = "v3"
_fake = ""
def setUp(self):
super(FlavorsTestV21, self).setUp()
self.flags(osapi_compute_extension=[])
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
self.controller = self.Controller()
def test_get_flavor_by_invalid_id(self):
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
return_flavor_not_found)
req = self.fake_request.blank(self._prefix + '/flavors/asdf')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'asdf')
def test_get_flavor_by_id(self):
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
}
if self.api_version == "2.1":
expected['flavor']['ephemeral'] = '20'
expected['flavor']['swap'] = '10'
expected['flavor']['disabled'] = False
self.assertEqual(flavor, expected)
def test_get_flavor_with_custom_link_prefix(self):
self.flags(osapi_compute_link_prefix='http://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://zoo.com:42/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://zoo.com:42" + self._fake +
"/flavors/1",
},
],
},
}
if self.api_version == "2.1":
expected['flavor']['ephemeral'] = '20'
expected['flavor']['swap'] = '10'
expected['flavor']['disabled'] = False
self.assertEqual(flavor, expected)
def test_get_flavor_list(self):
req = self.fake_request.blank(self._prefix + '/flavors')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_with_marker(self):
self.maxDiff = None
url = self._prefix + '/flavors?limit=1&marker=1'
req = self.fake_request.blank(url)
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
'flavors_links': [
{'href': 'http://localhost/' + self._rspv +
'/flavors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_list_with_invalid_marker(self):
req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_detail_with_limit(self):
url = self._prefix + '/flavors/detail?limit=1'
req = self.fake_request.blank(url)
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['2']},
matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = self.fake_request.blank(self._prefix + '/flavors/detail')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
if self.api_version == "2.1":
expected['flavors'][0]['ephemeral'] = '20'
expected['flavors'][0]['swap'] = '10'
expected['flavors'][0]['disabled'] = False
expected['flavors'][1]['ephemeral'] = '10'
expected['flavors'][1]['swap'] = '5'
expected['flavors'][1]['disabled'] = False
self.assertEqual(flavor, expected)
def test_get_empty_flavor_list(self):
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
empty_get_all_flavors_sorted_list)
req = self.fake_request.blank(self._prefix + '/flavors')
flavors = self.controller.index(req)
expected = {'flavors': []}
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
# Flavor lists may be filtered by minRam.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
# Ensure you cannot list flavors with invalid minRam param.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
# Flavor lists may be filtered by minDisk.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
# Ensure you cannot list flavors with invalid minDisk param.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_detail_min_ram_and_min_disk(self):
"""Tests that filtering work on flavor details and that minRam and
minDisk filters can be combined
"""
req = self.fake_request.blank(self._prefix + '/flavors/detail'
'?minRam=256&minDisk=20')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
if self.api_version == "2.1":
expected['flavors'][0]['ephemeral'] = '10'
expected['flavors'][0]['swap'] = '5'
expected['flavors'][0]['disabled'] = False
self.assertEqual(flavor, expected)
class FlavorsTestV20(FlavorsTestV21):
    # Re-run the v2.1 flavor tests against the legacy v2.0 API endpoints.
    api_version = "2.0"
    _prefix = "/v2/fake"
    Controller = flavors_v2.Controller
    fake_request = fakes.HTTPRequest
    _rspv = "v2/fake"
    _fake = "/fake"
class FlavorsXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = flavors_v2.FlavorTemplate()
fixture = {
"flavor": {
"id": "12",
"name": "asdf",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_show(self):
serializer = flavors_v2.FlavorTemplate()
fixture = {
"flavor": {
"id": "12",
"name": "asdf",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavor')
flavor_dict = fixture['flavor']
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(root.get(key), str(flavor_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_handles_integers(self):
serializer = flavors_v2.FlavorTemplate()
fixture = {
"flavor": {
"id": 12,
"name": "asdf",
"ram": 256,
"disk": 10,
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/12",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/12",
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavor')
flavor_dict = fixture['flavor']
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(root.get(key), str(flavor_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_detail(self):
serializer = flavors_v2.FlavorsTemplate()
fixture = {
"flavors": [
{
"id": "23",
"name": "flavor 23",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/23",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/23",
},
],
},
{
"id": "13",
"name": "flavor 13",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/13",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/13",
},
],
},
],
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 2)
for i, flavor_elem in enumerate(flavor_elems):
flavor_dict = fixture['flavors'][i]
for key in ['name', 'id', 'ram', 'disk']:
self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index(self):
serializer = flavors_v2.MinimalFlavorsTemplate()
fixture = {
"flavors": [
{
"id": "23",
"name": "flavor 23",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/23",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/23",
},
],
},
{
"id": "13",
"name": "flavor 13",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/13",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/13",
},
],
},
],
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors_index')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 2)
for i, flavor_elem in enumerate(flavor_elems):
flavor_dict = fixture['flavors'][i]
for key in ['name', 'id']:
self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(flavor_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index_empty(self):
serializer = flavors_v2.MinimalFlavorsTemplate()
fixture = {
"flavors": [],
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'flavors_index')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 0)
class DisabledFlavorsWithRealDBTestV21(test.TestCase):
"""Tests that disabled flavors should not be shown nor listed."""
Controller = flavors_v3.FlavorsController
api_version = "2.1"
_prefix = "/v3"
fake_request = fakes.HTTPRequestV3
def setUp(self):
super(DisabledFlavorsWithRealDBTestV21, self).setUp()
# Add a new disabled type to the list of flavors
self.req = self.fake_request.blank(self._prefix + '/flavors')
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
self.disabled_type = self._create_disabled_instance_type()
self.inst_types = db.flavor_get_all(
self.admin_context)
self.controller = self.Controller()
def tearDown(self):
db.flavor_destroy(
self.admin_context, self.disabled_type['name'])
super(DisabledFlavorsWithRealDBTestV21, self).tearDown()
    def _create_disabled_instance_type(self):
        # Clone an existing flavor, mark it disabled, and persist it so the
        # tests can check disabled-flavor visibility against the real DB.
        inst_types = db.flavor_get_all(self.admin_context)
        inst_type = inst_types[0]
        del inst_type['id']
        inst_type['name'] += '.disabled'
        # NOTE(review): ``unicode`` is Python 2 only; this would need
        # six.text_type (or str) if the file is ever ported to Python 3.
        inst_type['flavorid'] = unicode(max(
            [int(flavor['flavorid']) for flavor in inst_types]) + 1)
        inst_type['disabled'] = True
        disabled_type = db.flavor_create(
            self.admin_context, inst_type)
        return disabled_type
def test_index_should_not_list_disabled_flavors_to_user(self):
self.context.is_admin = False
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids - set([disabled_flavorid]),
api_flavorids)
def test_index_should_list_disabled_flavors_to_admin(self):
self.context.is_admin = True
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids, api_flavorids)
def test_show_should_include_disabled_flavor_for_user(self):
"""Counterintuitively we should show disabled flavors to all users and
not just admins. The reason is that, when a user performs a server-show
request, we want to be able to display the pretty flavor name ('512 MB
Instance') and not just the flavor-id even if the flavor id has been
marked disabled.
"""
self.context.is_admin = False
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
def test_show_should_include_disabled_flavor_for_admin(self):
self.context.is_admin = True
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
class DisabledFlavorsWithRealDBTestV20(DisabledFlavorsWithRealDBTestV21):
    """Tests that disabled flavors should not be shown nor listed."""
    # Same assertions as the v2.1 suite, run against the legacy v2.0 API.
    Controller = flavors_v2.Controller
    api_version = "2.0"
    _prefix = "/v2/fake"
    fake_request = fakes.HTTPRequest
class ParseIsPublicTestV21(test.TestCase):
    """Unit tests for FlavorsController._parse_is_public."""
    Controller = flavors_v3.FlavorsController
    api_version = "2.1"

    def setUp(self):
        super(ParseIsPublicTestV21, self).setUp()
        self.controller = self.Controller()

    def assertPublic(self, expected, is_public):
        actual = self.controller._parse_is_public(is_public)
        self.assertIs(expected, actual,
                      '%s did not return %s' % (is_public, expected))

    def test_None(self):
        self.assertPublic(True, None)

    def test_truthy(self):
        for value in (True, 't', 'true', 'yes', '1'):
            self.assertPublic(True, value)

    def test_falsey(self):
        for value in (False, 'f', 'false', 'no', '0'):
            self.assertPublic(False, value)

    def test_string_none(self):
        for value in ('none', 'None'):
            self.assertPublic(None, value)

    def test_other(self):
        self.assertRaises(
            webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
class ParseIsPublicTestV20(ParseIsPublicTestV21):
    # Same _parse_is_public checks against the legacy v2.0 controller.
    Controller = flavors_v2.Controller
    api_version = "2.0"
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
# Prefer simplejson (faster C implementation) when it is installed and
# fall back to the stdlib json module, which exposes the same interface.
# Catch only ImportError: a bare `except:` would also hide real errors
# raised while importing simplejson (and would swallow SystemExit too).
try:
    import simplejson as json
except ImportError:
    import json
from libcloud.utils.py3 import httplib, urlparse
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.container.base import (Container, ContainerDriver,
ContainerImage)
from libcloud.container.providers import Provider
from libcloud.container.types import ContainerState
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
class RancherResponse(JsonResponse):
    """Response class for the Rancher API (JSON bodies)."""

    def parse_error(self):
        # Rancher error bodies carry fieldName/code/message/detail keys;
        # fieldName is only present for field-validation failures.
        parsed = super(RancherResponse, self).parse_error()
        if 'fieldName' not in parsed:
            return "%s - %s" % (parsed['message'], parsed['detail'])
        return "Field %s is %s: %s - %s" % (parsed['fieldName'],
                                            parsed['code'],
                                            parsed['message'],
                                            parsed['detail'])

    def success(self):
        # Any 2xx code Rancher uses for a successful call.
        return self.status in VALID_RESPONSE_CODES
class RancherException(Exception):
    """Raised when the Rancher API reports an error.

    :param code: status/error code returned by the API.
    :param message: human readable error description.
    """

    def __init__(self, code, message):
        # Passing both values to Exception.__init__ populates self.args
        # with (code, message), matching the pickling/str conventions.
        super(RancherException, self).__init__(code, message)
        self.code = code
        self.message = message

    def __str__(self):
        return "%s %s" % (self.code, self.message)

    def __repr__(self):
        return "RancherException %s %s" % (self.code, self.message)
class RancherConnection(ConnectionUserAndKey):
    """Connection to a Rancher server authenticated with HTTP basic auth."""

    responseCls = RancherResponse
    timeout = 30

    def add_default_headers(self, headers):
        """
        Add parameters that are necessary for every request

        If user and password are specified, include a base http auth
        header
        """
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        if self.user_id and self.key:
            credentials = b('%s:%s' % (self.user_id, self.key))
            encoded = base64.b64encode(credentials).decode('utf-8')
            headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
class RancherContainerDriver(ContainerDriver):
    """
    Driver for Rancher by Rancher Labs.

    This driver is capable of interacting with the Version 1 API of Rancher.
    It currently does NOT support the Version 2 API.

    Example:

    >>> from libcloud.container.providers import get_driver
    >>> from libcloud.container.types import Provider
    >>> driver = get_driver(Provider.RANCHER)
    >>> connection = driver(key="ACCESS_KEY_HERE",
    secret="SECRET_KEY_HERE", host="172.30.0.100", port=8080)
    >>> image = ContainerImage("hastebin", "hastebin", "rlister/hastebin",
    "latest", driver=None)
    >>> newcontainer = connection.deploy_container("myawesomepastebin",
    image, environment={"STORAGE_TYPE": "file"})

    :ivar baseuri: The URL base path to the API.
    :type baseuri: ``str``
    """
    type = Provider.RANCHER
    name = 'Rancher'
    website = 'http://rancher.com'
    connectionCls = RancherConnection
    # Holding off on cluster support for now.
    # Only Environment API interaction enabled.
    supports_clusters = False
    # As in the /v1/
    version = '1'

    def __init__(self, key, secret, secure=True, host='localhost', port=443):
        """
        Creates a new Rancher Container driver.

        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP.
        :type secure: ``bool``

        :param host: Override hostname used for connections. This can also
            be a full URL string, including scheme, port, and base path.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: A newly initialized driver instance.
        """
        # Accept either a bare hostname or a full URL in `host`.  urlparse
        # only recognises the hostname when a scheme (or leading //) is
        # present, so normalise first.
        if '://' not in host and not host.startswith("//"):
            host = '//' + host
        parsed = urlparse.urlparse(host)
        super(RancherContainerDriver, self).__init__(
            key=key,
            secret=secret,
            # An explicit http:// URL wins over the `secure` flag.
            secure=False if parsed.scheme == 'http' else secure,
            host=parsed.hostname,
            port=parsed.port if parsed.port else port
        )
        # Base path of the API, e.g. "/v1" unless the URL embeds one.
        self.baseuri = parsed.path if parsed.path else "/v%s" % self.version

    @staticmethod
    def _to_search_string(search_params):
        """Build a `k=v&k=v` query-string fragment from a filter dict.

        :param search_params: filter names mapped to (string) values.
        :type search_params: ``dict``

        :rtype: ``str``
        """
        return '&'.join(f + '=' + v for f, v in search_params.items())

    def ex_list_stacks(self):
        """
        List all Rancher Stacks

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/

        :rtype: ``list`` of ``dict``
        """
        result = self.connection.request(
            "%s/environments" % self.baseuri).object
        return result['data']

    def ex_deploy_stack(self, name, description=None, docker_compose=None,
                        environment=None, external_id=None,
                        rancher_compose=None, start=True):
        """
        Deploy a new stack.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/#create

        :param name: The desired name of the stack. (required)
        :type name: ``str``

        :param description: A desired description for the stack.
        :type description: ``str``

        :param docker_compose: The Docker Compose configuration to use.
        :type docker_compose: ``str``

        :param environment: Environment K/V specific to this stack.
        :type environment: ``dict``

        :param external_id: The externalId of the stack.
        :type external_id: ``str``

        :param rancher_compose: The Rancher Compose configuration for this env.
        :type rancher_compose: ``str``

        :param start: Whether to start this stack on creation.
        :type start: ``bool``

        :return: The newly created stack.
        :rtype: ``dict``
        """
        payload = {
            "description": description,
            "dockerCompose": docker_compose,
            "environment": environment,
            "externalId": external_id,
            "name": name,
            "rancherCompose": rancher_compose,
            "startOnCreate": start
        }
        # Strip unset (None) fields so Rancher applies its own defaults.
        data = json.dumps(dict((k, v) for (k, v) in payload.items()
                               if v is not None))
        result = self.connection.request('%s/environments' %
                                         self.baseuri, data=data,
                                         method='POST').object
        return result

    def ex_get_stack(self, env_id):
        """
        Get a stack by ID

        :param env_id: The stack to be obtained.
        :type env_id: ``str``

        :rtype: ``dict``
        """
        result = self.connection.request("%s/environments/%s" %
                                         (self.baseuri, env_id)).object
        return result

    def ex_search_stacks(self, search_params):
        """
        Search for stacks matching certain filters

        i.e. ``{ "name": "awesomestack"}``

        :param search_params: A collection of search parameters to use.
        :type search_params: ``dict``

        :rtype: ``list``
        """
        search_items = self._to_search_string(search_params)
        result = self.connection.request("%s/environments?%s" % (
            self.baseuri, search_items)).object
        return result['data']

    def ex_destroy_stack(self, env_id):
        """
        Destroy a stack by ID

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/#delete

        :param env_id: The stack to be destroyed.
        :type env_id: ``str``

        :return: True if destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request('%s/environments/%s' % (
            self.baseuri, env_id),
            method='DELETE')
        return result.status in VALID_RESPONSE_CODES

    def ex_activate_stack(self, env_id):
        """
        Activate Services for a stack.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/#activateservices

        :param env_id: The stack to activate services for.
        :type env_id: ``str``

        :return: True if activate was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request(
            '%s/environments/%s?action=activateservices' % (
                self.baseuri, env_id), method='POST'
        )
        return result.status in VALID_RESPONSE_CODES

    def ex_deactivate_stack(self, env_id):
        """
        Deactivate Services for a stack.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/environment/#deactivateservices

        :param env_id: The stack to deactivate services for.
        :type env_id: ``str``

        :return: True if deactivate was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request(
            '%s/environments/%s?action=deactivateservices' % (
                self.baseuri, env_id), method='POST'
        )
        return result.status in VALID_RESPONSE_CODES

    def ex_list_services(self):
        """
        List all Rancher Services

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/

        :rtype: ``list`` of ``dict``
        """
        result = self.connection.request("%s/services" % self.baseuri).object
        return result['data']

    def ex_deploy_service(self, name, image, environment_id,
                          start=True, assign_service_ip_address=None,
                          service_description=None, external_id=None,
                          metadata=None, retain_ip=None, scale=None,
                          scale_policy=None, secondary_launch_configs=None,
                          selector_container=None, selector_link=None,
                          vip=None, **launch_conf):
        """
        Deploy a Rancher Service under a stack.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#create

        *Any further configuration passed applies to the ``launchConfig``*

        :param name: The desired name of the service. (required)
        :type name: ``str``

        :param image: The Image object to deploy. (required)
        :type image: :class:`libcloud.container.base.ContainerImage`

        :param environment_id: The stack ID this service is tied to. (required)
        :type environment_id: ``str``

        :param start: Whether to start the service on creation.
        :type start: ``bool``

        :param assign_service_ip_address: The IP address to assign the service.
        :type assign_service_ip_address: ``bool``

        :param service_description: The service description.
        :type service_description: ``str``

        :param external_id: The externalId for this service.
        :type external_id: ``str``

        :param metadata: K/V Metadata for this service.
        :type metadata: ``dict``

        :param retain_ip: Whether this service should retain its IP.
        :type retain_ip: ``bool``

        :param scale: The scale of containers in this service.
        :type scale: ``int``

        :param scale_policy: The scaling policy for this service.
        :type scale_policy: ``dict``

        :param secondary_launch_configs: Secondary container launch configs.
        :type secondary_launch_configs: ``list``

        :param selector_container: The selectorContainer for this service.
        :type selector_container: ``str``

        :param selector_link: The selectorLink for this service.
        :type selector_link: ``type``

        :param vip: The VIP to assign to this service.
        :type vip: ``str``

        :return: The newly created service.
        :rtype: ``dict``
        """
        # BUGFIX: the previous assignment had a trailing comma, turning the
        # value into a one-element tuple which JSON-serialised as a list.
        # Rancher expects a plain "docker:..." string here.
        launch_conf['imageUuid'] = self._degen_image(image)
        service_payload = {
            "assignServiceIpAddress": assign_service_ip_address,
            "description": service_description,
            "environmentId": environment_id,
            "externalId": external_id,
            "launchConfig": launch_conf,
            "metadata": metadata,
            "name": name,
            "retainIp": retain_ip,
            "scale": scale,
            "scalePolicy": scale_policy,
            # BUGFIX: the Rancher v1 API uses camelCase field names; the
            # snake_case key was silently ignored by the server.
            "secondaryLaunchConfigs": secondary_launch_configs,
            "selectorContainer": selector_container,
            "selectorLink": selector_link,
            "startOnCreate": start,
            "vip": vip
        }
        # Strip unset (None) fields so Rancher applies its own defaults.
        data = json.dumps(dict((k, v) for (k, v) in service_payload.items()
                               if v is not None))
        result = self.connection.request('%s/services' % self.baseuri,
                                         data=data, method='POST').object
        return result

    def ex_get_service(self, service_id):
        """
        Get a service by ID

        :param service_id: The service_id to be obtained.
        :type service_id: ``str``

        :rtype: ``dict``
        """
        result = self.connection.request("%s/services/%s" %
                                         (self.baseuri, service_id)).object
        return result

    def ex_search_services(self, search_params):
        """
        Search for services matching certain filters

        i.e. ``{ "name": "awesomesause", "environmentId": "1e2"}``

        :param search_params: A collection of search parameters to use.
        :type search_params: ``dict``

        :rtype: ``list``
        """
        search_items = self._to_search_string(search_params)
        result = self.connection.request("%s/services?%s" % (
            self.baseuri, search_items)).object
        return result['data']

    def ex_destroy_service(self, service_id):
        """
        Destroy a service by ID

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#delete

        :param service_id: The service to be destroyed.
        :type service_id: ``str``

        :return: True if destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request('%s/services/%s' % (self.baseuri,
                                         service_id), method='DELETE')
        return result.status in VALID_RESPONSE_CODES

    def ex_activate_service(self, service_id):
        """
        Activate a service.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#activate

        :param service_id: The service to activate services for.
        :type service_id: ``str``

        :return: True if activate was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request('%s/services/%s?action=activate' %
                                         (self.baseuri, service_id),
                                         method='POST')
        return result.status in VALID_RESPONSE_CODES

    def ex_deactivate_service(self, service_id):
        """
        Deactivate a service.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/service/#deactivate

        :param service_id: The service to deactivate services for.
        :type service_id: ``str``

        :return: True if deactivate was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request('%s/services/%s?action=deactivate' %
                                         (self.baseuri, service_id),
                                         method='POST')
        return result.status in VALID_RESPONSE_CODES

    def list_containers(self):
        """
        List the deployed containers.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/container/

        :rtype: ``list`` of :class:`libcloud.container.base.Container`
        """
        result = self.connection.request("%s/containers" % self.baseuri).object
        containers = [self._to_container(value) for value in result['data']]
        return containers

    def deploy_container(self, name, image, parameters=None, start=True,
                         **config):
        """
        Deploy a new container.

        http://docs.rancher.com/rancher/v1.2/en/api/api-resources/container/#create

        **The following is the Image format used for ``ContainerImage``**

        *For a ``imageuuid``*:

        - ``docker:<hostname>:<port>/<namespace>/<imagename>:<version>``

        *The following applies*:

        - ``id`` = ``<imagename>``
        - ``name`` = ``<imagename>``
        - ``path`` = ``<hostname>:<port>/<namespace>/<imagename>``
        - ``version`` = ``<version>``

        *Any extra configuration can also be passed i.e. "environment"*

        :param name: The desired name of the container. (required)
        :type name: ``str``

        :param image: The Image object to deploy. (required)
        :type image: :class:`libcloud.container.base.ContainerImage`

        :param parameters: Container Image parameters (unused)
        :type parameters: ``str``

        :param start: Whether to start the container on creation(startOnCreate)
        :type start: ``bool``

        :rtype: :class:`Container`
        """
        payload = {
            "name": name,
            "imageUuid": self._degen_image(image),
            "startOnCreate": start,
        }
        # Driver-managed fields win over any caller-supplied duplicates.
        config.update(payload)
        data = json.dumps(config)
        result = self.connection.request('%s/containers' % self.baseuri,
                                         data=data, method='POST').object
        return self._to_container(result)

    def get_container(self, con_id):
        """
        Get a container by ID

        :param con_id: The ID of the container to get
        :type con_id: ``str``

        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request("%s/containers/%s" %
                                         (self.baseuri, con_id)).object
        return self._to_container(result)

    def start_container(self, container):
        """
        Start a container

        :param container: The container to be started
        :type container: :class:`libcloud.container.base.Container`

        :return: The container refreshed with current data
        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request('%s/containers/%s?action=start' %
                                         (self.baseuri, container.id),
                                         method='POST').object
        return self._to_container(result)

    def stop_container(self, container):
        """
        Stop a container

        :param container: The container to be stopped
        :type container: :class:`libcloud.container.base.Container`

        :return: The container refreshed with current data
        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request('%s/containers/%s?action=stop' %
                                         (self.baseuri, container.id),
                                         method='POST').object
        return self._to_container(result)

    def ex_search_containers(self, search_params):
        """
        Search for containers matching certain filters

        i.e. ``{ "imageUuid": "docker:mysql", "state": "running"}``

        :param search_params: A collection of search parameters to use.
        :type search_params: ``dict``

        :rtype: ``list``
        """
        search_items = self._to_search_string(search_params)
        result = self.connection.request("%s/containers?%s" % (
            self.baseuri, search_items)).object
        return result['data']

    def destroy_container(self, container):
        """
        Remove a container

        :param container: The container to be destroyed
        :type container: :class:`libcloud.container.base.Container`

        :return: The container, refreshed with its final (removed) state.
        :rtype: :class:`libcloud.container.base.Container`
        """
        # NOTE: the base ContainerDriver API documents a ``bool`` return
        # here; this driver returns the refreshed Container instead and
        # existing callers may rely on that, so the behavior is kept.
        result = self.connection.request('%s/containers/%s' % (self.baseuri,
                                         container.id), method='DELETE').object
        return self._to_container(result)

    def _gen_image(self, imageuuid):
        """
        This function converts a valid Rancher ``imageUuid`` string to a valid
        image object. Only supports docker based images hence `docker:` must
        prefix!!

        Please see the deploy_container() for details on the format.

        :param imageuuid: A valid Rancher image string
            i.e. ``docker:rlister/hastebin:8.0``
        :type imageuuid: ``str``

        :return: Converted ContainerImage object.
        :rtype: :class:`libcloud.container.base.ContainerImage`
        """
        # Obtain just the name(:version) for parsing
        if '/' not in imageuuid:
            # String looks like `docker:mysql:8.0`
            image_name_version = imageuuid.partition(':')[2]
        else:
            # String looks like `docker:oracle/mysql:8.0`
            image_name_version = imageuuid.rpartition("/")[2]
        # Parse based on ':'
        if ':' in image_name_version:
            version = image_name_version.partition(":")[2]
            image_id = image_name_version.partition(":")[0]
        else:
            version = 'latest'
            image_id = image_name_version
        # Get our path based on if there was a version
        if version != 'latest':
            path = imageuuid.partition(':')[2].rpartition(':')[0]
        else:
            path = imageuuid.partition(':')[2]
        return ContainerImage(
            id=image_id,
            name=image_id,
            path=path,
            version=version,
            driver=self.connection.driver,
            extra={
                "imageUuid": imageuuid
            }
        )

    def _degen_image(self, image):
        """
        Take in an image object to break down into an ``imageUuid``

        :param image: the image to convert
        :type image: :class:`libcloud.container.base.ContainerImage`

        :return: a Rancher ``imageUuid`` string, e.g. ``docker:mysql:8.0``
        :rtype: ``str``
        """
        # Only supporting docker atm
        image_type = "docker"
        if image.version is not None:
            return image_type + ':' + image.path + ':' + image.version
        else:
            return image_type + ':' + image.path

    def _to_container(self, data):
        """
        Convert container in proper Container instance object

        ** Updating is NOT supported!!

        :param data: API data about container i.e. result.object
        :return: Proper Container object:
         see http://libcloud.readthedocs.io/en/latest/container/api.html
        """
        rancher_state = data['state']
        # A Removed container is purged after x amt of time.
        # Both of these render the container dead (can't be started later)
        terminate_condition = ["removed", "purged"]
        if 'running' in rancher_state:
            state = ContainerState.RUNNING
        elif 'stopped' in rancher_state:
            state = ContainerState.STOPPED
        elif 'restarting' in rancher_state:
            state = ContainerState.REBOOTING
        elif 'error' in rancher_state:
            state = ContainerState.ERROR
        elif any(x in rancher_state for x in terminate_condition):
            state = ContainerState.TERMINATED
        elif data['transitioning'] == 'yes':
            # Best we can do for current actions
            state = ContainerState.PENDING
        else:
            state = ContainerState.UNKNOWN
        # Everything contained in the json response is dumped in extra
        extra = data
        return Container(
            id=data['id'],
            name=data['name'],
            image=self._gen_image(data['imageUuid']),
            ip_addresses=[data['primaryIpAddress']],
            state=state,
            driver=self.connection.driver,
            extra=extra)
| |
"""
Agent running periodically on a server with installed python-zimbra libs,
that looks on the server for emails, that have reached their followup time,
moves them to the inbox and tags them.
"""
import logging
from optparse import OptionParser
import re
from datetime import datetime
from pythonzimbra.tools import auth
from pythonzimbra.communication import Communication
ZIMBRA_INBOX_ID = 2
if __name__ == "__main__":

    # Interpret arguments
    parser = OptionParser(
        usage="Usage: %prog [options] SERVER USERNAME PREAUTH",
        description="SERVER: Name/IP of Zimbra-Server, "
                    + "USERNAME: Administrative account username, "
                    + "PASSWORD: Password of administrative account"
    )

    parser.add_option(
        "-l",
        "--distlist",
        dest="distlist",
        help="Use members of this distribution list instead of all users"
    )

    parser.add_option(
        "-o",
        "--domain",
        dest="domain",
        help="Use members from this domain instead of all users"
    )

    parser.add_option(
        "-q",
        "--quiet",
        action="store_true",
        dest="quiet",
        help="Be quiet doing things.",
    )

    parser.add_option(
        "-d",
        "--debug",
        action="store_true",
        dest="debug",
        help="Enable debug logging"
    )

    (options, args) = parser.parse_args()

    if len(args) < 3:
        parser.error("Invalid number of arguments")

    (server_name, admin_account, admin_password) = args

    if options.quiet and options.debug:
        parser.error("Cannot specify debug and quiet at the same time.")

    if options.quiet:
        logging.basicConfig(level=logging.ERROR)
    elif options.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    logging.debug("Starting followup-agent.")

    logging.debug("Authenticating as administrator to get users and domain "
                  "preauth")

    server_url = "https://%s:7071/service/admin/soap" % server_name

    comm = Communication(server_url)

    token = auth.authenticate(server_url,
                              admin_account, admin_password, admin_auth=True)

    # Collect the list of user accounts to process, either from a
    # distribution list or from (a domain's) account listing.
    users = []

    if options.distlist:

        logging.debug("Retrieving distribution list members from list %s" % (
            options.distlist
        ))

        get_members_request = comm.gen_request(token=token)

        get_members_request.add_request(
            "GetDistributionListRequest",
            {
                "dl": {
                    "_content": options.distlist,
                    "by": "name"
                }
            },
            "urn:zimbraAdmin"
        )

        get_members_response = comm.send_request(get_members_request)

        if get_members_response.is_fault():

            raise Exception(
                "Error loading distribution list members: (%s) %s" % (
                    get_members_response.get_fault_code(),
                    get_members_response.get_fault_message()
                )
            )

        else:

            for user in get_members_response.get_response()[
                    "GetDistributionListResponse"]["dl"]["dlm"]:
                users.append(user["_content"])

    else:

        get_users_request = comm.gen_request(token=token)

        param = {}

        if options.domain:

            logging.debug("Loading users from domain %s" % options.domain)

            param["domain"] = {
                "by": "name",
                "_content": options.domain
            }

        else:

            logging.debug("Fetching all users")

        get_users_request.add_request(
            "GetAllAccountsRequest",
            param,
            "urn:zimbraAdmin"
        )

        get_users_response = comm.send_request(get_users_request)

        if get_users_response.is_fault():

            raise Exception(
                "Error loading users: (%s) %s" % (
                    get_users_response.get_fault_code(),
                    get_users_response.get_fault_message()
                )
            )

        else:

            for user in get_users_response.get_response()[
                    "GetAllAccountsResponse"]["account"]:
                users.append(user["name"])

    # Cache each domain's preauth key so it is only fetched once per run.
    preauth_cache = {}

    for user in users:

        logging.debug("Checking user %s" % user)

        (local_part, domain_part) = user.split("@")

        if domain_part not in preauth_cache:

            logging.debug("Fetching preauth key for domain %s" % domain_part)

            get_pak_request = comm.gen_request(token=token)

            get_pak_request.add_request(
                "GetDomainRequest",
                {
                    "domain": {
                        "by": "name",
                        "_content": domain_part
                    }
                },
                "urn:zimbraAdmin"
            )

            get_pak_response = comm.send_request(get_pak_request)

            if get_pak_response.is_fault():

                raise Exception(
                    "Error loading domain preauth "
                    "key for domain %s: (%s) %s" % (
                        domain_part,
                        get_pak_response.get_fault_code(),
                        get_pak_response.get_fault_message()
                    )
                )

            pak = ""

            for parameter in get_pak_response.get_response()[
                    "GetDomainResponse"]["domain"]["a"]:

                if parameter["n"] == "zimbraPreAuthKey":
                    pak = parameter["_content"]

            if pak == "":

                raise Exception(
                    "Cannot find preauth key for domain %s. "
                    "Please use zmprov gdpak %s first." % (
                        domain_part, domain_part
                    )
                )

            preauth_cache[domain_part] = str(pak)

        # Get zimlet properties
        get_account_request = comm.gen_request(token=token)

        get_account_request.add_request(
            "GetAccountRequest",
            {
                "account": {
                    "by": "name",
                    "_content": user
                }
            },
            "urn:zimbraAdmin"
        )

        get_account_response = comm.send_request(get_account_request)

        if get_account_response.is_fault():

            raise Exception(
                "Cannot get account properties for account %s: (%s) %s" % (
                    user,
                    get_account_response.get_fault_code(),
                    get_account_response.get_fault_message()
                )
            )

        # Zimlet settings are stored as "de_dieploegers_followup:<key>:<value>"
        # entries in the account attributes; collect them into a dict.
        zimlet_props = {}

        for prop in get_account_response.get_response()[
                "GetAccountResponse"]["account"]["a"]:

            tmp_prop = re.match(
                "^de_dieploegers_followup:([^:]*):(.*)$",
                prop["_content"]
            )

            if tmp_prop:
                zimlet_props[tmp_prop.group(1)] = tmp_prop.group(2)

        if len(zimlet_props.items()) == 0 or \
                "followupFolderId" not in zimlet_props or \
                "followupTagName" not in zimlet_props:

            # No zimlet properties set. Move on
            logging.debug("User is not using zimlet")

            continue

        logging.debug("Authenticating as user")

        user_token = auth.authenticate(
            "https://%s/service/soap" % server_name,
            user,
            preauth_cache[domain_part]
        )

        # Get mails in followup-folder
        logging.debug("Opening followup folder")

        search_request = comm.gen_request(token=user_token)

        search_request.add_request(
            "SearchRequest",
            {
                "types": "message",
                "fetch": "all",
                "query": {
                    "_content": "inid:%s" % zimlet_props["followupFolderId"]
                }
            },
            "urn:zimbraMail"
        )

        search_response = comm.send_request(search_request)

        if search_response.is_fault():

            raise Exception(
                "Cannot fetch mails in followup folder: (%s) %s" % (
                    search_response.get_fault_code(),
                    search_response.get_fault_message()
                )
            )

        if "m" not in search_response.get_response()["SearchResponse"]:

            # No mails found
            logging.info("No mails found.")

            mails = []

        else:

            mails = search_response.get_response()["SearchResponse"]["m"]

            # A single hit is returned as a dict, not a one-element list.
            if isinstance(mails, dict):
                mails = [mails]

        for mail in mails:

            logging.debug("Mail %s (%s)" % (mail["id"], mail["su"]))

            # BUGFIX: `long` does not exist on Python 3; `int` handles
            # arbitrarily large values on both Python 2 and 3.
            # "d" is the mail's date in milliseconds since the epoch.
            mail_date = datetime.fromtimestamp(int(mail["d"]) / 1000)

            if mail_date <= datetime.now():

                # Mail is due
                logging.info("Mail %s is due for followup. (%s)" % (
                    mail["id"],
                    mail["su"]
                ))

                logging.debug("Tagging it.")

                tag_request = comm.gen_request(token=user_token)

                tag_request.add_request(
                    "MsgActionRequest",
                    {
                        "action": {
                            "id": mail["id"],
                            "op": "tag",
                            "tn": zimlet_props["followupTagName"]
                        }
                    },
                    "urn:zimbraMail"
                )

                tag_response = comm.send_request(tag_request)

                if tag_response.is_fault():

                    raise Exception(
                        "Cannot tag mail: (%s) %s" % (
                            tag_response.get_fault_code(),
                            tag_response.get_fault_message()
                        )
                    )

                logging.debug("Moving it back to inbox")

                move_request = comm.gen_request(token=user_token)

                move_request.add_request(
                    "MsgActionRequest",
                    {
                        "action": {
                            "id": mail["id"],
                            "op": "move",
                            "l": ZIMBRA_INBOX_ID
                        }
                    },
                    "urn:zimbraMail"
                )

                move_response = comm.send_request(move_request)

                if move_response.is_fault():

                    # BUGFIX: this action moves the mail back to the inbox,
                    # not to the followup folder - the message said the
                    # opposite.
                    raise Exception(
                        "Cannot move mail back to inbox: (%s) %s" % (
                            move_response.get_fault_code(),
                            move_response.get_fault_message()
                        )
                    )

                logging.debug("Setting mail to unread")

                unread_request = comm.gen_request(token=user_token)

                unread_request.add_request(
                    "MsgActionRequest",
                    {
                        "action": {
                            "id": mail["id"],
                            "op": "!read"
                        }
                    },
                    "urn:zimbraMail"
                )

                unread_response = comm.send_request(unread_request)

                if unread_response.is_fault():

                    raise Exception(
                        "Cannot set mail to unread: (%s) %s" % (
                            unread_response.get_fault_code(),
                            unread_response.get_fault_message()
                        )
                    )

            else:

                logging.debug("Not due. Skipping")

    logging.debug("Done.")
| |
#!/usr/bin/python
# vim: sw=4 et
# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import argparse
import glob
import os
import platform
import shutil
import sys
import tempfile
import time
import pymod2pkg
import sh
from sh import Command
def pymodule2pkg(spectemplate):
    """Map a spec template path to its distro-specific package name.

    :param spectemplate: path to a ``<module>.spec.j2`` template
    :return: the package name for the module on the current distribution
    """
    specname = os.path.splitext(spectemplate)[0]
    modulename = os.path.splitext(os.path.basename(specname))[0]
    # 'openstack-macros' is not a python module; short-circuit before
    # asking pymod2pkg so the name is never translated.
    if modulename == 'openstack-macros':
        return modulename
    # NOTE(review): platform.linux_distribution() was removed in Python
    # 3.8; this script appears to target older interpreters - confirm
    # before porting.
    return pymod2pkg.module2package(modulename,
                                    platform.linux_distribution()[0])
def get_osc_user():
    """Return the username osc is configured to use for the API URL."""
    # Imported lazily so the module can be loaded without osc installed.
    import osc.conf
    osc.conf.get_config()
    apiurl = osc.conf.config['apiurl']
    return osc.conf.get_apiurl_usr(apiurl)
def upload_meta(project, build_repository, linkproject):
    """Upload the project _meta to the build service.

    :param project: name of the buildservice project to update
    :param build_repository: XML snippet describing the build repository
    :param linkproject: optional project name to create a <link> to
    """
    projectlink = ''
    if linkproject:
        projectlink = '<link project="%s"/>\n' % linkproject
    description = ''
    if linkproject:
        if 'ZUUL_UUID' in os.environ:
            description = """
            This project tests the following Zuul Change IDs: %(ZUUL_CHANGE_IDS)s\n
            Branch used: %(ZUUL_BRANCH)s\n
            Project used: %(ZUUL_PROJECT)s
            """ % (os.environ)
    templ = """
<project name="%(project)s">
  <title>Autogenerated CI project</title>
  <description>
%(description)s
  </description>
  %(projectlink)s
  <person userid="%(user)s" role="maintainer"/>
  <publish>
    <disable/>
  </publish>
  %(build_repository)s
</project>""" % ({'project': project,
                  'user': get_osc_user(),
                  'description': description,
                  'projectlink': projectlink,
                  'build_repository': build_repository})
    # BUGFIX: NamedTemporaryFile defaults to binary mode, so writing the
    # str meta document fails on Python 3; request text mode explicitly.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as meta:
        meta.write(templ)
        print('Updating meta for ', project)
        meta.close()
        # work around build service bug that triggers a database deadlock
        for fail_counter in range(1, 5):
            try:
                sh.osc('api', '-T', meta.name, '/source/%s/_meta' % project)
                break
            except sh.ErrorReturnCode_1:
                # Sleep a bit and try again. This has not been scientifically
                # proven to be the correct sleep factor, but it seems to work
                time.sleep(2)
                continue
    os.unlink(meta.name)
def upload_meta_enable_repository(project, linkproject):
    # Upload project meta with a single standard/x86_64 repository that
    # builds against `linkproject`'s standard repository.  The repo flags
    # limit rebuild/blocking scope to this project and allow linked builds
    # to use locally built dependencies.
    repository = """
  <repository name="standard" %(repoflags)s>
    <path project="%(linkproject)s" repository="standard"/>
    <arch>x86_64</arch>
  </repository>
""" % ({'linkproject': linkproject,
        'repoflags': 'rebuild="local" block="local" linkedbuild="localdep"'})
    upload_meta(project, repository, linkproject)
def create_new_build_project(workdir, project, linkproject):
    """Create *workdir* and initialise an osc checkout for *project*.

    When *linkproject* is given, the project meta (with an enabled
    repository linking to it) is uploaded first.
    """
    sh.mkdir('-p', workdir)
    previous_cwd = os.getcwd()
    try:
        os.chdir(workdir)
        if linkproject:
            upload_meta_enable_repository(project, linkproject)
        sh.osc('init', project)
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous_cwd)
def generate_pkgspec(pkgoutdir, global_requirements, spectemplate, pkgname):
    """Render, reformat and fetch sources for a single package spec.

    Runs the OBS source services ``renderspec``, ``format_spec_file`` and
    ``download_files`` inside *pkgoutdir*.
    """
    obsservicedir = '/usr/lib/obs/service/'
    outdir = ('--outdir', pkgoutdir)
    startdir = os.getcwd()
    try:
        os.chdir(pkgoutdir)
        renderspec = Command(os.path.join(obsservicedir, 'renderspec'))
        renderspec('--input-template',
                   os.path.join(startdir, spectemplate),
                   '--requirements',
                   os.path.join(startdir, global_requirements),
                   '--output-name', pkgname + '.spec', *outdir)
        format_spec_file = Command(
            os.path.join(obsservicedir, 'format_spec_file'))
        format_spec_file(*outdir)
        # configure a download cache to avoid downloading the same files
        download_env = os.environ.copy()
        download_env["CACHEDIRECTORY"] = os.path.join(
            os.path.expanduser("~"), ".cache", "download_files")
        download_files = Command(os.path.join(obsservicedir, 'download_files'))
        download_files(_env=download_env, *outdir)
    finally:
        os.chdir(startdir)
def osc_mkpac(workdir, packagename):
    """Create a new package checkout via ``osc mkpac`` inside *workdir*."""
    startdir = os.getcwd()
    try:
        os.chdir(workdir)
        sh.osc('mkpac', packagename)
    finally:
        os.chdir(startdir)
def spec_is_modified(pkgoutdir, project, pkgname):
    """Return True when the freshly rendered spec differs from the one
    currently committed in the build service.

    The committed copy is fetched into the ``.osc`` cache when it is not
    already there, and removed again afterwards in that case.
    """
    specname = pkgname + ".spec"
    cached_spec = os.path.join(pkgoutdir, '.osc', specname)
    fetched = not os.path.exists(cached_spec)
    if fetched:
        sh.osc('api', '/source/%s/%s/%s.spec' % (
            project, pkgname, pkgname), _out=cached_spec)
    # cmp -s exits 0 on identical files, 1 on differing ones.
    comparison = sh.cmp('-s', os.path.join(pkgoutdir, specname),
                        cached_spec, _ok_code=[0, 1])
    if fetched:
        os.remove(cached_spec)
    return comparison.exit_code == 1
def osc_detachbranch(workdir, project, pkgname):
    # Detach the package from its branch origin while keeping the locally
    # generated files: stash the working copy aside, re-checkout the
    # detached package, then overlay the stashed files on top of it.
    olddir = os.getcwd()
    try:
        os.chdir(os.path.join(workdir))
        sh.osc('detachbranch', project, pkgname)
        # Stash the current working copy into <pkgname>.b
        os.mkdir(pkgname + '.b')
        for f in glob.glob(os.path.join(pkgname, '*')):
            os.rename(f, os.path.join(pkgname + '.b', os.path.basename(f)))
        # Replace the old checkout with a fresh one of the detached package.
        sh.rm('-rf', pkgname)
        sh.osc('co', pkgname)
        # Move the stashed files back, overwriting the checked-out copies.
        for f in glob.glob(os.path.join(pkgname + '.b', '*')):
            dst = os.path.basename(f)
            try:
                os.unlink(os.path.join(pkgname, dst))
            except OSError:
                # Destination does not exist yet - nothing to remove.
                pass
            os.rename(f, os.path.join(pkgname, dst))
        os.rmdir(pkgname + '.b')
    finally:
        os.chdir(olddir)
def osc_commit_all(workdir, packagename):
    """Stage all file additions/removals and commit the package checkout."""
    startdir = os.getcwd()
    try:
        os.chdir(os.path.join(workdir, packagename))
        sh.osc('addremove')
        # --noservice: source services already ran locally; -n: no message.
        sh.osc('commit', '--noservice', '-n')
    finally:
        os.chdir(startdir)
def copy_extra_sources(specdir, pkgoutdir):
    """Copy every non-template file from *specdir* into *pkgoutdir*.

    Spec templates (``*.j2``) are rendered separately and skipped here.
    """
    extra_files = (entry for entry in glob.glob(os.path.join(specdir, '*'))
                   if not entry.endswith(".j2"))
    for source in extra_files:
        shutil.copy2(source, pkgoutdir)
def create_project(worktree, project, linkproject):
    """Generate .spec files for all packages in worktree and push to project.

    Packages already present in the buildservice are updated only when their
    generated spec differs from the server copy; packages no longer present
    in the worktree are deleted (unless building against a linkproject).
    """
    workdir = os.path.join(os.getcwd(), 'out')
    sh.rm('-rf', workdir)
    create_new_build_project(workdir, project, linkproject)
    try:
        existing_pkgs = [x.strip() for x in
                         sh.osc('ls', '-e', project, _iter=True)]
    except Exception:
        # Bug fix: this was a bare 'except:', which also swallowed
        # SystemExit and KeyboardInterrupt. Any listing failure (e.g.
        # project does not exist yet) means "no packages yet".
        existing_pkgs = []
    alive_pkgs = set()
    worktree_pattern = os.path.join(worktree, 'openstack', '*', '*.spec.j2')
    for spectemplate in sorted(glob.glob(worktree_pattern)):
        pkgname = pymodule2pkg(spectemplate)
        alive_pkgs.add(pkgname)
        print(pkgname)
        sys.stdout.flush()
        pkgoutdir = os.path.join(workdir, pkgname)
        osc_mkpac(workdir, pkgname)
        copy_extra_sources(os.path.dirname(spectemplate), pkgoutdir)
        generate_pkgspec(
            pkgoutdir,
            os.path.join(worktree, 'global-requirements.txt'),
            spectemplate, pkgname)
        if pkgname in existing_pkgs:
            # Only commit when the generated spec actually changed.
            if spec_is_modified(pkgoutdir, project, pkgname):
                osc_detachbranch(workdir, project, pkgname)
                print("Committing update to %s" % pkgname)
                osc_commit_all(workdir, pkgname)
        else:
            print("Adding new pkg %s" % pkgname)
            osc_commit_all(workdir, pkgname)
    # remove no longer alive pkgs
    for i in existing_pkgs:
        if not linkproject and i not in alive_pkgs:
            print("Removing outdated ", i)
            sh.osc('rdelete', '-m', 'x', project, i)
def main():
    """Parse command-line arguments and build the testproject."""
    arg_parser = argparse.ArgumentParser(
        description='Build a testproject for a given rpm-packaging checkout')
    arg_parser.add_argument('worktree',
                            help='directory with a rpm-packaging checkout')
    arg_parser.add_argument('project',
                            help='name of the destination buildservice project')
    arg_parser.add_argument('--linkproject',
                            help='create project link to given project')
    options = arg_parser.parse_args()
    create_project(options.worktree, options.project, options.linkproject)
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
"""Manages a Bento cluster."""
import logging
import os
import re
import signal
import subprocess
import sys
import xml.etree.ElementTree as etree
from base import base
from base import cli
from kiji import bento_cluster
from kiji import maven_repo
class Error(Exception):
  """Base class for errors raised by this module."""
# ------------------------------------------------------------------------------
def Find(root, regex):
  """Finds files whose name match a given regexp.

  Equivalent of 'find <root> -name <regex>'.

  Args:
    root: Base directory to scan for files.
    regex: Match file names against this regexp.
  Yields:
    Paths of the files matching the regexp.
  """
  assert os.path.exists(root), root
  matcher = re.compile(regex)
  for dir_path, _dir_names, file_names in os.walk(root):
    for file_name in filter(matcher.match, file_names):
      yield os.path.join(dir_path, file_name)
# ------------------------------------------------------------------------------
def ExtractArchive(archive, work_dir, strip_components=0):
  """Extracts a tar archive.

  Args:
    archive: Path to the tar archive to extract.
    work_dir: Where to extract the archive.
    strip_components: How many leading path components to strip.
  Raises:
    subprocess.CalledProcessError: If the tar command fails.
  """
  # Bug fix: the assertion message used to be an uninterpolated
  # ('%r ...', archive) tuple; format it properly.
  assert os.path.exists(archive), 'Archive %r does not exist' % archive
  if not os.path.exists(work_dir):
    os.makedirs(work_dir)
  # Use an argv list instead of a shell string so paths containing spaces
  # or shell metacharacters are passed through safely, and check the exit
  # status (os.system's return code used to be silently dropped).
  command = [
      '/bin/tar', 'xf', archive,
      '--directory', work_dir,
      '--strip-components=%d' % strip_components,
  ]
  logging.info('Running command: %r', command)
  subprocess.check_call(command)
# ------------------------------------------------------------------------------
class KijiBento(object):
  """KijiBento distribution.
  Wraps a KijiBento installation.
  """
  def __init__(self, path, version=None):
    """Initializes the KijiBento object.
    Args:
      path: Path of the KijiBento install directory.
      version: Bento version, eg. '1.0.1' or '1.0.2-SNAPSHOT'.
          Latest version if not specified or None.
    """
    self._path = path
    self._version = version
    # BentoCluster wrapper, created lazily by the bento_cluster property.
    self._bento_cluster = None
  @property
  def path(self):
    """Path of the KijiBento install directory."""
    return self._path
  @property
  def version(self):
    """Requested Bento version, or None meaning 'latest'."""
    return self._version
  @property
  def installed(self):
    """True if bin/kiji-env.sh exists under the install directory."""
    return os.path.exists(os.path.join(self.path, 'bin', 'kiji-env.sh'))
  @property
  def bento_cluster(self):
    """Lazily-created BentoCluster wrapping <install-dir>/cluster."""
    if self._bento_cluster is None:
      bento_path = os.path.join(self.path, 'cluster')
      self._bento_cluster = bento_cluster.BentoCluster(
          path=bento_path,
          version=None,  # Bento must not be fetched separately
          enable_log=True,
      )
    return self._bento_cluster
  def Install(self):
    """Ensures KijiBento is installed properly."""
    if not self.installed:
      self._Fetch(version=self.version)
  def GetMostRecentVersion(self):
    """Reports the most recent version of KijiBento from the SNAPSHOT repo."""
    snapshot_repo = maven_repo.RemoteRepository(maven_repo.KIJI_SNAPSHOT_REPO)
    bento_versions = list(snapshot_repo.ListVersions(group='org.kiji.kiji-bento',
                                                     artifact='kiji-bento'))
    def order_versions(version_string):
      """Splits the version string into a tuple of numbers that can be compared with their
      natural ordering to get ordering that matches version semantics.
      rc versions, since we no longer use them, are replaced with 0."""
      split_str = re.split("[.-]", version_string)
      if (split_str[-1] == "SNAPSHOT"):
        # A SNAPSHOT sorts above the corresponding release: replace the
        # suffix with "1" so int() comparison ranks it higher.
        split_str[-1] = "1"
        if (re.search("rc.*", split_str[-2])):
          split_str[-2] = 0
      else:
        # NOTE(review): only snapshot versions are expected in this repo;
        # anything else is logged and padded so int() conversion still works.
        logging.error("Encountered unexpected non-snapshot version %s in repo %s.",
                      version_string,
                      snapshot_repo)
        split_str.append("0")
      return list(map(int, split_str))
    latest_version = sorted(
        bento_versions,
        reverse=True,
        key=order_versions
    )[0]
    logging.info("Using latest version %r from possible versions %r.",
                 latest_version,
                 bento_versions)
    return latest_version
  def _Fetch(self, version):
    """Fetches and installs the specified version of BentoCluster."""
    assert not self.installed
    repo = maven_repo.MavenRepository(
        remotes=[
            maven_repo.KIJI_PUBLIC_REPO,
            maven_repo.KIJI_SNAPSHOT_REPO,
        ],
    )
    if version is None:
      version = self.GetMostRecentVersion()
    local_path = repo.Get(
        group='org.kiji.kiji-bento',
        artifact='kiji-bento',
        version=version,
        classifier='release',
        type='tar.gz',
    )
    # Strip the first path component from the kiji-bento release archive:
    # The top-level directory is "kiji-bento-<code-name>/",
    # but we don't know the code-name at this point.
    ExtractArchive(archive=local_path, work_dir=self.path, strip_components=1)
    assert self.installed
    # Ensure the BentoCluster exists:
    self.bento_cluster
# ------------------------------------------------------------------------------
class CLI(cli.Action):
  """Command-line action that installs KijiBento according to flags."""
  def RegisterFlags(self):
    """Declares the command-line flags this action understands."""
    self.flags.AddString(
        name='install_dir',
        default='/tmp/kiji-bento',
        help='Path where KijiBento is installed.',
    )
    self.flags.AddString(
        name='version',
        default='1.0.0',
        help='KijiBento version.',
    )
    self.flags.AddString(
        name='do',
        default='install',
        help='Action to perform: install.',
    )
  def Run(self, args):
    """Performs the action selected with --do.
    Args:
      args: Positional command-line arguments; none are expected.
    Raises:
      Error: If --do names an unknown action.
    """
    assert (len(args) == 0), ('Unexpected arguments: %r' % args)
    if self.flags.do == 'install':
      bento = KijiBento(
          path=self.flags.install_dir,
          version=self.flags.version,
      )
      # Bug fix: the KijiBento object was previously constructed but
      # Install() was never invoked, making --do=install a no-op.
      bento.Install()
    else:
      raise Error('Unknown action %r' % self.flags.do)
def Main(args):
  """Program entry point: delegates to the CLI action."""
  return CLI()(args)
if __name__ == '__main__':
  base.Run(Main)
| |
from datetime import datetime
from . import ApiObject
class Host(ApiObject):
    """
    [Zabbix Host](https://www.zabbix.com/documentation/2.2/manual/api/reference/host/object)
    """
    @classmethod
    def by_name(C, api, name):
        """
        Return a new `Host` with matching `name`, or None if no host matches.
        """
        params = dict(
            output = 'extend',
            filter = dict(name=name),
            selectGroups = True,
        )
        result = api.response('host.get', **params).get('result')
        if not result:
            return None
        # Use the first (and only expected) match.
        return C(api, **result[0])
    def process_refs(I, attrs):
        """
        Resolve referenced objects from raw API attributes: resets the lazy
        item cache and builds the name -> HostGroup map from 'groups'.
        """
        I._items = None
        I._groups = {}
        if 'groups' in attrs:
            for group in attrs['groups']:
                I._groups[group['name']] = HostGroup(I._api, **group)
    @property
    def groups(I):
        """
        Map[HostGroup.name -> HostGroup] of associated `HostGroups`.
        """
        return I._groups
    @property
    def items(I):
        """
        Map[Item.key -> Item] of associated `Items`.
        """
        # Fetched lazily from the API on first access, then cached.
        if I._items is None:
            I._items = {}
            for item in I._api.response('item.get', output='extend', hostids=I.id).get('result'):
                I._items[item['key_']] = Item(I._api, **item)
        return I._items
    def triggers(I):
        """
        List of associated `Triggers`.
        """
        # Not cached: fetched from the API on every call.
        return [Trigger(I._api, **trigger) for trigger in
                I._api.response('trigger.get', output='extend', hostids=I.id).get('result')]
    def __repr__(I):
        """ClassName[visible-name], e.g. Host[web01]."""
        return "{}[{}]".format(I.__class__.__name__, I.name.val)
    # Property schema consumed by the ApiObject base class: maps each Zabbix
    # host attribute to its documentation, Python kind, read-only flag and,
    # where applicable, the meaning of its enumerated values.
    PROPS = dict(
        hostid = dict(
            doc = "ID of the host.",
            id = True,
            readonly = True,
        ),
        host = dict(
            doc = "Technical name of the host.",
        ),
        available = dict(
            doc = "Availability of the agent.",
            kind = int,
            readonly = True,
            vals = {
                0: 'unknown (default)',
                1: 'available',
                2: 'unavailable',
            },
        ),
        disable_until = dict(
            doc = "The next polling time of an unavailable Zabbix agent.",
            kind = datetime,
            readonly = True,
        ),
        error = dict(
            doc = "Error text if Zabbix agent is unavailable.",
            readonly = True,
        ),
        errors_from = dict(
            doc = "Time when Zabbix agent became unavailable.",
            kind = datetime,
            readonly = True,
        ),
        flags = dict(
            doc = "Origin of the host.",
            kind = int,
            readonly = True,
            vals = {
                0: 'a plain host',
                4: 'a discovered host',
            },
        ),
        ipmi_authtype = dict(
            doc = "IPMI authentication algorithm.",
            kind = int,
            vals = {
                -1: 'default',
                0 : 'none',
                1 : 'MD2',
                2 : 'MD5',
                4 : 'straight',
                5 : 'OEM',
                6 : 'RMCP+',
            },
        ),
        ipmi_available = dict(
            doc = "Availability of IPMI agent.",
            kind = int,
            readonly = True,
            vals = {
                0: 'unknown (default)',
                1: 'available',
                2: 'unavailable',
            },
        ),
        ipmi_disable_until = dict(
            doc = "The next polling time of an unavailable IPMI agent.",
            kind = datetime,
            readonly = True,
        ),
        ipmi_error = dict(
            doc = "Error text if IPMI agent is unavailable.",
            readonly = True,
        ),
        ipmi_errors_from = dict(
            doc = "Time when IPMI agent became unavailable",
            kind = datetime,
            readonly = True,
        ),
        ipmi_password = dict(
            doc = "IPMI password",
        ),
        ipmi_privilege = dict(
            doc = "IPMI privilege level.",
            kind = int,
            vals = {
                1: 'callback',
                2: 'user (default)',
                3: 'operator',
                4: 'admin',
                5: 'OEM',
            },
        ),
        ipmi_username = dict(
            doc = "IPMI username",
        ),
        jmx_available = dict(
            doc = "Availability of JMX agent.",
            kind = int,
            readonly = True,
            vals = {
                0: 'unknown (default)',
                1: 'available',
                2: 'unavailable',
            },
        ),
        jmx_disable_until = dict(
            doc = "The next polling time of an unavailable JMX agent.",
            kind = datetime,
            readonly = True,
        ),
        jmx_error = dict(
            doc = "Error text if JMX agent is unavailable.",
            readonly = True,
        ),
        jmx_errors_from = dict(
            doc = "Time when JMX agent became unavailable.",
            kind = datetime,
            readonly = True,
        ),
        maintenance_from = dict(
            doc = "Starting time of the effective maintenance.",
            kind = datetime,
            readonly = True,
        ),
        maintenance_status = dict(
            doc = "Effective maintenance status.",
            kind = int,
            readonly = True,
            vals = {
                0: 'no maintenance (default)',
                1: 'maintenance in effect',
            },
        ),
        maintenance_type = dict(
            doc = "Effective maintenance type.",
            kind = int,
            readonly = True,
            vals = {
                0: 'maintenance with data collection (default)',
                1: 'maintenance without data collection',
            },
        ),
        maintenanceid = dict(
            doc = "ID of the `Maintenance` that is currently in effect on the host.",
            readonly = True,
        ),
        name = dict(
            doc = "Visible name of the host, defaults to `host` property value.",
        ),
        proxy_hostid = dict(
            doc = "ID of the `Proxy` that is used to monitor the host.",
        ),
        snmp_available = dict(
            doc = "Availability of SNMP agent.",
            kind = int,
            readonly = True,
            vals = {
                0: 'unknown (default)',
                1: 'available',
                2: 'unavailable',
            },
        ),
        snmp_disable_until = dict(
            doc = "The next polling time of an unavailable SNMP agent.",
            kind = datetime,
            readonly = True,
        ),
        snmp_error = dict(
            doc = "Error text if SNMP agent is unavailable.",
            readonly = True,
        ),
        snmp_errors_from = dict(
            doc = "Time when SNMP agent became unavailable.",
            kind = datetime,
            readonly = True,
        ),
        status = dict(
            doc = "Status and function of the host.",
            kind = int,
            vals = {
                0: 'monitored host (default)',
                1: 'unmonitored host',
            },
        ),
    )
# These import down here to work around circular imports.
from .hostgroup import HostGroup
from .item import Item
from .trigger import Trigger
| |
"""Functionality for autorenewal and associated juggling of configurations"""
from __future__ import print_function
import copy
import glob
import logging
import os
import traceback
import six
import zope.component
import OpenSSL
from letsencrypt import configuration
from letsencrypt import cli
from letsencrypt import constants
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import hooks
from letsencrypt import storage
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# These are the items which get pulled out of a renewal configuration
# file's renewalparams and actually used in the client configuration
# during the renewal process. We have to record their types here because
# the renewal configuration process loses this information.
STR_CONFIG_ITEMS = ["config_dir", "logs_dir", "work_dir", "user_agent",
"server", "account", "authenticator", "installer",
"standalone_supported_challenges"]
INT_CONFIG_ITEMS = ["rsa_key_size", "tls_sni_01_port", "http01_port"]
def renewal_conf_files(config):
    """Return /path/to/*.conf in the renewal conf directory"""
    pattern = os.path.join(config.renewal_configs_dir, "*.conf")
    return glob.glob(pattern)
def _reconstitute(config, full_path):
    """Try to instantiate a RenewableCert, updating config with relevant items.

    This is specifically for use in renewal and enforces several checks
    and policies to ensure that we can try to proceed with the renewal
    request. The config argument is modified by including relevant options
    read from the renewal configuration file.

    :param configuration.NamespaceConfig config: configuration for the
        current lineage

    :param str full_path: Absolute path to the configuration file that
        defines this lineage

    :returns: the RenewableCert object or None if a fatal error occurred
    :rtype: `storage.RenewableCert` or NoneType

    """
    try:
        renewal_candidate = storage.RenewableCert(
            full_path, configuration.RenewerConfiguration(config))
    except (errors.CertStorageError, IOError):
        logger.warning("Renewal configuration file %s is broken. Skipping.", full_path)
        logger.debug("Traceback was:\n%s", traceback.format_exc())
        return None
    if "renewalparams" not in renewal_candidate.configuration:
        logger.warning("Renewal configuration file %s lacks "
                       "renewalparams. Skipping.", full_path)
        return None
    renewalparams = renewal_candidate.configuration["renewalparams"]
    if "authenticator" not in renewalparams:
        logger.warning("Renewal configuration file %s does not specify "
                       "an authenticator. Skipping.", full_path)
        return None
    # Now restore specific values along with their data types, if
    # those elements are present.
    try:
        _restore_required_config_elements(config, renewalparams)
        _restore_plugin_configs(config, renewalparams)
    except (ValueError, errors.Error) as error:
        # Bug fix: 'error.message' is a Python-2-only attribute (absent on
        # many exception classes even there); format the exception itself.
        logger.warning(
            "An error occurred while parsing %s. The error was %s. "
            "Skipping the file.", full_path, error)
        logger.debug("Traceback was:\n%s", traceback.format_exc())
        return None
    try:
        config.domains = [le_util.enforce_domain_sanity(d)
                          for d in renewal_candidate.names()]
    except errors.ConfigurationError as error:
        logger.warning("Renewal configuration file %s references a cert "
                       "that contains an invalid domain name. The problem "
                       "was: %s. Skipping.", full_path, error)
        return None
    return renewal_candidate
def _restore_webroot_config(config, renewalparams):
    """Restore webroot_map (or the legacy webroot_path) onto config.

    webroot_map is, uniquely, a dict, and the general-purpose configuration
    restoring logic is not able to correctly parse it from the serialized
    form.
    """
    if "webroot_map" in renewalparams:
        if not cli.set_by_cli("webroot_map"):
            config.namespace.webroot_map = renewalparams["webroot_map"]
    elif "webroot_path" in renewalparams:
        logger.info("Ancient renewal conf file without webroot-map, restoring webroot-path")
        webroot_paths = renewalparams["webroot_path"]
        # Prior to 0.1.0, webroot_path was stored as a bare string.
        if isinstance(webroot_paths, str):
            webroot_paths = [webroot_paths]
        config.namespace.webroot_path = webroot_paths
def _restore_plugin_configs(config, renewalparams):
    """Sets plugin specific values in config from renewalparams

    :param configuration.NamespaceConfig config: configuration for the
        current lineage

    :param configobj.Section renewalparams: Parameters from the renewal
        configuration file that defines this lineage

    """
    # Now use parser to get plugin-prefixed items with correct types
    # XXX: the current approach of extracting only prefixed items
    # related to the actually-used installer and authenticator
    # works as long as plugins don't need to read plugin-specific
    # variables set by someone else (e.g., assuming Apache
    # configurator doesn't need to read webroot_ variables).
    # Note: if a parameter that used to be defined in the parser is no
    # longer defined, stored copies of that parameter will be
    # deserialized as strings by this logic even if they were
    # originally meant to be some other type.
    if renewalparams["authenticator"] == "webroot":
        _restore_webroot_config(config, renewalparams)
        plugin_prefixes = []
    else:
        plugin_prefixes = [renewalparams["authenticator"]]
    if renewalparams.get("installer", None) is not None:
        plugin_prefixes.append(renewalparams["installer"])
    for plugin_prefix in set(plugin_prefixes):
        for config_item, config_value in six.iteritems(renewalparams):
            if config_item.startswith(plugin_prefix + "_") and not cli.set_by_cli(config_item):
                # Values None, True, and False need to be treated specially,
                # as their types aren't handled correctly by configobj.
                if config_value in ("None", "True", "False"):
                    # Security/correctness fix: map the serialized literals
                    # explicitly instead of eval()-ing file contents
                    # (bool("False") == True, so a plain cast is also wrong).
                    literals = {"None": None, "True": True, "False": False}
                    setattr(config.namespace, config_item,
                            literals[config_value])
                else:
                    cast = cli.argparse_type(config_item)
                    setattr(config.namespace, config_item, cast(config_value))
def _restore_required_config_elements(config, renewalparams):
    """Sets non-plugin specific values in config from renewalparams

    :param configuration.NamespaceConfig config: configuration for the
        current lineage

    :param configobj.Section renewalparams: parameters from the renewal
        configuration file that defines this lineage

    """
    # string-valued items to add if they're present
    for config_item in STR_CONFIG_ITEMS:
        if config_item not in renewalparams or cli.set_by_cli(config_item):
            continue
        value = renewalparams[config_item]
        # ConfigObj loses type information, so a stored None comes back
        # as the string "None"; translate it back to the real NoneType.
        if value == "None":
            value = None
        setattr(config.namespace, config_item, value)
    # int-valued items to add if they're present
    for config_item in INT_CONFIG_ITEMS:
        if config_item not in renewalparams or cli.set_by_cli(config_item):
            continue
        config_value = renewalparams[config_item]
        # the default value for http01_port was None during private beta
        if config_item == "http01_port" and config_value == "None":
            logger.info("updating legacy http01_port value")
            int_value = cli.flag_default("http01_port")
        else:
            try:
                int_value = int(config_value)
            except ValueError:
                raise errors.Error(
                    "Expected a numeric value for {0}".format(config_item))
        setattr(config.namespace, config_item, int_value)
def should_renew(config, lineage):
    """Return true if any of the circumstances for automatic renewal apply."""
    # Placeholder policy: every lineage is always considered due.
    return True
def _avoid_invalidating_lineage(config, lineage, original_server):
    """Do not renew a valid cert with one from a staging server!

    :raises errors.Error: if a seemingly valid cert would be replaced by a
        staging/test cert and --break-my-certs was not given
    """
    def _is_staging(srv):
        return srv == constants.STAGING_URI or "staging" in srv
    # Some lineages may have begun with --staging, but then had production certs
    # added to them
    # Bug fix: the cert file handle was previously opened without ever being
    # closed; use a context manager so it is released deterministically.
    with open(lineage.cert) as cert_file:
        latest_cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM, cert_file.read())
    # all our test certs are from happy hacker fake CA, though maybe one day
    # we should test more methodically
    now_valid = "fake" not in repr(latest_cert.get_issuer()).lower()
    if _is_staging(config.server):
        if not _is_staging(original_server) or now_valid:
            if not config.break_my_certs:
                names = ", ".join(lineage.names())
                raise errors.Error(
                    "You've asked to renew/replace a seemingly valid certificate with "
                    "a test certificate (domains: {0}). We will not do that "
                    "unless you use the --break-my-certs flag!".format(names))
def renew_cert(config, domains, le_client, lineage):
    """Renew a certificate lineage.

    Obtains a fresh certificate for `domains` and, unless --dry-run,
    stores it as the lineage's new version and runs the renew hook.
    """
    renewal_params = lineage.configuration["renewalparams"]
    original_server = renewal_params.get("server", cli.flag_default("server"))
    # Guard against replacing a production cert with a staging/test one.
    _avoid_invalidating_lineage(config, lineage, original_server)
    new_certr, new_chain, new_key, _ = le_client.obtain_certificate(domains)
    if config.dry_run:
        logger.info("Dry run: skipping updating lineage at %s",
                    os.path.dirname(lineage.cert))
    else:
        prior_version = lineage.latest_common_version()
        # Serialize the new cert and chain to PEM before storing them.
        new_cert = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, new_certr.body.wrapped)
        new_chain = crypto_util.dump_pyopenssl_chain(new_chain)
        renewal_conf = configuration.RenewerConfiguration(config.namespace)
        # TODO: Check return value of save_successor
        lineage.save_successor(prior_version, new_cert, new_key.pem, new_chain, renewal_conf)
        lineage.update_all_links_to(lineage.latest_common_version())
    # The renew hook runs for dry runs too.
    hooks.renew_hook(config, domains, lineage.live_dir)
def report(msgs, category):
    """Format a results report for a category of renewal outcomes"""
    formatted = ["%s (%s)" % (msg, category) for msg in msgs]
    return " " + "\n ".join(formatted)
def _renew_describe_results(config, renew_successes, renew_failures,
renew_skipped, parse_failures):
out = []
notify = out.append
if config.dry_run:
notify("** DRY RUN: simulating 'letsencrypt renew' close to cert expiry")
notify("** (The test certificates below have not been saved.)")
notify("")
if renew_skipped:
notify("The following certs are not due for renewal yet:")
notify(report(renew_skipped, "skipped"))
if not renew_successes and not renew_failures:
notify("No renewals were attempted.")
elif renew_successes and not renew_failures:
notify("Congratulations, all renewals succeeded. The following certs "
"have been renewed:")
notify(report(renew_successes, "success"))
elif renew_failures and not renew_successes:
notify("All renewal attempts failed. The following certs could not be "
"renewed:")
notify(report(renew_failures, "failure"))
elif renew_failures and renew_successes:
notify("The following certs were successfully renewed:")
notify(report(renew_successes, "success"))
notify("\nThe following certs could not be renewed:")
notify(report(renew_failures, "failure"))
if parse_failures:
notify("\nAdditionally, the following renewal configuration files "
"were invalid: ")
notify(parse_failures, "parsefail")
if config.dry_run:
notify("** DRY RUN: simulating 'letsencrypt renew' close to cert expiry")
notify("** (The test certificates above have not been saved.)")
if config.quiet and not (renew_failures or parse_failures):
return
print("\n".join(out))
def renew_all_lineages(config):
    """Examine each lineage; renew if due and report results"""
    if config.domains != []:
        raise errors.Error("Currently, the renew verb is only capable of "
                           "renewing all installed certificates that are due "
                           "to be renewed; individual domains cannot be "
                           "specified with this action. If you would like to "
                           "renew specific certificates, use the certonly "
                           "command. The renew verb may provide other options "
                           "for selecting certificates to renew in the future.")
    renewer_config = configuration.RenewerConfiguration(config)
    renew_successes = []
    renew_failures = []
    renew_skipped = []
    parse_failures = []
    for renewal_file in renewal_conf_files(renewer_config):
        disp = zope.component.getUtility(interfaces.IDisplay)
        disp.notification("Processing " + renewal_file, pause=False)
        # Each lineage gets its own deep copy of config so per-lineage
        # values restored from one conf file do not leak into the next.
        lineage_config = copy.deepcopy(config)
        # Note that this modifies config (to add back the configuration
        # elements from within the renewal configuration file).
        try:
            renewal_candidate = _reconstitute(lineage_config, renewal_file)
        except Exception as e:  # pylint: disable=broad-except
            logger.warning("Renewal configuration file %s produced an "
                           "unexpected error: %s. Skipping.", renewal_file, e)
            logger.debug("Traceback was:\n%s", traceback.format_exc())
            parse_failures.append(renewal_file)
            continue
        try:
            if renewal_candidate is None:
                parse_failures.append(renewal_file)
            else:
                # XXX: ensure that each call here replaces the previous one
                zope.component.provideUtility(lineage_config)
                if should_renew(lineage_config, renewal_candidate):
                    plugins = plugins_disco.PluginsRegistry.find_all()
                    # Imported here to avoid a circular import with main.
                    from letsencrypt import main
                    main.obtain_cert(lineage_config, plugins, renewal_candidate)
                    renew_successes.append(renewal_candidate.fullchain)
                else:
                    renew_skipped.append(renewal_candidate.fullchain)
        except Exception as e:  # pylint: disable=broad-except
            # obtain_cert (presumably) encountered an unanticipated problem.
            logger.warning("Attempting to renew cert from %s produced an "
                           "unexpected error: %s. Skipping.", renewal_file, e)
            logger.debug("Traceback was:\n%s", traceback.format_exc())
            renew_failures.append(renewal_candidate.fullchain)
    # Describe all the results
    _renew_describe_results(config, renew_successes, renew_failures,
                            renew_skipped, parse_failures)
    if renew_failures or parse_failures:
        raise errors.Error("{0} renew failure(s), {1} parse failure(s)".format(
            len(renew_failures), len(parse_failures)))
    else:
        logger.debug("no renewal failures")
| |
import os
import re
import collections
from uitools.qt import QtGui
from maya import cmds, mel
from sgfs.ui import product_select
# Parsed reference-edit statement: the MEL command name, the namespaces and
# DAG nodes it mentions, and the raw source line.
RefEdit = collections.namedtuple('RefEdit', 'command namespaces nodes source')
class RefEditSelector(product_select.Layout):
    """Product selector with an extra 'Ref Edits' section that lists saved
    .mel reference-edit files for the chosen pipeline step."""
    def _setup_sections(self):
        super(RefEditSelector, self)._setup_sections()
        self.register_section('Ref Edits', self._iter_files)
    def _iter_files(self, step_path):
        """Yields (name, path, priority) for each .mel file under
        <step>/maya/data/refedits.

        priority is a (version, revision) tuple parsed from a vNN[_rNN]
        token in the file name, or (0, 0) when no such token exists.
        """
        if step_path is None:
            return
        refedit_dir = os.path.join(step_path, 'maya', 'data', 'refedits')
        if not os.path.exists(refedit_dir):
            return
        for name in os.listdir(refedit_dir):
            # Skip hidden files and anything that isn't a MEL script.
            if name.startswith('.'):
                continue
            if not name.endswith('.mel'):
                continue
            m = re.search(r'v(\d+)(?:_r(\d+))?', name)
            if m:
                # Missing revision group defaults to 0.
                priority = tuple(int(x or 0) for x in m.groups())
            else:
                priority = (0, 0)
            refedit_path = os.path.join(refedit_dir, name)
            yield name, refedit_path, priority
class Dialog(QtGui.QDialog):
    """UI for picking a saved ref-edit (.mel) file and applying a filtered
    subset of its edits to the current Maya scene."""
    def __init__(self):
        super(Dialog, self).__init__()
        self._setup_ui()
    def _setup_ui(self):
        """Builds the widget hierarchy: selector, type/option/node boxes
        and the apply button."""
        self.setWindowTitle("Reference Edit Import")
        self.setLayout(QtGui.QVBoxLayout())
        self._selector = RefEditSelector(parent=self)
        # Select as far as we can.
        path = (
            cmds.file(q=True, sceneName=True) or
            cmds.workspace(q=True, fullName=True) or
            None
        )
        if path is not None:
            self._selector.setPath(path, allow_partial=True)
        self.layout().addLayout(self._selector)
        self._type_box = QtGui.QGroupBox("Edit Types")
        self._type_box.setLayout(QtGui.QVBoxLayout())
        self.layout().addWidget(self._type_box)
        self._option_box = QtGui.QGroupBox("Options")
        self._option_box.setLayout(QtGui.QVBoxLayout())
        self.layout().addWidget(self._option_box)
        self._only_selected_checkbox = QtGui.QCheckBox("Only Apply to Selected Nodes", checked=True)
        self._only_selected_checkbox.stateChanged.connect(self._node_filters_changed)
        self._option_box.layout().addWidget(self._only_selected_checkbox)
        self._on_selection_parents_checkbox = QtGui.QCheckBox("...or Parents of Selected Nodes", checked=True)
        self._on_selection_parents_checkbox.stateChanged.connect(self._node_filters_changed)
        self._option_box.layout().addWidget(self._on_selection_parents_checkbox)
        self._on_selection_children_checkbox = QtGui.QCheckBox("...or Children of Selected Nodes", checked=True)
        self._on_selection_children_checkbox.stateChanged.connect(self._node_filters_changed)
        self._option_box.layout().addWidget(self._on_selection_children_checkbox)
        self._node_box = QtGui.QGroupBox("Nodes")
        self._node_box.setLayout(QtGui.QVBoxLayout())
        self.layout().addWidget(self._node_box)
        button = QtGui.QPushButton("Apply Edits")
        button.clicked.connect(self._on_reference)
        self.layout().addWidget(button)
        # NOTE(review): this overwrites the selector's path_changed attribute
        # rather than connecting a Qt signal — presumably product_select.Layout
        # invokes it as a plain callback; confirm.
        self._selector.path_changed = self._path_changed
        self._path_changed(self._selector.path())
    def _parse_file(self, path):
        """Parses a .mel ref-edit file into RefEdit tuples in self._edits."""
        self._edits = []
        for line in open(path):
            line = line.strip()
            # Skip blank lines and MEL '//' comments.
            if not line or line.startswith('//'):
                continue
            command = line.split()[0]
            # Heuristics: 'ns:' tokens are namespaces; '|a|b' tokens are
            # full DAG paths.
            namespaces = re.findall(r'(\w+):', line)
            nodes = re.findall(r'(\|[\|:\w]+)', line)
            self._edits.append(RefEdit(
                command=command,
                nodes=set(nodes),
                namespaces=set(namespaces),
                source=line,
            ))
    def _node_filters_changed(self, *args):
        """Rebuilds the checkbox lists when a filter option is toggled."""
        self._path_changed(self._path)
    def _path_changed(self, path):
        """Re-populates the 'Edit Types' and 'Nodes' boxes for the new path."""
        self._path = path
        # Tear down widgets from the previous selection.
        for child in self._type_box.children():
            if isinstance(child, QtGui.QWidget):
                child.hide()
                child.destroy()
        for child in self._node_box.children():
            if isinstance(child, QtGui.QWidget):
                child.hide()
                child.destroy()
        if path is None:
            self._type_box.layout().addWidget(QtGui.QLabel("Nothing"))
            self._option_box.layout().addWidget(QtGui.QLabel("Nothing"))
            return
        self._parse_file(path)
        # One checkbox per distinct MEL command; only setAttr on by default.
        self._command_boxes = []
        for command in sorted(set(e.command for e in self._edits)):
            checkbox = QtGui.QCheckBox(command)
            checkbox.setChecked(command == 'setAttr')
            self._command_boxes.append(checkbox)
            self._type_box.layout().addWidget(checkbox)
        self._node_boxes = []
        all_nodes = set()
        for e in self._edits:
            all_nodes.update(e.nodes)
        # Optionally restrict the node list to the current selection,
        # plus its parents and/or children per the option checkboxes.
        node_filter = None
        if self._only_selected_checkbox.isChecked():
            node_filter = set(cmds.ls(selection=True, long=True) or ())
        if self._on_selection_parents_checkbox.isChecked():
            node_filter = node_filter or set()
            # Walk all ancestors of the selection.
            visited = set()
            to_visit = set(cmds.ls(selection=True, long=True) or ())
            while to_visit:
                node = to_visit.pop()
                node_filter.add(node)
                if node in visited:
                    continue
                visited.add(node)
                to_visit.update(cmds.listRelatives(node, allParents=True, fullPath=True) or ())
        if self._on_selection_children_checkbox.isChecked():
            node_filter = node_filter or set()
            for node in cmds.ls(selection=True, long=True) or ():
                node_filter.update(cmds.listRelatives(node, allDescendents=True, fullPath=True) or ())
        if node_filter is not None:
            all_nodes.intersection_update(node_filter)
        for node in sorted(all_nodes):
            checkbox = QtGui.QCheckBox(node, checked=True)
            self._node_boxes.append(checkbox)
            self._node_box.layout().addWidget(checkbox)
        # existing = [cmds.file(ref, q=True, namespace=True) for ref in cmds.file(q=True, reference=True) or []]
        # self._namespace_menus = []
        # namespaces = set()
        # for edit in self._edits:
        #     namespaces.update(edit.namespaces)
        # for namespace in sorted(namespaces):
        #     layout = QtGui.QHBoxLayout()
        #     layout.addWidget(QtGui.QLabel(namespace))
        #     combo = QtGui.QComboBox()
        #     combo.addItem('<None>')
        #     for name in existing:
        #         combo.addItem(name)
        #         if name == namespace:
        #             combo.setCurrentIndex(combo.count() - 1)
        #     layout.addWidget(combo)
        #     self._option_box.layout().addLayout(layout)
    def _on_reference(self, *args):
        """Applies the checked edits via mel.eval and reports the outcome."""
        do_command = {}
        for checkbox in self._command_boxes:
            do_command[str(checkbox.text())] = checkbox.isChecked()
        do_node = {}
        for checkbox in self._node_boxes:
            do_node[str(checkbox.text())] = checkbox.isChecked()
        applied = 0
        failed = 0
        for edit in self._edits:
            if not do_command.get(edit.command):
                continue
            # Every node the edit touches must be checked.
            if not all(do_node.get(n) for n in edit.nodes):
                continue
            try:
                mel.eval(edit.source)
            except Exception as e:
                cmds.warning(str(e))
                failed += 1
            else:
                applied += 1
        (QtGui.QMessageBox.warning if failed else QtGui.QMessageBox.information)(
            self,
            "Applied Reference Edits",
            "Applied %d of %d edits with %d failures." % (applied, len(self._edits), failed)
        )
        self.close()
def __before_reload__():
    """Module-reload hook: close and drop the open dialog, if any."""
    # Bug fix: the assignment to ``dialog`` below made the name local to the
    # whole function, so the ``if dialog:`` read raised UnboundLocalError.
    # Declare it global so the module-level handle is read and cleared.
    global dialog
    if dialog:
        dialog.close()
        dialog = None
def run():
    """Shows a fresh Reference Edit Import dialog, closing any open one.

    Keeps the instance in the module-level ``dialog`` global so reloads and
    repeat invocations can find and close it.
    """
    global dialog
    if dialog:
        dialog.close()
    dialog = Dialog()
    dialog.show()
| |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import RoomError, SynapseError
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import UserID, RoomStreamToken
from ._base import BaseHandler
import logging
logger = logging.getLogger(__name__)
class MessageHandler(BaseHandler):
    """Handles reading and sending room events/messages.

    NOTE(review): legacy Twisted-style handler — methods decorated with
    ``@defer.inlineCallbacks`` are generators where ``yield`` waits on a
    Deferred and ``defer.returnValue`` returns the final result.
    Attributes such as ``self.auth``, ``self.store``, ``self.distributor``
    and ``self.state_handler`` are presumably inherited from ``BaseHandler``
    — not visible here, confirm against ``._base``.
    """

    def __init__(self, hs):
        super(MessageHandler, self).__init__(hs)
        self.hs = hs
        self.state = hs.get_state_handler()
        self.clock = hs.get_clock()
        self.validator = EventValidator()

    @defer.inlineCallbacks
    def get_message(self, msg_id=None, room_id=None, sender_id=None,
                    user_id=None):
        """ Retrieve a message.
        Args:
            msg_id (str): The message ID to obtain.
            room_id (str): The room where the message resides.
            sender_id (str): The user ID of the user who sent the message.
            user_id (str): The user ID of the user making this request.
        Returns:
            The message, or None if no message exists.
        Raises:
            SynapseError if something went wrong.
        """
        # Authorization check only; raises if the requester is not joined.
        yield self.auth.check_joined_room(room_id, user_id)
        # Pull out the message from the db
        # msg = yield self.store.get_message(
        #     room_id=room_id,
        #     msg_id=msg_id,
        #     user_id=sender_id
        # )
        # TODO (erikj): Once we work out the correct c-s api we need to think
        # on how to do this.
        defer.returnValue(None)

    @defer.inlineCallbacks
    def get_messages(self, user_id=None, room_id=None, pagin_config=None,
                     feedback=False, as_client_event=True):
        """Get messages in a room.
        Args:
            user_id (str): The user requesting messages.
            room_id (str): The room they want messages from.
            pagin_config (synapse.api.streams.PaginationConfig): The pagination
                config rules to apply, if any.
            feedback (bool): True to get compressed feedback with the messages
            as_client_event (bool): True to get events in client-server format.
        Returns:
            dict: Pagination API results
        """
        yield self.auth.check_joined_room(room_id, user_id)
        data_source = self.hs.get_event_sources().sources["room"]
        # No explicit start point: paginate backwards from "now".
        if not pagin_config.from_token:
            pagin_config.from_token = (
                yield self.hs.get_event_sources().get_current_token(
                    direction='b'
                )
            )
        room_token = RoomStreamToken.parse(pagin_config.from_token.room_key)
        if room_token.topological is None:
            raise SynapseError(400, "Invalid token")
        # Give federation a chance to fetch older history before we read it.
        yield self.hs.get_handlers().federation_handler.maybe_backfill(
            room_id, room_token.topological
        )
        user = UserID.from_string(user_id)
        events, next_key = yield data_source.get_pagination_rows(
            user, pagin_config.get_source_config("room"), room_id
        )
        # Token the client should use to fetch the next page.
        next_token = pagin_config.from_token.copy_and_replace(
            "room_key", next_key
        )
        time_now = self.clock.time_msec()
        chunk = {
            "chunk": [
                serialize_event(e, time_now, as_client_event) for e in events
            ],
            "start": pagin_config.from_token.to_string(),
            "end": next_token.to_string(),
        }
        defer.returnValue(chunk)

    @defer.inlineCallbacks
    def create_and_send_event(self, event_dict, ratelimit=True,
                              client=None, txn_id=None):
        """ Given a dict from a client, create and handle a new event.
        Creates an FrozenEvent object, filling out auth_events, prev_events,
        etc.
        Adds display names to Join membership events.
        Persists and notifies local clients and federation.
        Args:
            event_dict (dict): An entire event
        """
        builder = self.event_builder_factory.new(event_dict)
        # Raises if the client-supplied event dict is malformed.
        self.validator.validate_new(builder)
        if ratelimit:
            self.ratelimit(builder.user_id)
        # TODO(paul): Why does 'event' not have a 'user' object?
        user = UserID.from_string(builder.user_id)
        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
        if builder.type == EventTypes.Member:
            membership = builder.content.get("membership", None)
            if membership == Membership.JOIN:
                joinee = UserID.from_string(builder.state_key)
                # If event doesn't include a display name, add one.
                yield self.distributor.fire(
                    "collect_presencelike_data",
                    joinee,
                    builder.content
                )
        # Record which access token / device / transaction produced this
        # event (used e.g. for txn de-duplication).
        if client is not None:
            if client.token_id is not None:
                builder.internal_metadata.token_id = client.token_id
            if client.device_id is not None:
                builder.internal_metadata.device_id = client.device_id
        if txn_id is not None:
            builder.internal_metadata.txn_id = txn_id
        event, context = yield self._create_new_client_event(
            builder=builder,
        )
        # Membership changes take a special path; everything else goes
        # through the generic persist-and-notify flow.
        if event.type == EventTypes.Member:
            member_handler = self.hs.get_handlers().room_member_handler
            yield member_handler.change_membership(event, context)
        else:
            yield self.handle_new_client_event(
                event=event,
                context=context,
            )
        # Sending a message counts as presence activity for the sender.
        if event.type == EventTypes.Message:
            presence = self.hs.get_handlers().presence_handler
            with PreserveLoggingContext():
                presence.bump_presence_active_time(user)
        defer.returnValue(event)

    @defer.inlineCallbacks
    def get_room_data(self, user_id=None, room_id=None,
                      event_type=None, state_key=""):
        """ Get data from a room.
        Args:
            event : The room path event
        Returns:
            The path data content.
        Raises:
            SynapseError if something went wrong.
        """
        have_joined = yield self.auth.check_joined_room(room_id, user_id)
        if not have_joined:
            raise RoomError(403, "User not in room.")
        data = yield self.state_handler.get_current_state(
            room_id, event_type, state_key
        )
        defer.returnValue(data)

    @defer.inlineCallbacks
    def get_feedback(self, event_id):
        """Fetch stored feedback for *event_id*, or None if there is none.

        NOTE(review): no room-membership auth check here (the commented-out
        call below suggests one was intended) — confirm callers authorize.
        """
        # yield self.auth.check_joined_room(room_id, user_id)
        # Pull out the feedback from the db
        fb = yield self.store.get_feedback(event_id)
        if fb:
            defer.returnValue(fb)
        defer.returnValue(None)

    @defer.inlineCallbacks
    def get_state_events(self, user_id, room_id):
        """Retrieve all state events for a given room.
        Args:
            user_id(str): The user requesting state events.
            room_id(str): The room ID to get all state events from.
        Returns:
            A list of dicts representing state events. [{}, {}, {}]
        """
        yield self.auth.check_joined_room(room_id, user_id)
        # TODO: This is duplicating logic from snapshot_all_rooms
        current_state = yield self.state_handler.get_current_state(room_id)
        now = self.clock.time_msec()
        defer.returnValue(
            [serialize_event(c, now) for c in current_state.values()]
        )

    @defer.inlineCallbacks
    def snapshot_all_rooms(self, user_id=None, pagin_config=None,
                           feedback=False, as_client_event=True):
        """Retrieve a snapshot of all rooms the user is invited or has joined.
        This snapshot may include messages for all rooms where the user is
        joined, depending on the pagination config.
        Args:
            user_id (str): The ID of the user making the request.
            pagin_config (synapse.api.streams.PaginationConfig): The pagination
                config used to determine how many messages *PER ROOM* to return.
            feedback (bool): True to get feedback along with these messages.
            as_client_event (bool): True to get events in client-server format.
        Returns:
            A list of dicts with "room_id" and "membership" keys for all rooms
            the user is currently invited or joined in on. Rooms where the user
            is joined on, may return a "messages" key with messages, depending
            on the specified PaginationConfig.
        """
        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user_id,
            membership_list=[Membership.INVITE, Membership.JOIN]
        )
        user = UserID.from_string(user_id)
        rooms_ret = []
        now_token = yield self.hs.get_event_sources().get_current_token()
        presence_stream = self.hs.get_event_sources().sources["presence"]
        pagination_config = PaginationConfig(from_token=now_token)
        presence, _ = yield presence_stream.get_pagination_rows(
            user, pagination_config.get_source_config("presence"), None
        )
        public_room_ids = yield self.store.get_public_room_ids()
        # Default per-room message limit when the client supplies none.
        limit = pagin_config.limit
        if limit is None:
            limit = 10

        @defer.inlineCallbacks
        def handle_room(event):
            # Builds the per-room entry and appends it to rooms_ret
            # (closure over rooms_ret / now_token / limit).
            d = {
                "room_id": event.room_id,
                "membership": event.membership,
                "visibility": (
                    "public" if event.room_id in public_room_ids
                    else "private"
                ),
            }
            if event.membership == Membership.INVITE:
                d["inviter"] = event.sender
            rooms_ret.append(d)
            # Messages/state are only included for rooms we have joined.
            if event.membership != Membership.JOIN:
                return
            try:
                # Fetch recent messages and current state concurrently.
                (messages, token), current_state = yield defer.gatherResults(
                    [
                        self.store.get_recent_events_for_room(
                            event.room_id,
                            limit=limit,
                            end_token=now_token.room_key,
                        ),
                        self.state_handler.get_current_state(
                            event.room_id
                        ),
                    ]
                ).addErrback(unwrapFirstError)
                start_token = now_token.copy_and_replace("room_key", token[0])
                end_token = now_token.copy_and_replace("room_key", token[1])
                time_now = self.clock.time_msec()
                d["messages"] = {
                    "chunk": [
                        serialize_event(m, time_now, as_client_event)
                        for m in messages
                    ],
                    "start": start_token.to_string(),
                    "end": end_token.to_string(),
                }
                d["state"] = [
                    serialize_event(c, time_now, as_client_event)
                    for c in current_state.values()
                ]
            # NOTE(review): bare except — deliberately best-effort so one bad
            # room does not break the whole snapshot, but it also swallows
            # non-Exception errors; consider `except Exception:`.
            except:
                logger.exception("Failed to get snapshot")

        yield defer.gatherResults(
            [handle_room(e) for e in room_list],
            consumeErrors=True
        ).addErrback(unwrapFirstError)
        ret = {
            "rooms": rooms_ret,
            "presence": presence,
            "end": now_token.to_string()
        }
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def room_initial_sync(self, user_id, room_id, pagin_config=None,
                          feedback=False):
        """Produce the initial-sync payload for a single room: membership,
        recent messages, current state, and presence for joined members.
        """
        current_state = yield self.state.get_current_state(
            room_id=room_id,
        )
        # Pass the state we already fetched so auth need not re-fetch it.
        yield self.auth.check_joined_room(
            room_id, user_id,
            current_state=current_state
        )
        # TODO(paul): I wish I was called with user objects not user_id
        # strings...
        auth_user = UserID.from_string(user_id)
        # TODO: These concurrently
        time_now = self.clock.time_msec()
        state = [
            serialize_event(x, time_now)
            for x in current_state.values()
        ]
        member_event = current_state.get((EventTypes.Member, user_id,))
        now_token = yield self.hs.get_event_sources().get_current_token()
        limit = pagin_config.limit if pagin_config else None
        if limit is None:
            limit = 10
        # Users whose presence we will include: currently-joined members.
        room_members = [
            m for m in current_state.values()
            if m.type == EventTypes.Member
            and m.content["membership"] == Membership.JOIN
        ]
        presence_handler = self.hs.get_handlers().presence_handler

        @defer.inlineCallbacks
        def get_presence():
            # DeferredList (not gatherResults): per-member presence failures
            # are dropped instead of failing the whole sync.
            presence_defs = yield defer.DeferredList(
                [
                    presence_handler.get_state(
                        target_user=UserID.from_string(m.user_id),
                        auth_user=auth_user,
                        as_event=True,
                        check_auth=False,
                    )
                    for m in room_members
                ],
                consumeErrors=True,
            )
            defer.returnValue([p for success, p in presence_defs if success])

        # Presence and recent messages are fetched concurrently.
        presence, (messages, token) = yield defer.gatherResults(
            [
                get_presence(),
                self.store.get_recent_events_for_room(
                    room_id,
                    limit=limit,
                    end_token=now_token.room_key,
                )
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        start_token = now_token.copy_and_replace("room_key", token[0])
        end_token = now_token.copy_and_replace("room_key", token[1])
        time_now = self.clock.time_msec()
        defer.returnValue({
            "membership": member_event.membership,
            "room_id": room_id,
            "messages": {
                "chunk": [serialize_event(m, time_now) for m in messages],
                "start": start_token.to_string(),
                "end": end_token.to_string(),
            },
            "state": state,
            "presence": presence
        })
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import argparse
from configparser import RawConfigParser as ConfigParser
class Config(object):
    """Merged command-line / INI-file configuration for the backup script.

    Precedence: most values come from the config file and fall back to the
    matching command-line argument. Config sections are keyed by name:
    ``mysql=<db>``, ``storage=<name>``, ``sendmail=<name>``, plus the plain
    ``backup`` and ``sendmail`` sections.
    """

    def __init__(self, configure_file):
        # Parse CLI args first — -c may point at a different config file.
        self._args = self.__parse_args(configure_file)
        self._config = self.__parse_config()

    def __parse_args(self, configure_file):
        """Parse command-line options; *configure_file* is the default path
        used when -c/--config-file is not given."""
        parser = argparse.ArgumentParser(description="Usage: %prog [options] ")
        parser.add_argument("-c", "--config-file",
                            dest="config",
                            type=str,
                            help="path to the config file")
        parser.add_argument("-d", "--database",
                            dest="database",
                            help="mysql database or name patern mysql",
                            required=True)
        parser.add_argument("-u", "--username",
                            dest="username",
                            help="mysql username")
        parser.add_argument("-p", "--password",
                            dest="password",
                            help="mysql password")
        parser.add_argument("-o", "--output",
                            dest="output",
                            help="path to the output directory")
        parser.add_argument("-l", "--log",
                            dest="logfile",
                            help="path to the log file")
        parser.add_argument("-g", "--gzip",
                            dest="gzip",
                            action='store_true',
                            default=False,
                            help="gzip mysqldump")
        parser.add_argument("-e", "--encrypt",
                            dest="encrypt",
                            action='store_true',
                            default=False,
                            help="encrypt mysqldump with zlib")
        parser.add_argument("--encrypt-pass",
                            dest="encrypt_pass",
                            type=str,
                            help="use with --encrypt, automatically turn --encrypt on and --gzip off")
        parser.add_argument("--compress-level",
                            dest="compress_level",
                            type=int,
                            default=6,
                            help="use with --encrypt")
        parser.add_argument("--storage",
                            dest="storage",
                            type=str,
                            help="copy mysqldump to Nameserver")
        parser.add_argument("--sendmail",
                            dest="sendmail",
                            type=str,
                            help="send email")
        parser.add_argument("--options",
                            dest="options",
                            type=str,
                            help="options for mysqldump")
        parser.add_argument("--debug",
                            dest="debug",
                            default=False,
                            action='store_true',
                            help="debug")
        args = parser.parse_args()
        # Resolve the config file path: -c overrides the constructor default.
        self._config_file = os.path.abspath(args.config) if args.config else configure_file
        if args.logfile:
            # Make sure the log file's directory exists before logging starts.
            dirlog = os.path.dirname(args.logfile)
            if not os.path.exists(dirlog):
                os.makedirs(dirlog)
        # --encrypt-pass implies --encrypt and disables --gzip.
        if args.encrypt_pass:
            args.gzip = False
            args.encrypt = True
        return args

    def __parse_config(self):
        """Read the INI config file; raise IOError if it does not exist."""
        parser = ConfigParser()
        if not os.path.exists(self._config_file):
            raise IOError('Configuration file not found at: {0}, exit'.format(os.path.abspath(self._config_file)))
        parser.read(self._config_file)
        return parser

    @property
    def logfile(self):
        # Path of the log file, or None (CLI only; not read from config).
        return self._args.logfile

    @property
    def is_debug(self):
        return bool(self._args.debug)

    # --- MySQL connection settings (section "mysql=<database arg>") ---

    @property
    def db_host(self):
        return self._config.get('mysql=' + self._args.database, 'host', fallback='')

    @property
    def db_port(self):
        return self._config.get('mysql=' + self._args.database, 'port', fallback=None)

    @property
    def database(self):
        # Real database name; the -d argument may be just a section alias.
        return self._config.get('mysql=' + self._args.database, 'base', fallback=self._args.database)

    @property
    def db_user(self):
        return self._config.get('mysql=' + self._args.database, 'user', fallback=self._args.username)

    @property
    def db_pass(self):
        return self._config.get('mysql=' + self._args.database, 'pass', fallback=self._args.password)

    @property
    def db_options(self):
        return self._config.get('mysql=' + self._args.database, 'options', fallback='')

    # --- Local backup file naming and retention (section "backup") ---

    @property
    def save_filename(self):
        # Template may use {pref} (timestamp), {base}, and {ext}; {ext} is
        # left unresolved for the caller to fill in.
        filename = self._config.get('backup', 'save_tpl', fallback='{base}.{ext}')
        return filename.format(pref=self.__pref_format(), base=self.database, ext='{ext}')

    @property
    def save_filename_mask(self):
        # Glob mask matching every backup of this database (any date/ext).
        filename = self._config.get('backup', 'save_tpl', fallback='{base}.{ext}')
        return filename.format(pref='*', base=self.database, ext='*')

    @property
    def save_filepath(self):
        path = self._config.get('backup', 'save_dir', fallback=self._args.output)
        if path.find('./') == 0:
            # Relative "./..." paths are resolved against the project root
            # (parent directory of this file's directory).
            dir22 = path.replace("./", "")
            path = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), *dir22.split('/'))
        if not os.path.exists(path):
            os.makedirs(path)
        # NOTE(review): os.makedirs may raise OSError (races/permissions);
        # currently unhandled.
        return path.format(base=self.database)

    @property
    def max_copies(self):
        return int(self._config.get('backup', 'max_copies', fallback=1))

    @property
    def is_gzip(self):
        # Config value (string compare against 'True') OR the -g flag.
        return self._config.get('backup', 'save_gzip', fallback='True') == 'True' or self._args.gzip

    @property
    def is_encrypt(self):
        # Truthy when encrypt_pass is set in config, else falls back to -e.
        return bool(self._config.get('backup', 'encrypt_pass', fallback=self._args.encrypt))

    @property
    def compress_level(self):
        return self._config.get('backup', 'compress_level', fallback=self._args.compress_level)

    @property
    def encrypt_pass(self):
        return self._config.get('backup', 'encrypt_pass', fallback=self._args.encrypt_pass)

    # --- Remote storage settings (section "storage=<--storage arg>") ---
    # Each accessor returns None implicitly when --storage was not given.

    @property
    def use_storage(self):
        return True if self._args.storage else False

    @property
    def storage_transport(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'transport', fallback='webdav')

    @property
    def storage_host(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'host', fallback='')

    @property
    def storage_user(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'user', fallback='')

    @property
    def storage_pass(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'pass', fallback='')

    @property
    def storage_proxy_host(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'proxy_host', fallback=None)

    @property
    def storage_proxy_user(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'proxy_user', fallback=None)

    @property
    def storage_proxy_pass(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'proxy_pass', fallback=None)

    @property
    def storage_cert_path(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'cert_path', fallback=None)

    @property
    def storage_key_path(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'key_path', fallback=None)

    @property
    def storage_token(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'token', fallback=None)

    @property
    def storage_port(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'port', fallback=None)

    @property
    def storage_dir(self):
        if self.use_storage:
            return self._config.get('storage=' + self._args.storage, 'dir', fallback='/')

    @property
    def storage_max_copies(self):
        if self.use_storage:
            return int(self._config.get('storage=' + self._args.storage, 'max_copies', fallback=1))

    # --- Email report settings (section "sendmail=<--sendmail arg>") ---

    @property
    def use_sendmail(self):
        return True if self._args.sendmail else False

    @property
    def email_transport(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'transport', fallback='smtp')

    @property
    def email_from(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'from', fallback=None)

    @property
    def email_to(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'to', fallback=None)

    @property
    def email_cc(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'cc', fallback=None)

    @property
    def email_bcc(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'bcc', fallback=None)

    @property
    def email_subject(self):
        if self.use_sendmail:
            return self._config.get('sendmail=' + self._args.sendmail, 'subject', fallback='')

    # --- Mail delivery settings (shared "sendmail" section) ---

    @property
    def sendmail_api_domain(self):
        if self.use_sendmail:
            # NOTE(review): option name is 'domen' in the config schema —
            # keep as-is for compatibility with existing config files.
            return self._config.get('sendmail', 'domen', fallback=None)

    @property
    def sendmail_api_token(self):
        if self.use_sendmail:
            return self._config.get('sendmail', 'token', fallback=None)

    @property
    def sendmail_host(self):
        if self.use_sendmail:
            return self._config.get('sendmail', 'host', fallback=None)

    @property
    def sendmail_port(self):
        if self.use_sendmail:
            return int(self._config.get('sendmail', 'port', fallback=0))

    @property
    def sendmail_ssl(self):
        if self.use_sendmail:
            return bool(self._config.get('sendmail', 'ssl', fallback='False') == 'True')

    @property
    def sendmail_tls(self):
        if self.use_sendmail:
            return bool(self._config.get('sendmail', 'tls', fallback='False') == 'True')

    @property
    def sendmail_user(self):
        if self.use_sendmail:
            return self._config.get('sendmail', 'user', fallback='')

    @property
    def sendmail_pass(self):
        if self.use_sendmail:
            return self._config.get('sendmail', 'pass', fallback='')

    # --- Attachment size limits ---

    @property
    def email_limit_size_attach(self):
        if self.use_sendmail:
            return self.__format_text_size(self._config.get('sendmail', 'limit_size_source', fallback=0))

    @property
    def email_chunk_max_size(self):
        if self.use_sendmail:
            return self.__format_text_size(self._config.get('sendmail', 'chunk_max_size', fallback=0))

    def __pref_format(self):
        """Return the filename prefix; strftime patterns are expanded."""
        pref = self._config.get('backup', 'save_pref', fallback='%Y%m%d')
        if '%' in pref:
            pref = time.strftime(pref)
        return pref

    @staticmethod
    def __format_text_size(str_size):
        """Convert sizes like '10K'/'5M' to bytes; ints pass through.

        NOTE(review): a bare numeric string (e.g. '100') is returned as a
        string, not an int — confirm callers tolerate that.
        """
        if isinstance(str_size, int):
            return str_size
        symbols = {'K': 2**10, 'M': 2**20}
        letter = str_size[-1].strip().upper()
        if letter in symbols:
            str_size = int(str_size[:-1]) * int(symbols[letter])
        return str_size
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.misc import unbroadcast
import copy
from .wcs import WCS, WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_CELESTIAL
# Doctests in these functions are skipped (their examples are illustrative
# and reference user-supplied functions).
__doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs']
# Public API of this module.
__all__ = ['add_stokes_axis_to_wcs', 'celestial_frame_to_wcs',
           'wcs_to_celestial_frame', 'proj_plane_pixel_scales',
           'proj_plane_pixel_area', 'is_proj_plane_distorted',
           'non_celestial_pixel_scales', 'skycoord_to_pixel',
           'pixel_to_skycoord', 'custom_wcs_to_frame_mappings',
           'custom_frame_to_wcs_mappings', 'pixel_to_pixel',
           'local_partial_pixel_derivatives', 'fit_wcs_from_points']
def add_stokes_axis_to_wcs(wcs, add_before_ind):
    """
    Add a new Stokes axis that is uncorrelated with any other axes.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS to add to
    add_before_ind : int
        Index of the WCS to insert the new Stokes axis in front of.
        To add at the end, do add_before_ind = wcs.wcs.naxis
        The beginning is at position 0.

    Returns
    -------
    A new `~astropy.wcs.WCS` instance with an additional axis
    """
    # WCS.sub uses 1-based axis numbers; 0 requests a brand-new axis.
    axis_order = list(range(1, wcs.wcs.naxis + 1))
    axis_order.insert(add_before_ind, 0)
    new_wcs = wcs.sub(axis_order)
    # Label the freshly inserted axis as Stokes.
    new_wcs.wcs.ctype[add_before_ind] = 'STOKES'
    new_wcs.wcs.cname[add_before_ind] = 'STOKES'
    return new_wcs
def _wcs_to_celestial_frame_builtin(wcs):
    """Map *wcs* to a built-in astropy coordinate frame, or None.

    Default entry in ``WCS_FRAME_MAPPINGS``; returns None when the WCS has
    no celestial axes or matches no known frame.
    """
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import (FK4, FK4NoETerms, FK5, ICRS, ITRS,
                                     Galactic, SphericalRepresentation)
    # Import astropy.time here otherwise setup.py fails before extensions are compiled
    from astropy.time import Time

    # -1 means no longitude/latitude axis: not a celestial WCS.
    if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
        return None
    radesys = wcs.wcs.radesys
    # equinox is NaN when unset in the header.
    if np.isnan(wcs.wcs.equinox):
        equinox = None
    else:
        equinox = wcs.wcs.equinox
    # First four CTYPE characters identify the coordinate pair (e.g. 'RA--').
    xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
    ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
    # Apply logic from FITS standard to determine the default radesys
    if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-':
        if equinox is None:
            radesys = "ICRS"
        elif equinox < 1984.:
            radesys = "FK4"
        else:
            radesys = "FK5"
    if radesys == 'FK4':
        if equinox is not None:
            # FK4 equinoxes are Besselian years.
            equinox = Time(equinox, format='byear')
        frame = FK4(equinox=equinox)
    elif radesys == 'FK4-NO-E':
        if equinox is not None:
            equinox = Time(equinox, format='byear')
        frame = FK4NoETerms(equinox=equinox)
    elif radesys == 'FK5':
        if equinox is not None:
            # FK5 equinoxes are Julian years.
            equinox = Time(equinox, format='jyear')
        frame = FK5(equinox=equinox)
    elif radesys == 'ICRS':
        frame = ICRS()
    else:
        # No (recognized) RADESYS: fall back on the CTYPE pair itself.
        if xcoord == 'GLON' and ycoord == 'GLAT':
            frame = Galactic()
        elif xcoord == 'TLON' and ycoord == 'TLAT':
            # The default representation for ITRS is cartesian, but for WCS
            # purposes, we need the spherical representation.
            frame = ITRS(representation_type=SphericalRepresentation,
                         obstime=wcs.wcs.dateobs or None)
        else:
            frame = None
    return frame
def _celestial_frame_to_wcs_builtin(frame, projection='TAN'):
    """Build a 2-axis WCS for a built-in astropy *frame*, or return None.

    Default entry in ``FRAME_WCS_MAPPINGS``; inverse of
    ``_wcs_to_celestial_frame_builtin``. Only ctype/equinox/radesys (and
    dateobs for ITRS) are filled in.
    """
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import BaseRADecFrame, FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic

    # Create a 2-dimensional WCS
    wcs = WCS(naxis=2)
    if isinstance(frame, BaseRADecFrame):
        xcoord = 'RA--'
        ycoord = 'DEC-'
        # NOTE: FK4NoETerms is checked before FK4 — it is the more specific
        # subclass, so order matters here.
        if isinstance(frame, ICRS):
            wcs.wcs.radesys = 'ICRS'
        elif isinstance(frame, FK4NoETerms):
            wcs.wcs.radesys = 'FK4-NO-E'
            wcs.wcs.equinox = frame.equinox.byear
        elif isinstance(frame, FK4):
            wcs.wcs.radesys = 'FK4'
            wcs.wcs.equinox = frame.equinox.byear
        elif isinstance(frame, FK5):
            wcs.wcs.radesys = 'FK5'
            wcs.wcs.equinox = frame.equinox.jyear
        else:
            # Unknown RA/Dec subclass: let another mapping handle it.
            return None
    elif isinstance(frame, Galactic):
        xcoord = 'GLON'
        ycoord = 'GLAT'
    elif isinstance(frame, ITRS):
        xcoord = 'TLON'
        ycoord = 'TLAT'
        wcs.wcs.radesys = 'ITRS'
        wcs.wcs.dateobs = frame.obstime.utc.isot
    else:
        return None
    wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection]
    return wcs
# Registries of mapping functions, tried in order by the lookup functions
# below. Each entry is a list so custom mappings can be pushed/popped by
# the context managers defined next.
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
    """Context manager that temporarily registers custom WCS->frame mappings.

    On construction the given mapping function(s) are pushed onto
    ``WCS_FRAME_MAPPINGS``; leaving the ``with`` block pops them again.
    A single callable or a list of callables is accepted.
    """

    def __init__(self, mappings=None):
        # Fix: avoid a mutable default argument (`mappings=[]`) — the shared
        # default list object was appended to the global registry by every
        # no-argument instantiation, aliasing state across uses.
        if mappings is None:
            mappings = []
        elif callable(mappings):
            # Accept a bare callable as a convenience.
            mappings = [mappings]
        WCS_FRAME_MAPPINGS.append(mappings)

    def __enter__(self):
        pass

    def __exit__(self, type, value, tb):
        # Pop whatever this instance pushed in __init__.
        WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility alias for the old public name of this class.
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
    """Context manager that temporarily registers custom frame->WCS mappings.

    On construction the given mapping function(s) are pushed onto
    ``FRAME_WCS_MAPPINGS``; leaving the ``with`` block pops them again.
    A single callable or a list of callables is accepted.
    """

    def __init__(self, mappings=None):
        # Fix: avoid a mutable default argument (`mappings=[]`) — the shared
        # default list object was appended to the global registry by every
        # no-argument instantiation, aliasing state across uses.
        if mappings is None:
            mappings = []
        elif callable(mappings):
            # Accept a bare callable as a convenience.
            mappings = [mappings]
        FRAME_WCS_MAPPINGS.append(mappings)

    def __enter__(self):
        pass

    def __exit__(self, type, value, tb):
        # Pop whatever this instance pushed in __init__.
        FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
    """
    For a given WCS, return the coordinate frame that matches the celestial
    component of the WCS.

    Parameters
    ----------
    wcs : :class:`~astropy.wcs.WCS` instance
        The WCS to find the frame for

    Returns
    -------
    frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
        An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
        subclass instance that best matches the specified WCS.

    Notes
    -----
    To extend this function to frames not defined in astropy.coordinates, you
    can write your own function which should take a :class:`~astropy.wcs.WCS`
    instance and should return either an instance of a frame, or `None` if no
    matching frame was found. You can register this function temporarily with::

        >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
        >>> with custom_wcs_to_frame_mappings(my_function):
        ...     wcs_to_celestial_frame(...)
    """
    # Walk every registered mapping function, flattening the registry's
    # list-of-lists; the first non-None result wins.
    for mapping_func in (f for group in WCS_FRAME_MAPPINGS for f in group):
        candidate = mapping_func(wcs)
        if candidate is not None:
            return candidate
    raise ValueError("Could not determine celestial frame corresponding to "
                     "the specified WCS object")
def celestial_frame_to_wcs(frame, projection='TAN'):
    """
    For a given coordinate frame, return the corresponding WCS object.

    Note that the returned WCS object has only the elements corresponding to
    coordinate frames set (e.g. ctype, equinox, radesys).

    Parameters
    ----------
    frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
        An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
        subclass instance for which to find the WCS
    projection : str
        Projection code to use in ctype, if applicable

    Returns
    -------
    wcs : :class:`~astropy.wcs.WCS` instance
        The corresponding WCS object

    Examples
    --------
    ::

        >>> from astropy.wcs.utils import celestial_frame_to_wcs
        >>> from astropy.coordinates import FK5
        >>> frame = FK5(equinox='J2010')
        >>> wcs = celestial_frame_to_wcs(frame)
        >>> wcs.to_header()
        WCSAXES =                    2 / Number of coordinate axes
        CRPIX1  =                  0.0 / Pixel coordinate of reference point
        CRPIX2  =                  0.0 / Pixel coordinate of reference point
        CDELT1  =                  1.0 / [deg] Coordinate increment at reference point
        CDELT2  =                  1.0 / [deg] Coordinate increment at reference point
        CUNIT1  = 'deg'                / Units of coordinate increment and value
        CUNIT2  = 'deg'                / Units of coordinate increment and value
        CTYPE1  = 'RA---TAN'           / Right ascension, gnomonic projection
        CTYPE2  = 'DEC--TAN'           / Declination, gnomonic projection
        CRVAL1  =                  0.0 / [deg] Coordinate value at reference point
        CRVAL2  =                  0.0 / [deg] Coordinate value at reference point
        LONPOLE =                180.0 / [deg] Native longitude of celestial pole
        LATPOLE =                  0.0 / [deg] Native latitude of celestial pole
        RADESYS = 'FK5'                / Equatorial coordinate system
        EQUINOX =               2010.0 / [yr] Equinox of equatorial coordinates

    Notes
    -----
    To extend this function to frames not defined in astropy.coordinates, you
    can write your own function which should take a
    :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass
    instance and a projection (given as a string) and should return either a WCS
    instance, or `None` if the WCS could not be determined. You can register
    this function temporarily with::

        >>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
        >>> with custom_frame_to_wcs_mappings(my_function):
        ...     celestial_frame_to_wcs(...)
    """
    # Walk every registered mapping function, flattening the registry's
    # list-of-lists; the first non-None result wins.
    for mapping_func in (f for group in FRAME_WCS_MAPPINGS for f in group):
        candidate = mapping_func(frame, projection=projection)
        if candidate is not None:
            return candidate
    raise ValueError("Could not determine WCS corresponding to the specified "
                     "coordinate frame.")
def proj_plane_pixel_scales(wcs):
    """
    For a WCS returns pixel scales along each axis of the image pixel at
    the ``CRPIX`` location once it is projected onto the
    "plane of intermediate world coordinates" as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        This function is concerned **only** about the transformation
        "image plane"->"projection plane" and **not** about the
        transformation "celestial sphere"->"projection plane"->"image plane".
        Therefore, this function ignores distortions arising due to
        non-linear nature of most projections.

    .. note::
        In order to compute the scales corresponding to celestial axes only,
        make sure that the input `~astropy.wcs.WCS` object contains
        celestial axes only, e.g., by passing in the
        `~astropy.wcs.WCS.celestial` WCS object.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        A world coordinate system object.

    Returns
    -------
    scale : `~numpy.ndarray`
        A vector (`~numpy.ndarray`) of projection plane increments
        corresponding to each pixel side (axis). The units of the returned
        results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
        `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
        the celestial WCS and can be obtained by inquiring the value
        of `~astropy.wcs.Wcsprm.cunit` property of the input
        `~astropy.wcs.WCS` WCS object.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_area
    """
    # Per-axis scale = Euclidean norm of the corresponding column of the
    # pixel scale matrix.
    scale_matrix = wcs.pixel_scale_matrix
    return np.sqrt(np.sum(scale_matrix ** 2, axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
    """
    For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
    area of the image pixel at the ``CRPIX`` location once it is projected
    onto the "plane of intermediate world coordinates" as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        This function is concerned **only** about the transformation
        "image plane"->"projection plane" and **not** about the
        transformation "celestial sphere"->"projection plane"->"image plane".
        Therefore, this function ignores distortions arising due to
        non-linear nature of most projections.

    .. note::
        In order to compute the area of pixels corresponding to celestial
        axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
        object of the input ``wcs``.  This is different from the
        `~astropy.wcs.utils.proj_plane_pixel_scales` function
        that computes the scales for the axes of the input WCS itself.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        A world coordinate system object.

    Returns
    -------
    area : float
        Area (in the projection plane) of the pixel at ``CRPIX`` location.
        The units of the returned result are the same as the units of
        the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
        and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
        obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
        property of the `~astropy.wcs.WCS.celestial` WCS object.

    Raises
    ------
    ValueError
        Pixel area is defined only for 2D pixels. Most likely the
        `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
        WCS is not a square matrix of second order.

    Notes
    -----
    Depending on the application, square root of the pixel area can be used to
    represent a single pixel scale of an equivalent square pixel
    whose area is equal to the area of a generally non-square pixel.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_scales
    """
    # Pixel area is |det| of the 2x2 celestial pixel scale matrix.
    matrix = wcs.celestial.pixel_scale_matrix
    if matrix.shape != (2, 2):
        raise ValueError("Pixel area is defined only for 2D pixels.")
    determinant = np.linalg.det(matrix)
    return np.abs(determinant)
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
    r"""
    Check whether square image (detector) pixels become distorted when
    projected onto the "plane of intermediate world coordinates" as defined in
    `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    Returns `True` when the transformation from image (detector) coordinates
    to the focal plane coordinates is non-orthogonal, or when the WCS carries
    non-linear (e.g., SIP) distortions; `False` otherwise.

    .. note::
        Since this function is concerned **only** about the transformation
        "image plane"->"focal plane" and **not** about the transformation
        "celestial sphere"->"focal plane"->"image plane",
        this function ignores distortions arising due to non-linear nature
        of most projections.

    Denoting by *C* either the original or the reconstructed (from ``PC``
    and ``CDELT``) CD matrix, orthogonality is verified using the check:

    .. math::
        \left \| \frac{C \cdot C^{\mathrm{T}}}
        {| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        World coordinate system object

    maxerr : float, optional
        Accuracy to which the CD matrix, **normalized** such
        that :math:`|det(CD)|=1`, should be close to being an
        orthogonal matrix as described in the above equation
        (see :math:`\epsilon`).

    Returns
    -------
    distorted : bool
        Returns `True` if focal (projection) plane is distorted and `False`
        otherwise.

    """
    celestial_wcs = wcs.celestial

    # A non-orthogonal linear part alone already implies distortion; only
    # otherwise do we need to look for SIP / lookup-table distortions.
    if not _is_cd_orthogonal(celestial_wcs.pixel_scale_matrix, maxerr):
        return True
    return _has_distortion(celestial_wcs)
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if (pixarea == 0.0):
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return (cd_unitary_err < maxerr)
def non_celestial_pixel_scales(inwcs):
    """
    Calculate the pixel scale along each axis of a non-celestial WCS,
    for example one with mixed spectral and spatial axes.

    Parameters
    ----------
    inwcs : `~astropy.wcs.WCS`
        The world coordinate system object.

    Returns
    -------
    scale : `numpy.ndarray`
        The pixel scale along each axis, as a `~astropy.units.Quantity`
        in degrees.

    Raises
    ------
    ValueError
        If ``inwcs`` is celestial, or if its pixel scale matrix has
        non-zero off-diagonal (rotation) terms.
    """
    if inwcs.is_celestial:
        raise ValueError("WCS is celestial, use celestial_pixel_scales instead")

    scale_matrix = inwcs.pixel_scale_matrix

    # Scales are only well defined per-axis when the matrix is diagonal.
    off_diagonal = np.extract(1 - np.eye(*scale_matrix.shape), scale_matrix)
    if not np.allclose(off_diagonal, 0):
        raise ValueError("WCS is rotated, cannot determine consistent pixel scales")

    return np.abs(np.diagonal(scale_matrix)) * u.deg
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(getattr(wcs, dist_attr) is not None
for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip'])
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode='all'):
    """
    Convert a set of SkyCoord coordinates into pixels.

    Parameters
    ----------
    coords : `~astropy.coordinates.SkyCoord`
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0 or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only including only the core WCS transformation (``'wcs'``).

    Returns
    -------
    xp, yp : `numpy.ndarray`
        The pixel coordinates

    Raises
    ------
    ValueError
        If the WCS has distortions but is not 2-dimensional, if it lacks
        a celestial component, or if ``mode`` is not ``'all'`` or ``'wcs'``.

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """
    # Distortion corrections (SIP / lookup tables) are only handled for a
    # 2-dimensional WCS, so reject anything else up front.
    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS needs
    xw_unit = u.Unit(wcs.wcs.cunit[0])
    yw_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert positions to frame
    coords = coords.transform_to(frame)

    # Extract longitude and latitude. We first try and use lon/lat directly,
    # but if the representation is not spherical or unit spherical this will
    # fail. We should then force the use of the unit spherical
    # representation. We don't do that directly to make sure that we preserve
    # custom lon/lat representations if available.
    try:
        lon = coords.data.lon.to(xw_unit)
        lat = coords.data.lat.to(yw_unit)
    except AttributeError:
        lon = coords.spherical.lon.to(xw_unit)
        lat = coords.spherical.lat.to(yw_unit)

    # Convert to pixel coordinates
    if mode == 'all':
        xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
    elif mode == 'wcs':
        xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None):
    """
    Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
    coordinate.

    Parameters
    ----------
    xp, yp : float or `numpy.ndarray`
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0 or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        only including only the core WCS transformation (``'wcs'``).
    cls : class or None
        The class of object to create. Should be a
        `~astropy.coordinates.SkyCoord` subclass. If None, defaults to
        `~astropy.coordinates.SkyCoord`.

    Returns
    -------
    coords : Whatever ``cls`` is (a subclass of `~astropy.coordinates.SkyCoord`)
        The celestial coordinates

    Raises
    ------
    ValueError
        If the WCS has distortions but is not 2-dimensional, if it lacks
        a celestial component, or if ``mode`` is not ``'all'`` or ``'wcs'``.

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord, UnitSphericalRepresentation

    # we have to do this instead of actually setting the default to SkyCoord
    # because importing SkyCoord at the module-level leads to circular
    # dependencies.
    if cls is None:
        cls = SkyCoord

    # Distortion corrections are only handled for a 2-dimensional WCS.
    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS gives
    lon_unit = u.Unit(wcs.wcs.cunit[0])
    lat_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert pixel coordinates to celestial coordinates
    if mode == 'all':
        lon, lat = wcs.all_pix2world(xp, yp, origin)
    elif mode == 'wcs':
        lon, lat = wcs.wcs_pix2world(xp, yp, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    # Add units to longitude/latitude
    lon = lon * lon_unit
    lat = lat * lat_unit

    # Create a SkyCoord-like object
    data = UnitSphericalRepresentation(lon=lon, lat=lat)
    coords = cls(frame.realize_frame(data))

    return coords
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
    """
    Return a correlation matrix between the pixel coordinates and the
    high level world coordinates, along with the list of high level world
    coordinate classes.

    The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
    number of high level world coordinates.

    Parameters
    ----------
    wcs : APE 14 compliant WCS object
        Must expose ``low_level_wcs``, ``pixel_n_dim`` and ``world_n_dim``.

    Returns
    -------
    matrix : `numpy.ndarray` of bool
        ``matrix[i, j]`` is `True` when high-level world object ``i`` depends
        on pixel dimension ``j``.
    classes : list
        The world coordinate class associated with each row of ``matrix``.
    """
    # We basically want to collapse the world dimensions together that are
    # combined into the same high-level objects.

    # Get the following in advance as getting these properties can be expensive
    all_components = wcs.low_level_wcs.world_axis_object_components
    all_classes = wcs.low_level_wcs.world_axis_object_classes
    axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix

    # The first element of each component tuple names the high-level object
    # the axis belongs to; de-duplicate while keeping the original order.
    components = _unique_with_order_preserved([c[0] for c in all_components])

    matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)

    for iworld in range(wcs.world_n_dim):
        iworld_unique = components.index(all_components[iworld][0])
        # OR together the correlation rows of all low-level world axes that
        # feed the same high-level object.
        matrix[iworld_unique] |= axis_correlation_matrix[iworld]

    classes = [all_classes[component][0] for component in components]

    return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
    """
    Correlation matrix between the input and output pixel coordinates for a
    pixel -> world -> pixel transformation specified by two WCS instances.

    The first WCS specified is the one used for the pixel -> world
    transformation and the second WCS specified is the one used for the world ->
    pixel transformation. The shape of the matrix is
    ``(n_pixel_out, n_pixel_in)``.

    Raises
    ------
    ValueError
        If the two WCS describe a different number of world coordinates,
        if their world coordinate types do not match, or if matching them
        is ambiguous.
    """
    matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
    matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)

    if len(classes1) != len(classes2):
        raise ValueError("The two WCS return a different number of world coordinates")

    # Check if classes match uniquely
    unique_match = True
    mapping = []
    for class1 in classes1:
        matches = classes2.count(class1)
        if matches == 0:
            raise ValueError("The world coordinate types of the two WCS do not match")
        elif matches > 1:
            unique_match = False
            break
        else:
            mapping.append(classes2.index(class1))

    if unique_match:
        # Classes are unique, so we need to re-order matrix2 along the world
        # axis using the mapping we found above.
        matrix2 = matrix2[mapping]
    elif classes1 != classes2:
        # Duplicated classes are only acceptable when both WCS list them in
        # exactly the same order.
        raise ValueError("World coordinate order doesn't match and automatic matching is ambiguous")

    # Boolean matrix product: output pixel i correlates with input pixel j
    # when they share at least one (matched) world coordinate.
    matrix = np.matmul(matrix2.T, matrix1)

    return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
    """
    Transform pixel coordinates in a dataset with a WCS to pixel coordinates
    in another dataset with a different WCS.

    This function is designed to efficiently deal with input pixel arrays that
    are broadcasted views of smaller arrays, and is compatible with any
    APE14-compliant WCS.

    Parameters
    ----------
    wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the original dataset which complies with the
        high-level shared APE 14 WCS API.
    wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the target dataset which complies with the
        high-level shared APE 14 WCS API.
    *inputs :
        Scalars or arrays giving the pixel coordinates to transform.

    Returns
    -------
    pixel : scalar/array or list of arrays
        The transformed pixel coordinates — a single value when ``wcs_out``
        has one pixel dimension, otherwise one entry per output dimension.
    """
    # Shortcut for scalars
    if np.isscalar(inputs[0]):
        world_outputs = wcs_in.pixel_to_world(*inputs)
        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs,)
        return wcs_out.world_to_pixel(*world_outputs)

    # Remember original shape
    original_shape = inputs[0].shape

    # Work out which output pixel dimensions depend on which input pixel
    # dimensions, then split the transformation into independent groups so
    # that each group can be computed on the smallest possible arrays.
    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    split_info = _split_matrix(matrix)

    outputs = [None] * wcs_out.pixel_n_dim

    for (pixel_in_indices, pixel_out_indices) in split_info:

        pixel_inputs = []
        for ipix in range(wcs_in.pixel_n_dim):
            if ipix in pixel_in_indices:
                # Relevant dimension: strip any broadcasting so only the
                # unique values get transformed.
                pixel_inputs.append(unbroadcast(inputs[ipix]))
            else:
                # Uncorrelated dimension: a single representative value
                # suffices.
                pixel_inputs.append(inputs[ipix].flat[0])

        pixel_inputs = np.broadcast_arrays(*pixel_inputs)

        world_outputs = wcs_in.pixel_to_world(*pixel_inputs)

        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs,)

        pixel_outputs = wcs_out.world_to_pixel(*world_outputs)

        if wcs_out.pixel_n_dim == 1:
            pixel_outputs = (pixel_outputs,)

        for ipix in range(wcs_out.pixel_n_dim):
            if ipix in pixel_out_indices:
                # Re-broadcast back to the caller's original shape.
                outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)

    return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
    """
    Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
    ``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
    pixel position.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to evaluate the derivatives for.
    *pixel : float
        The scalar pixel coordinates at which to evaluate the derivatives.
    normalize_by_world : bool
        If `True`, the matrix is normalized so that for each world entry
        the derivatives add up to 1.

    Returns
    -------
    derivatives : `numpy.ndarray`
        The ``(world_n_dim, pixel_n_dim)`` matrix of partial derivatives.
    """
    # World coordinates at the reference pixel position.
    ref_pix = np.array(pixel)
    ref_world = np.array(wcs.pixel_to_world_values(*ref_pix))

    # Forward finite differences with a step of one pixel along each axis.
    jacobian = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
    for axis in range(wcs.pixel_n_dim):
        stepped_pix = ref_pix.copy()
        stepped_pix[axis] += 1
        stepped_world = np.array(wcs.pixel_to_world_values(*stepped_pix))
        jacobian[:, axis] = stepped_world - ref_world

    if normalize_by_world:
        # NOTE(review): this sums along axis=0 (over world entries per pixel
        # axis), which only broadcasts for square matrices — preserved as-is;
        # confirm intended axis before changing.
        jacobian /= jacobian.sum(axis=0)[:, np.newaxis]

    return jacobian
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
    """
    Objective function for fitting linear terms.

    Parameters
    ----------
    params : array
        6 element array. First 4 elements are the flattened CD matrix,
        last 2 are CRPIX.
    lon, lat: array
        Sky coordinates.
    x, y: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object

    Returns
    -------
    array
        Concatenated longitude and latitude residuals.
    """
    cd_flat = params[0:4]
    crpix = params[4:6]

    # Install the candidate linear terms on the WCS before evaluating it.
    # Note: this mutates w_obj in place, as the optimizer expects.
    w_obj.wcs.cd = ((cd_flat[0], cd_flat[1]), (cd_flat[2], cd_flat[3]))
    w_obj.wcs.crpix = crpix

    fit_lon, fit_lat = w_obj.wcs_pix2world(x, y, 0)
    return np.concatenate((lon - fit_lon, lat - fit_lat))
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
    """ Objective function for fitting SIP.

    Parameters
    ----------
    params : array
        Fittable parameters. First 2 elements are CRPIX, the next 4 are the
        flattened CD matrix, followed by the A and then the B SIP
        coefficients (``len(coeff_names)`` of each).
    lon, lat: array
        Sky coordinates.
    u, v: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    order : int
        SIP polynomial order (same for A and B).
    coeff_names : list of str
        Index suffixes (e.g. ``'2_0'``) identifying each SIP coefficient.

    Returns
    -------
    resids : array
        Concatenated x and y residuals in intermediate world coordinates.
    """
    from ..modeling.models import SIP, InverseSIP  # here to avoid circular import

    # unpack params
    crpix = params[0:2]
    cdx = params[2:6].reshape((2, 2))
    a_params = params[6:6+len(coeff_names)]
    b_params = params[6+len(coeff_names):]

    # assign to wcs, used for transformations in this function
    w_obj.wcs.cd = cdx
    w_obj.wcs.crpix = crpix

    # Rebuild the SIP coefficient dictionaries from the flat parameter vector.
    a_coeff, b_coeff = {}, {}
    for i in range(len(coeff_names)):
        a_coeff['A_' + coeff_names[i]] = a_params[i]
        b_coeff['B_' + coeff_names[i]] = b_params[i]

    sip = SIP(crpix=crpix, a_order=order, b_order=order,
              a_coeff=a_coeff, b_coeff=b_coeff)
    fuv, guv = sip(u, v)
    # Forward model: pixel -> intermediate world via SIP correction + CD.
    xo, yo = np.dot(cdx, np.array([u+fuv-crpix[0], v+guv-crpix[1]]))

    # use all_world2pix in case `projection` contains a distortion table
    x, y = w_obj.all_world2pix(lon, lat, 0)
    x, y = np.dot(w_obj.wcs.cd, (x-w_obj.wcs.crpix[0], y-w_obj.wcs.crpix[1]))

    resids = np.concatenate((x-xo, y-yo))
    # to avoid bad results if near 360 -> 0 degree crossover
    resids[resids > 180] = 360 - resids[resids > 180]
    resids[resids < -180] = 360 + resids[resids < -180]

    return resids
def fit_wcs_from_points(xy, world_coords, proj_point='center',
                        projection='TAN', sip_degree=None):
    """
    Given two matching sets of coordinates on detector and sky,
    compute the WCS.

    Fits a WCS object to matched set of input detector and sky coordinates.
    Optionally, a SIP can be fit to account for geometric
    distortion. Returns an `~astropy.wcs.WCS` object with the best fit
    parameters for mapping between input pixel and sky coordinates.

    The projection type (default 'TAN') can passed in as a string, one of
    the valid three-letter projection codes - or as a WCS object with
    projection keywords already set. Note that if an input WCS has any
    non-polynomial distortion, this will be applied and reflected in the
    fit terms and coefficients. Passing in a WCS object in this way essentially
    allows it to be refit based on the matched input coordinates and projection
    point, but take care when using this option as non-projection related
    keywords in the input might cause unexpected behavior.

    Notes
    -----
    - The fiducial point for the spherical projection can be set to 'center'
      to use the mean position of input sky coordinates, or as an
      `~astropy.coordinates.SkyCoord` object.
    - Units in all output WCS objects will always be in degrees.
    - If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
      objects passed in for ``world_coords`` and ``proj_point``, the frame for
      ``world_coords`` will override as the frame for the output WCS.
    - If a WCS object is passed in to ``projection`` the CD/PC matrix will
      be used as an initial guess for the fit. If this is known to be
      significantly off and may throw off the fit, set to the identity matrix
      (for example, by doing wcs.wcs.pc = [(1., 0.,), (0., 1.)])

    Parameters
    ----------
    xy : tuple of two `numpy.ndarray`
        x & y pixel coordinates.
    world_coords : `~astropy.coordinates.SkyCoord`
        Skycoord object with world coordinates.
    proj_point : 'center' or `~astropy.coordinates.SkyCoord`
        Defaults to 'center', in which the geometric center of input world
        coordinates will be used as the projection point. To specify an exact
        point for the projection, a Skycoord object with a coordinate pair can
        be passed in. For consistency, the units and frame of these coordinates
        will be transformed to match ``world_coords`` if they don't.
    projection : str or `~astropy.wcs.WCS`
        Three letter projection code, of any of standard projections defined
        in the FITS WCS standard. Optionally, a WCS object with projection
        keywords set may be passed in.
    sip_degree : None or int
        If set to a non-zero integer value, will fit SIP of degree
        ``sip_degree`` to model geometric distortion. Defaults to None, meaning
        no distortion corrections will be fit.

    Returns
    -------
    wcs : `~astropy.wcs.WCS`
        The best-fit WCS to the points given.

    Raises
    ------
    ValueError
        If ``proj_point``, ``projection`` or ``sip_degree`` is invalid, or if
        a SkyCoord ``proj_point`` does not contain exactly one point.
    """
    from astropy.coordinates import SkyCoord  # here to avoid circular import
    import astropy.units as u
    from .wcs import Sip
    from scipy.optimize import least_squares

    xp, yp = xy
    try:
        lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
    except AttributeError:
        unit_sph = world_coords.unit_spherical
        lon, lat = unit_sph.lon.deg, unit_sph.lat.deg

    # verify input
    if (proj_point != 'center') and (type(proj_point) != type(world_coords)):
        # BUG FIX: the original message was missing a space after "an".
        raise ValueError("proj_point must be set to 'center', or an "
                         "`~astropy.coordinates.SkyCoord` object with "
                         "a pair of points.")
    if proj_point != 'center' and proj_point.size != 1:
        # Was an `assert`, which disappears under ``python -O``; raise instead.
        raise ValueError("proj_point must contain exactly one coordinate pair.")

    proj_codes = [
        'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
        'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
        'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
    ]
    if isinstance(projection, str):
        if projection not in proj_codes:
            # BUG FIX: previously ValueError received two arguments, so the
            # message rendered as a tuple; build a single string instead.
            raise ValueError("Must specify valid projection code from list of "
                             "supported types: " + ', '.join(proj_codes))
        # empty wcs to fill in with fit values
        wcs = celestial_frame_to_wcs(frame=world_coords.frame,
                                     projection=projection)
    else:  # if projection is not a string, it should be a WCS object: use as template
        wcs = copy.deepcopy(projection)
        # TODO(review): this assigns to the WCS wrapper, not ``wcs.wcs.cdelt``
        # — confirm the intended target before changing it.
        wcs.cdelt = (1., 1.)  # make sure cdelt is 1
        wcs.sip = None
        # Change PC to CD, since cdelt will be set to 1
        if wcs.wcs.has_pc():
            wcs.wcs.cd = wcs.wcs.pc
            wcs.wcs.__delattr__('pc')

    if sip_degree is not None and not isinstance(sip_degree, int):
        raise ValueError("sip_degree must be None, or integer.")

    # set pixel_shape to span of input points
    wcs.pixel_shape = (xp.max()-xp.min(), yp.max()-yp.min())

    # determine CRVAL from input
    def close(l, p):
        # Value of ``p`` at the position where ``l`` is closest to zero.
        return p[np.argmin(np.abs(l))]

    if str(proj_point) == 'center':  # use center of input points
        sc1 = SkyCoord(lon.min()*u.deg, lat.max()*u.deg)
        sc2 = SkyCoord(lon.max()*u.deg, lat.min()*u.deg)
        pa = sc1.position_angle(sc2)
        sep = sc1.separation(sc2)
        midpoint_sc = sc1.directional_offset_by(pa, sep/2)
        wcs.wcs.crval = ((midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg))
        wcs.wcs.crpix = ((xp.max()+xp.min())/2., (yp.max()+yp.min())/2.)
    elif proj_point is not None:  # convert units, initial guess for crpix
        # BUG FIX: ``transform_to`` returns a new object; the result was
        # previously discarded, leaving proj_point in its original frame.
        proj_point = proj_point.transform_to(world_coords)
        wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
        # BUG FIX: the y-axis guess previously used ``lon`` instead of ``lat``.
        wcs.wcs.crpix = (close(lon-wcs.wcs.crval[0], xp),
                         close(lat-wcs.wcs.crval[1], yp))

    # fit linear terms, assign to wcs; the template's current CD matrix and
    # CRPIX serve as the initial guess.
    p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
    fit = least_squares(_linear_wcs_fit, p0,
                        args=(lon, lat, xp, yp, wcs))
    wcs.wcs.crpix = np.array(fit.x[4:6])
    wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))

    # fit SIP, if specified. Only fit forward coefficients
    if sip_degree:
        degree = sip_degree
        if '-SIP' not in wcs.wcs.ctype[0]:
            wcs.wcs.ctype = [x + '-SIP' for x in wcs.wcs.ctype]

        # All coefficient name suffixes i_j with 1 < i+j <= degree.
        coef_names = ['{0}_{1}'.format(i, j) for i in range(degree+1)
                      for j in range(degree+1) if (i+j) < (degree+1) and
                      (i+j) > 1]
        p0 = np.concatenate((np.array(wcs.wcs.crpix), wcs.wcs.cd.flatten(),
                             np.zeros(2*len(coef_names))))

        fit = least_squares(_sip_fit, p0,
                            args=(lon, lat, xp, yp, wcs, degree, coef_names))
        coef_fit = (list(fit.x[6:6+len(coef_names)]),
                    list(fit.x[6+len(coef_names):]))

        # put fit values in wcs
        wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
        wcs.wcs.crpix = fit.x[0:2]

        a_vals = np.zeros((degree+1, degree+1))
        b_vals = np.zeros((degree+1, degree+1))
        for coef_name in coef_names:
            a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
            b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)

        wcs.sip = Sip(a_vals, b_vals, np.zeros((degree+1, degree+1)),
                      np.zeros((degree+1, degree+1)), wcs.wcs.crpix)

    return wcs
| |
"""Network of GrFNNs
This module provides the necessary code to build a model connecting
multiple GrFNNs. Connections can be made within a GrFNN or between pairs
of them.
More importantly, it provides a method to run the model (process a
stimulus), including the ability to learn the connections between
GrFNNs.
To Do:
- Re-implement learning
"""
import sys
import json
import warnings
import numpy as np
from scipy.stats import norm
import logging
logger = logging.getLogger('pygrfnn.network')
from pygrfnn.utils import nl
from pygrfnn.utils import fareyratio
from pygrfnn.defines import COMPLEX, PI, PI_2
from pygrfnn.grfnn import GrFNN
from pygrfnn.grfnn import compute_input
from pygrfnn.grfnn import grfnn_update_event
from pygrfnn.oscillator import Zparam
from pygrfnn.resonances import threeFreqMonomials
def make_connections(source, dest, strength=1.0, range=1.02,
                     modes=None, mode_amps=None,
                     complex_kernel=False, self_connect=True, **kwargs):
    """Creates a connection matrix, that connects source layer to destination
    layer.

    Args:
        source (:class:`.GrFNN`): source GrFNN (connections will be made
            between ``source`` and ``dest``)
        dest (:class:`.GrFNN`): destination GrFNN (connections will be
            made between ``source`` and ``dest``)
        strength (``float``): connection strength (multiplicative real
            factor)
        range (``float``): defines the standard deviation to use in the connections
            ("spread" them with neighbors). It is expressed as a ratio, to
            account for the log scale of the oscillators' frequency. Must be
            ``> 1.0``. (Note: intentionally shadows the builtin ``range``;
            part of the public signature, so it cannot be renamed safely.)
        modes (:class:`numpy.ndarray`): frequency modes to connect
            (e.g. [1/3, 1/2, 1, 2, 3]). If ``None``, it will be set to
            ``[1]``
        mode_amps (:class:`numpy.ndarray`): amplitude for each mode in
            ``modes`` (e.g. [.5, .75, 1, .75, .5]). If ``None``, it will be set to
            ``[1] * len(modes)``
        complex_kernel (``bool``): If ``True``, the connections will be
            complex (i.e. include phase information). Otherwise, the
            connections will be real-valued weights.
        self_connect (``bool``): if ``False``, the connection from ``source.f[i]``
            to ``dest.f[j]`` (where ``source_f[i] == dest_f[j]``) will be set to
            0

    Returns:
        :class:`numpy.ndarray`: Connection matrix (rows index destination and
            columns index source). In other words, to obtain the state at the
            destination, you must use ``C.dot(source.z)``, where ``C`` is the
            connection matrix.
    """
    # matrix (2D array) of relative frequencies
    # source is indexed in columns and destination in rows. That is,
    # RF(i,j) specifies the relative frequency of source_f[j] w.r.t.
    # dest_f[i]
    [FS, FT] = np.meshgrid(source.f, dest.f)
    RF = FT/FS
    logRF = np.log2(RF)

    assert RF.shape == (len(dest.f), len(source.f))

    # matrix of connections
    # connection matrices index source in rows and destination in
    # columns. That is, conn(i,j) specifies the connection weight from
    # the i-th element to the j-th element
    conns = np.zeros(RF.shape, dtype=COMPLEX)

    if modes is None:
        modes = [1]
        logger.info('No modes received. Setting single mode 1:1')
    if mode_amps is None:
        mode_amps = [1.0] * len(modes)
        logger.info('No mode amplitudes received. '
                    'Setting all modes to amplitude of 1')
    assert len(modes) == len(mode_amps)

    # per-mode spread (in octaves), derived from the `range` ratio
    sigmas = np.abs(np.log2(range))*np.ones(len(modes))
    log_modes = np.log2(modes)

    # oscillators per octave (assumes log-spaced oscillator frequencies),
    # used to normalize the Gaussian kernel amplitude
    per = np.floor(len(source.f)/(np.log2(source.f[-1])-np.log2(source.f[0])))
    df = 1.0/per

    # Make self connections using a Gaussian distribution
    for m, a, s in zip(log_modes, mode_amps, sigmas):
        # magnitude: Gaussian bump in log-frequency centered at the mode
        R = a * norm.pdf(logRF, m, s) * df
        if complex_kernel:
            # phase: scaled Gaussian CDF spanning (-pi/2, pi/2)
            Q = PI_2*(2.0*norm.cdf(logRF, m, s)-1)
        else:
            Q = np.zeros(R.shape)
        if not self_connect:
            # zero-out connections between same-frequency oscillators
            R[RF == 1] = 0
            Q[RF == 1] = 0
        conns += R * np.exp(1j*Q)

    logger.info(("Created connection matrix between"
                 " '{}' and '{}' ({}x{})").format(source.name,
                                                  dest.name,
                                                  *conns.shape))

    return strength * conns
class DuplicatedLayer(Exception):
    """Raised when attempting to add a previously added layer to a network.

    Attributes:
        layer (:class:`.GrFNN`): the layer that was already present
    """

    def __init__(self, layer):
        # Keep a reference to the offending layer so callers can inspect it.
        self.layer = layer
class UnknownLayer(Exception):
    """Raised when attempting to use a layer the network does not know about.

    Attributes:
        layer (:class:`.GrFNN`): the unrecognized layer
    """

    def __init__(self, layer):
        self.layer = layer

    def __str__(self):
        # Remind the user that layers must be registered before use.
        return ("Unknown layer %s. Did you forget to call "
                "'add_layer(layer)'?" % (repr(self.layer)))
class GrFNNExplosion(Exception):
    """Raised when an oscillator in a GrFNN explodes (becomes `inf` or `nan`).

    Attributes:
        explosion_type (`string`): `inf` or `nan`
        layer (:class:`.GrFNN`): the GrFNN containing the exploding oscillator
    """

    def __init__(self, explosion_type, layer):
        self.explosion_type = explosion_type
        self.layer = layer

    def __str__(self):
        return ("At least one oscillator in layer {} "
                "exploded ({})".format(repr(self.layer), self.explosion_type))
class Cparam(object):
    """Convenience class to encapsulate connectivity learning parameters.

    Attributes:
        l: (``float``): linear forgetting rate :math:`\\lambda`
        m1: (``float``): non-linear forgetting rate 1 :math:`\\mu_1`
        m2: (``float``): non-linear forgetting rate 2 :math:`\\mu_2`
        k: (``float``): learning rate :math:`\\kappa`
        e: (``float``): Coupling strength :math:`\\varepsilon`

    ToDo:
        Revise this (learning is probably broken, as it hasn't been updated
        in a while)

    Note:
        This class is analogous to :class:`.Zparam`
    """

    def __init__(self, lmbda=-1.0, mu1=0.0, mu2=0.0,
                 kappa=1.0, epsilon=1.0):
        """Constructor.

        Args:
            lmbda (``float``): :math:`\\lambda` (defaults to: -1.0) (**this is
                not a typo: `lambda` is a keyword in python, so we used a
                misspelled version of the word**)
            mu1 (``float``): :math:`\\mu_1` (defaults to: 0.0)
            mu2 (``float``): :math:`\\mu_2` (defaults to: 0.0)
            kappa (``float``): :math:`\\kappa` (defaults to: 1.0)
            epsilon (``float``): :math:`\\varepsilon` (defaults to: 1.0)
        """
        logger.warning('Apparently you want to use learning (plasticity). '
                       'This feature is probably not working properly '
                       '(if at all). YOU HAVE BEEN WARNED!')
        self.l = lmbda
        self.m1 = mu1
        self.m2 = mu2
        self.k = kappa
        self.e = epsilon
        # Cache sqrt(epsilon): it shows up repeatedly in the coupling math.
        self.sqe = np.sqrt(self.e)

    def __repr__(self):
        template = ("Cparams:\n"
                    "\tlambda: {0}\n"
                    "\tmu_1: {1}\n"
                    "\tmu_2: {2}\n"
                    "\tkappa: {3}\n"
                    "\tepsilon: {4}\n")
        return template.format(self.l, self.m1, self.m2, self.k, self.e)
class Connection(object):
    """
    Connection object

    Args:
        source (:class:`.GrFNN`): source layer
        destination (:class:`.GrFNN`): destination layer
        matrix (:class:`numpy.ndarray`): connection matrix
        conn_type (``string``): type of GrFNN connections to use. Possible values:
            ``allfreq``, ``all2freq``, ``1freq``, ``2freq``, ``3freq``
        self_connect (``bool``): if ``False``, the diagonal of the
            matrix is kept to 0 (even when learning is enabled)
        weight (``float``): frequency weight factor
        learn_params (:class:`.Cparam`): learning params. No learning is performed
            when set to ``None``.
        conn_params (``dict`` or ``None``): optional extra parameters for the
            connection type: ``tol`` for ``2freq``; ``N``, ``tol`` and
            ``lowest_order_only`` for ``3freq``.

    Attributes:
        source (:class:`.GrFNN`): source layer
        destination (:class:`.GrFNN`): destination layer
        matrix (:class:`numpy.ndarray`): connection matrix
        cparams (:class:`.Cparam`): Learning params (`None` means no learning)
        self_connect (``bool``): If ``False``, the connection weights connecting
            two oscillators of the same frequency will be set to 0
        RF (:class:`numpy.ndarray`): array of frequency ratio.
            ``RF[i,j] = dest.f[i]/source.f[j]``
        farey_num (:class:`numpy.ndarray`): Farey numerator for the
            frequency relationship ``RF[i,j]`` (only set when using ``2freq``
            coupling).
        farey_den (:class:`numpy.ndarray`): Farey denominator for the
            frequency relationship ``RF[i,j]`` (only set when using ``2freq``
            coupling).
        monomials (``list``): List of :class:`python.namedtuples` of the form
            ``(indices, exponents)``. There is one ``namedtuple`` for each
            oscillator in ``destination`` GrFNN. Each tuple is formed by two
            :class:`numpy.ndarray` with indices and exponents of a 3-freq
            monomial, each one of size Nx3, where N is the number of monomials
            associated to the corresponding oscillator in ``destination``. This
            attribute is only set for ``3freq`` connection type.

    Warning:
        Learning has not been updated lately, so it is probably broken.

    Warning:
        ``1freq`` connectivity is not implemented
    """

    def __init__(self,
                 source,
                 destination,
                 matrix,
                 conn_type,
                 self_connect,
                 weight=1.0,
                 learn_params=None,
                 conn_params=None):
        self.source = source
        self.destination = destination
        # copy so later (learning) updates don't mutate the caller's matrix
        self.matrix = matrix.copy()
        self.cparams = learn_params
        self.self_connect = self_connect
        self.conn_type = conn_type

        # this is only for 'log' spaced GrFNNs
        self.weights = weight * destination.f

        # RF[i,j] is the frequency of destination oscillator i relative to
        # source oscillator j
        [FS, FT] = np.meshgrid(self.source.f, self.destination.f)
        self.RF = FT/FS

        self.farey_num, self.farey_den = None, None
        self.monomials = None

        if not self.self_connect:
            # zero the weights between same-frequency oscillators
            self.matrix[self.RF == 1.0] = 0

        if conn_type == '1freq':
            raise Exception("1freq connection not yet implemented")
        elif conn_type == '2freq':
            # compute integer relationships between frequencies of both layers
            tol = 0.05
            # BUG FIX: previously read the undefined name `connection_params`
            # (NameError whenever conn_params was supplied for 2freq)
            if conn_params is not None and 'tol' in conn_params:
                tol = conn_params['tol']
            logger.info('Setting up 2-freq coupling between '
                        '"{}" and "{}", using tol={}'.format(source.name,
                                                             destination.name,
                                                             tol))
            self.farey_num, self.farey_den, _, _ = fareyratio(self.RF, tol)
        elif conn_type == '3freq':
            # default params
            N = 3
            tol = 5e-3
            lowest_order_only = True
            if conn_params is not None:
                N = conn_params.get('N', N)
                tol = conn_params.get('tol', tol)
                lowest_order_only = conn_params.get('lowest_order_only',
                                                    lowest_order_only)
            logger.info('Setting up 3-freq coupling between '
                        '"{}" and "{}", with params N={}, '
                        'tol = {} and '
                        'lowest_order_only = {}'.format(source.name,
                                                        destination.name,
                                                        N,
                                                        tol,
                                                        lowest_order_only))
            self.monomials = threeFreqMonomials(self.source.f,
                                                self.destination.f,
                                                self_connect,
                                                N=N,
                                                tol=tol,
                                                lowest_order_only=lowest_order_only)

            def avg():
                # average number of monomials per destination oscillator
                total = 0
                for m in self.monomials:
                    total += m.indices.shape[0]
                return total/len(self.monomials)

            logger.info('Found {} monomials per oscillator (avg)'.format(avg()))

    def __repr__(self):
        return "Connection from {0} " \
            "(self_connect={1})\n".format(self.source.name,
                                          self.self_connect)
class Model(object):
    """
    A network of GrFNNs.

    Different GrFNNs are referred to as layers. Layers can be added as
    visible or hidden; the former means that it will directly receive
    external stimulus, while the later implies that the inputs will
    consist only of internal connections (internal to the layer or from
    other layers in the network).

    Attributes:
        connections (``dict``): dictionary of
            connections. *Keys* are destination layers (:class:`.GrFNN`) and
            *values* are a list of connections (:class:`.Connection`).
    """

    def __init__(self, name=""):
        """
        **Model constructor**

        Args:
            name (``string``): (optional) model name (empty by default). This
                argument is required when defining a model from a dictionary or
                JSON object (see :func:`.modelFromJSON`)
        """
        self.name = name
        # list of (GrFNN layer, external input channel or None) tuples
        self._layers = []
        # connections, keyed by destination layer
        self.connections = {}

    def __repr__(self):
        return "Model:\n" \
               "\tlayers: {0}\n" \
               "\tconnections: {1}\n".format(len(self.layers()),
                                             len(self.connections))

    def layers(self):
        """
        Return a list of GrFNNs in the model.
        """
        return [t[0] for t in self._layers]

    def add_layer(self, layer, input_channel=None):
        """
        Add a GrFNN (layer) to the model

        Args:
            layer (:class:`.GrFNN`): the GrFNN to add to the model
            input_channel (`int` or `None`): If ``None`` (default), no external
                signal (stimulus) will be fed into this layer.
                Otherwise identifies the input channel to be fed into
                the layer.

        Raises:
            DuplicatedLayer: if ``layer`` was already added to the model
        """
        if layer not in self.layers():
            self._layers.append((layer, input_channel))
            self.connections[layer] = []    # list of connected layers.
                                            # List elems should be tuples
                                            # of the form (source_layer,
                                            # connextion_matrix)
            # auto-name anonymous layers after their 1-based position
            if layer.name == '':
                layer.name = 'Layer {}'.format(len(self._layers))
        else:
            raise DuplicatedLayer(layer)

    def connect_layers(self,
                       source,
                       destination,
                       matrix,
                       connection_type,
                       weight=1.0,
                       learn=None,
                       self_connect=False,
                       connection_params=None):
        """
        Connect two layers in a :class:`Model`.

        Args:
            source (:class:`.GrFNN`): source layer (connections will be
                made from this layer to *destination*)
            destination (:class:`.GrFNN`): destination layer
                (connections will be made from *source* layer to this
                layer)
            matrix (:class:`numpy.array`): connection matrix
            connection_type (``string``): type of connection (e.g. ``1freq``,
                ``2freq``, ``3freq``, ``allfreq``, ``all2freq``)
            weight (``float``): connection weight factor.
            learn (:class:`.Cparams`): Learning parameters. If ``None``, no
                learning will occur.
            self_connect (``bool``): whether or not to connect oscillators of the
                same frequency (defaults to ``False``).
            connection_params (``dict``): dictionary with connection_type
                specific parameters

        Returns:
            :class:`.Connection`: connection object created

        Raises:
            UnknownLayer: if either endpoint was never added to the model

        Warning:
            ``1freq`` connection is not implemented.
        """
        if source not in self.layers():
            raise UnknownLayer(source)
        if destination not in self.layers():
            raise UnknownLayer(destination)
        conn = Connection(source, destination, matrix, connection_type,
                          weight=weight, learn_params=learn,
                          self_connect=self_connect,
                          conn_params=connection_params)
        self.connections[destination].append(conn)
        return conn

    def run(self, signal, t, dt, learn=False):
        """Run the model for a given stimulus, using "intertwined" RK4

        Args:
            signal (:class:`numpy.ndarray`): external stimulus. If
                multichannel, the first dimension indexes time and the
                second one indexes channels
            t (:class:`numpy.ndarray`): time vector corresponding to the
                signal
            dt (``float``): sampling period of ``signal``
            learn (``bool``): enable connection learning

        Raises:
            GrFNNExplosion: if any layer's state becomes NaN or infinite

        Warning:
            Learning has not been updated lately, so it is probably broken.

        Note:
            Intertwined means that a singe RK4 step needs to be run for
            all layers in the model, before running the next RK4 step.
            This is due to the fact that :math:`\\dot{z} = f(t, x(t), z(t))`.
            The "problem" is that :math:`x(t)` is also a function of
            :math:`z(t)`, so it needs to be updated for each layer in
            each RK step.

            Pseudo-code: ::

                for (i, stim) in stimulus:

                    for L in layers:
                        compute L.k1 given stim(-1), layers.z(-1)

                    for L in layers:
                        compute L.k2 given stim(-.5), layers.z(-1), L.k1

                    for L in layers:
                        compute L.k3 given stim(-.5), layers.z(-1), L.k2

                    for L in layers:
                        compute L.x(0), L.k4 given stim(0), layers.z(-1), L.k3

                    for L in layers:
                        compute L.z given L.k1, L.k2, L.k3, L.k4
                        L.TF[:,i] = L.z

        Note:
            The current implementation assumes **constant sampling
            period** ``dt``

        Note:
            If ``learn is True``, then the Hebbian Learning algorithm
            described in

               Edward W. Large. *Music Tonality, Neural Resonance and
               Hebbian Learning.* **Proceedings of the Third International
               Conference on Mathematics and Computation in Music
               (MCM 2011)**, pp. 115--125, Paris, France, 2011.

            is used to update the connections:

            .. math::

                \\dot{c_{ij}} = -\\delta_{ij}c_{ij} + k_{ij}
                                \\frac{z_{i}}{1-\\sqrt{\\varepsilon}z_i}
                                \\frac{\\bar{z}_{j}}
                                {1-\\sqrt{\\varepsilon}\\bar{z}_j}

        Warning:
            The above equation differs from the equation presented in
            the afore mentioned reference (:math:`j` was used as
            subscript in the last fractional term, instead of :math:`i`,
            as is in the paper). It seems there it was a typo in the
            reference, but this **must be verified** with the author.

            Furthermore, the current implementation assumes that :math:`i`
            indexes the source layer and :math:`j` indexes the destination
            layer (this needs confirmation as well).
        """
        num_frames = signal.shape[0]
        if signal.ndim == 1:
            signal = np.atleast_2d(signal).T

        # dispatch update even for the initial state
        for L in self.layers():
            if L.save_states:
                L._prepare_Z(len(t))
                logger.info('"{}" ready to store TFR'.format(L.name))
            grfnn_update_event.send(sender=L, t=t[0], index=0)

        # Run fixed step RK4
        nc = len(str(num_frames))
        msg = '\r{{0:0{0}d}}/{1}'.format(nc, num_frames)
        for i in range(1, num_frames):
            s = signal[i-1, :]
            s_next = signal[i, :]

            # input signal (need interpolated values for k2 & k3)
            # linear interpolation should be fine
            x_stim = [s, 0.5*(s+s_next), 0.5*(s+s_next), s_next]

            for k in range(4):
                for L, inchan in self._layers:
                    stim = 0 if inchan is None else x_stim[k][inchan]
                    # ToDo DRY: The following if/elif... could be further
                    # simplified to adhere to DRY, but for some reason the
                    # version implemented using setattr() didn't give the same
                    # results?!
                    if k == 0:
                        L.k1 = _rk_step(L, dt, self.connections, stim, 'k0')
                    elif k == 1:
                        L.k2 = _rk_step(L, dt, self.connections, stim, 'k1')
                    elif k == 2:
                        L.k3 = _rk_step(L, dt, self.connections, stim, 'k2')
                    elif k == 3:
                        L.k4 = _rk_step(L, dt, self.connections, stim, 'k3')

            # final RK step
            for L in self.layers():
                L.z += dt*(L.k1 + 2.0*L.k2 + 2.0*L.k3 + L.k4)/6.0
                if np.isnan(L.z).any():
                    raise GrFNNExplosion('nan', L)
                if np.isinf(L.z).any():
                    raise GrFNNExplosion('inf', L)
                # dispatch update event
                grfnn_update_event.send(sender=L, t=t[i], index=i)

            # learn connections
            # BUG FIX: learning is now gated on the `learn` flag, as the
            # signature documents (it used to run whenever cparams was set),
            # and it checks `conn.self_connect` -- Connection never defines
            # `self_connection`, so the old code raised AttributeError on
            # every learning update.
            if learn:
                for L in self.layers():
                    for conn in self.connections[L]:
                        if conn.cparams is not None:
                            conn.matrix += learn_step(conn)
                            if not conn.self_connect:
                                # FIXME: This only works if source.f == destination.f
                                conn.matrix[range(len(conn.source.f)),
                                            range(len(conn.destination.f))] = 0

            # progress indicator
            sys.stdout.write(msg.format(i+1))
            sys.stdout.flush()
        sys.stdout.write(" done!\n")

        # force final update (mainly to make sure the last state is
        # reflected in on-line displays)
        for L in self.layers():
            grfnn_update_event.send(sender=L,
                                    t=t[num_frames-1],
                                    index=num_frames-1,
                                    force=True)
# helper function that performs a single RK4 step (any of them)
def _rk_step(layer, dt, connections, stim, pstep):
    """Single RK4 step

    Args:
        layer (:class:`grfnn.GrFNN`): layer to be integrated
        dt (``float``): integration step (in seconds)
        connections (``dict``): connection dictionary (see :class:`Model`)
        stim (``float``): external stimulus sample
        pstep (``string``): string identifying the previous RK step
            ``{'k0', 'k1', 'k2', 'k3'}`` (a name with no matching layer
            attribute, such as 'k0', behaves as "no previous step")

    Returns:
        :class:`numpy.ndarray`: :math:`\\dot{z}` updated for the input
            :class:`.GrFNN` (``layer``).
    """
    # The last RK step (after k3) advances a full dt; the middle ones half.
    # BUG FIX: this used `pstep is 'k3'`, which compares string *identity*
    # and only works because CPython interns short literals; use `==`.
    h = dt if pstep == 'k3' else 0.5*dt
    k = getattr(layer, pstep, 0)
    z = layer.z + h*k
    # advance every source layer's state by the same partial step
    conns = [None]*len(connections[layer])
    for i, c in enumerate(connections[layer]):
        src = c.source
        ks = getattr(src, pstep, 0)
        conns[i] = (src.z + h*ks, c)
    x = compute_input(layer, z, conns, stim)
    return layer.zdot(x, z, layer.f, layer.zparams)
# helper function that updates connection matrix
# FIXME: this needs to be updated! It hasn't been touched after getting a hold
# to Matlab's GrFNN Toolbox
def learn_step(conn):
    """Compute the derivative of a connection matrix (Hebbian learning).

    Args:
        conn (:class:`.Connection`): connection object

    Returns:
        :class:`np.ndarray`: derivative of connections (to use in
            connection update rule)

    ToDo:
        Revise (learning needs to be updated)
    """
    # TODO: test
    sqrt_e = np.sqrt(conn.destination.zparams.e)
    zi = conn.source.z
    # verify which one should be conjugated
    zj_conj = np.conj(conn.destination.z)
    active = np.outer(zi * nl(zi, sqrt_e),
                      zj_conj * nl(zj_conj, sqrt_e))
    # NOTE(review): `conn.d` and `conn.k` are not assigned anywhere in this
    # chunk (Connection.__init__ only stores cparams) -- presumably decay
    # and learning rates; confirm before relying on this function.
    return -conn.d * conn.matrix + conn.k * active
def modelFromJSON(definition=None):
    """
    Utility function to create a model (Network) from a JSON object (or a python
    ``dict``)

    Args:
        definition (``string``, ``JSON`` or ``dict``): model definition

    Returns:
        :class:`.Model`: Model, as specified in the input object.
    """
    try:
        D = json.loads(definition)
    except TypeError:
        # assume we received a dict (already parsed JSON)
        D = dict(definition)

    model = Model(name=D["name"])
    layers = dict()

    # create layers
    for L in D["layers"]:
        layer = GrFNN(**L)
        if "input_channel" in L:
            model.add_layer(layer, input_channel=L["input_channel"])
        else:
            model.add_layer(layer)
        layers[layer.name] = layer

    # connect layers
    for C in D["connections"]:
        try:
            source = layers[C["source_name"]]
            target = layers[C["target_name"]]
            M = make_connections(source, target, **C)
            # forward only the optional keys that are actually present
            conn_params = dict()
            for key in ('weight', 'learn', 'self_connect', 'connection_params'):
                if key in C:
                    conn_params[key] = C[key]
            model.connect_layers(source, target, matrix=M,
                                 connection_type=C['connection_type'],
                                 **conn_params)
        except Exception:
            # Best-effort as before: report the failing connection and keep
            # going. BUG FIX: the old Python 2 `print` statements were a
            # SyntaxError under Python 3; use the module logger instead
            # (logger.exception also records the traceback).
            logger.exception('Error creating connection {}'.format(C))
    return model
if __name__ == '__main__':
    # Smoke test / example: build a two-layer sensory-motor rhythm model
    # from its JSON description (see modelFromJSON for the schema).
    rhythm_model_definition = """
        {
            "name": "Sensory Motor Rhythm model",
            "layers": [
                {
                    "name": "sensory network",
                    "zparams": {
                        "alpha": 0.00001,
                        "beta1": 0.0,
                        "beta2": -2.0,
                        "delta1": 0.0,
                        "delta2": 0.0,
                        "epsilon": 1.0
                    },
                    "frequency_range": [0.375, 12.0],
                    "num_oscs": 321,
                    "stimulus_conn_type": "linear",
                    "w": 3.0,
                    "input_channel": 0
                },
                {
                    "name": "motor network",
                    "zparams": {
                        "alpha": -0.4,
                        "beta1": 1.75,
                        "beta2": -1.25,
                        "delta1": 0.0,
                        "delta2": 0.0,
                        "epsilon": 1.0
                    },
                    "frequency_range": [0.375, 12.0],
                    "num_oscs": 321,
                    "stimulus_conn_type": "active"
                }
            ],
            "connections": [
                {
                    "source_name": "sensory network",
                    "target_name": "sensory network",
                    "modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
                    "amps": [1, 1, 1, 1, 1],
                    "strength": 1.0,
                    "range": 1.05,
                    "connection_type": "2freq",
                    "self_connect": false,
                    "weight": 0.1
                },
                {
                    "source_name": "sensory network",
                    "target_name": "motor network",
                    "modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
                    "amps": [1, 1, 1, 1, 1],
                    "strength": 1.25,
                    "range": 1.05,
                    "connection_type": "2freq",
                    "self_connect": true,
                    "weight": 0.4
                },
                {
                    "source_name": "motor network",
                    "target_name": "motor network",
                    "modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
                    "amps": [1, 1, 1, 1, 1],
                    "strength": 1.0,
                    "range": 1.05,
                    "connection_type": "2freq",
                    "self_connect": false,
                    "weight": 0.1
                },
                {
                    "source_name": "motor network",
                    "target_name": "sensory network",
                    "modes": [0.333333333333, 0.5, 1, 2.0, 3.0],
                    "amps": [1, 1, 1, 1, 1],
                    "strength": 0.2,
                    "range": 1.05,
                    "connection_type": "2freq",
                    "self_connect": true,
                    "weight": 0.05
                }
            ]
        }
        """

    rhythmModel = modelFromJSON(rhythm_model_definition)
| |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around urlfetch to call REST API, with retries."""
import json
import logging
import urllib
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.runtime import apiproxy_errors
from codereview import common
# OAuth2 scope granting access to the authenticated account's email address.
# (Not referenced in this chunk; presumably used by callers of this module.)
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
class Error(Exception):
  """Base class for non-transient REST API failures.

  Attributes:
    status_code: HTTP status code of the failing response (or None when
        no response was received).
    response: raw response body (or None).
  """

  def __init__(self, msg, status_code, response):
    super(Error, self).__init__(msg)
    self.response = response
    self.status_code = status_code
class NotFoundError(Error):
  """Raised if the endpoint returns HTTP 404 (resource not found)."""
class AuthError(Error):
  """Raised if the endpoint returns HTTP 401 or 403 (auth failure)."""
# Do not log Error exception raised from a tasklet, it is expected to happen.
# (Registering it as a "flow exception" keeps ndb from logging a warning with
# traceback each time request_async raises it.)
ndb.add_flow_exception(Error)
def urlfetch_async(**kwargs):
  """To be mocked in tests.

  Single indirection point over ndb's context urlfetch so unit tests can
  stub out the network layer here instead of patching ndb internals.
  """
  return ndb.get_context().urlfetch(**kwargs)
@ndb.tasklet
def request_async(
    url,
    method='GET',
    payload=None,
    params=None,
    headers=None,
    scopes=None,
    deadline=None,
    max_attempts=None):
  """Sends a REST API request, returns raw unparsed response.

  Retries the request on transient errors for up to |max_attempts| times.

  Args:
    url: url to send the request to.
    method: HTTP method to use, e.g. GET, POST, PUT.
    payload: raw data to put in the request body.
    params: dict with query GET parameters (i.e. ?key=value&key=value).
    headers: additional request headers.
    scopes: OAuth2 scopes for the access token (or None to skip auth).
    deadline: deadline for a single attempt (10 sec by default).
    max_attempts: how many times to retry on errors (4 times by default).

  Returns:
    Buffer with raw response.

  Raises:
    NotFoundError on 404 response.
    AuthError on 401 or 403 response.
    Error on any other non-transient error.
  """
  deadline = 10 if deadline is None else deadline
  max_attempts = 4 if max_attempts is None else max_attempts

  # Plain HTTP is only acceptable against the local dev server.
  if common.IS_DEV:
    protocols = ('http://', 'https://')
  else:
    protocols = ('https://',)
  assert url.startswith(protocols) and '?' not in url, url
  if params:
    url += '?' + urllib.urlencode(params)

  # Fetch the service account's access token once; the same token is then
  # reused across all retry attempts below.
  if scopes:
    access_token = app_identity.get_access_token(scopes)[0]
    headers = (headers or {}).copy()
    headers['Authorization'] = 'Bearer %s' % access_token

  if payload is not None:
    assert isinstance(payload, str), type(payload)
    assert method in ('CREATE', 'POST', 'PUT'), method

  attempt = 0
  response = None
  while attempt < max_attempts:
    if attempt:
      logging.info('Retrying...')
    attempt += 1
    logging.info('%s %s', method, url)
    try:
      response = yield urlfetch_async(
          url=url,
          payload=payload,
          method=method,
          headers=headers or {},
          follow_redirects=False,
          deadline=deadline,
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as e:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('%s %s failed: %s', method, url, e)
      continue

    # Transient error on the other side.
    if response.status_code >= 500 or response.status_code == 408:
      logging.warning(
          '%s %s failed with HTTP %d: %r',
          method, url, response.status_code, response.content)
      continue

    # Non-transient error: map the status code to the matching exception
    # class and give up immediately (no retry).
    if 300 <= response.status_code < 500:
      logging.warning(
          '%s %s failed with HTTP %d: %r',
          method, url, response.status_code, response.content)
      cls = Error
      if response.status_code == 404:
        cls = NotFoundError
      elif response.status_code in (401, 403):
        cls = AuthError
      raise cls(
          'Failed to call %s: HTTP %d' % (url, response.status_code),
          response.status_code, response.content)

    # Success. Beware of large responses.
    if len(response.content) > 1024 * 1024:
      logging.warning('Response size: %.1f KiB', len(response.content) / 1024.0)
    raise ndb.Return(response.content)

  # All attempts failed; report the last response, if any was received.
  raise Error(
      'Failed to call %s after %d attempts' % (url, max_attempts),
      response.status_code if response else None,
      response.content if response else None)
def request(*args, **kwargs):
  """Blocking version of request_async."""
  future = request_async(*args, **kwargs)
  return future.get_result()
@ndb.tasklet
def json_request_async(
    url,
    method='GET',
    payload=None,
    params=None,
    headers=None,
    scopes=None,
    deadline=None,
    max_attempts=None):
  """Sends a JSON REST API request, returns deserialized response.

  Retries the request on transient errors for up to |max_attempts| times.

  Args:
    url: url to send the request to.
    method: HTTP method to use, e.g. GET, POST, PUT.
    payload: object to serialized to JSON and put in the request body.
    params: dict with query GET parameters (i.e. ?key=value&key=value).
    headers: additional request headers.
    scopes: OAuth2 scopes for the access token (or None to skip auth).
    deadline: deadline for a single attempt.
    max_attempts: how many times to retry on errors.

  Returns:
    Deserialized JSON response.

  Raises:
    NotFoundError on 404 response.
    AuthError on 401 or 403 response.
    Error on any other non-transient error.
  """
  # Serialize the payload and label it as JSON; sort_keys keeps the
  # serialization deterministic.
  if payload is not None:
    headers = (headers or {}).copy()
    headers['Content-Type'] = 'application/json; charset=utf-8'
    payload = json.dumps(payload, sort_keys=True)
  response = yield request_async(
      url=url,
      method=method,
      payload=payload,
      params=params,
      headers=headers,
      scopes=scopes,
      deadline=deadline,
      max_attempts=max_attempts)
  try:
    response = json.loads(response)
  except ValueError as e:
    # The endpoint replied with 2xx but the body is not valid JSON.
    raise Error('Bad JSON response: %s' % e, None, response)
  # Tasklets return their result by raising ndb.Return.
  raise ndb.Return(response)
def json_request(*args, **kwargs):
  """Blocking version of json_request_async."""
  future = json_request_async(*args, **kwargs)
  return future.get_result()
| |
#!/usr/bin/env python2.7
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import re
import os
import posixpath
import StringIO
import sys
import subprocess
from contextlib import closing
def BuildFileMatchRegex(*file_matchers):
  """Compile |file_matchers| into one regex matching any of them in full.

  BUG FIX: the previous '^' + '|'.join(...) + '$' only anchored the first
  and last alternative ('^A|B|C$'), so middle patterns matched substrings
  of a path. The alternatives are now wrapped in a non-capturing group so
  '^' and '$' apply to every one of them.
  """
  return re.compile('^(?:' + '|'.join(file_matchers) + ')$')
# NOTE: all patterns below are intended to match entire APK member paths
# (see BuildFileMatchRegex).

# Chrome specific files which are not in Monochrome.apk
CHROME_SPECIFIC = BuildFileMatchRegex(
    r'lib/.*/libchrome\.so',
    r'lib/.*/libchrome\.\d{4}\.\d{2,3}\.so',  # libchrome placeholders
    r'lib/.*/libchromium_android_linker\.so',
    r'lib/.*/libchromeview\.so',  # placeholder library
    r'lib/.*/libchrome_crashpad_handler\.so',
    r'lib/.*/crazy\.libchrome\.so',
    r'lib/.*/crazy\.libchrome\.align',
    r'lib/.*/gdbserver',
    # Monochrome doesn't have any res directories whose api number is less
    # than v24.
    r'res/.*-v1\d/.*\.xml',
    r'res/.*-v2[0-3]/.*\.xml',
    r'META-INF/.*',
    r'assets/metaresources.arsc',
    r'assets/AndroidManifest.xml')

# WebView specific files which are not in Monochrome.apk
WEBVIEW_SPECIFIC = BuildFileMatchRegex(
    r'lib/.*/libwebviewchromium\.so',
    r'lib/.*/libchromium_android_linker\.so',
    r'assets/webview_licenses.notice',
    r'res/.*/icon_webview(.webp)?',
    r'META-INF/.*',
    # Monochrome doesn't have any res directories
    # whose api level is less than v24.
    r'res/.*-v1\d/.*\.xml',
    r'res/.*-v2[0-3]/.*\.xml',
    r'lib/.*/gdbserver')

# The files in Chrome are not same as those in Monochrome
CHROME_CHANGES = BuildFileMatchRegex(
    r'AndroidManifest\.xml',
    r'resources\.arsc',
    r'classes\.dex',
    r'classes2\.dex',
    r'res/.*\.xml',  # Resource id isn't same
    r'assets/unwind_cfi_32',  # Generated from apk's shared library
    # All pak files except chrome_100_percent.pak are different
    r'assets/resources\.pak',
    r'assets/locales/am\.pak',
    r'assets/locales/ar\.pak',
    r'assets/locales/bg\.pak',
    r'assets/locales/ca\.pak',
    r'assets/locales/cs\.pak',
    r'assets/locales/da\.pak',
    r'assets/locales/de\.pak',
    r'assets/locales/el\.pak',
    r'assets/locales/en-GB\.pak',
    r'assets/locales/en-US\.pak',
    r'assets/locales/es-419\.pak',
    r'assets/locales/es\.pak',
    r'assets/locales/fa\.pak',
    r'assets/locales/fi\.pak',
    r'assets/locales/fil\.pak',
    r'assets/locales/fr\.pak',
    r'assets/locales/he\.pak',
    r'assets/locales/hi\.pak',
    r'assets/locales/hr\.pak',
    r'assets/locales/hu\.pak',
    r'assets/locales/id\.pak',
    r'assets/locales/it\.pak',
    r'assets/locales/ja\.pak',
    r'assets/locales/ko\.pak',
    r'assets/locales/lt\.pak',
    r'assets/locales/lv\.pak',
    r'assets/locales/nb\.pak',
    r'assets/locales/nl\.pak',
    r'assets/locales/pl\.pak',
    r'assets/locales/pt-BR\.pak',
    r'assets/locales/pt-PT\.pak',
    r'assets/locales/ro\.pak',
    r'assets/locales/ru\.pak',
    r'assets/locales/sk\.pak',
    r'assets/locales/sl\.pak',
    r'assets/locales/sr\.pak',
    r'assets/locales/sv\.pak',
    r'assets/locales/sw\.pak',
    r'assets/locales/th\.pak',
    r'assets/locales/tr\.pak',
    r'assets/locales/uk\.pak',
    r'assets/locales/vi\.pak',
    r'assets/locales/zh-CN\.pak',
    r'assets/locales/zh-TW\.pak')

# The files in WebView are not same as those in Monochrome
WEBVIEW_CHANGES = BuildFileMatchRegex(
    r'AndroidManifest\.xml',
    r'resources\.arsc',
    r'classes\.dex',
    r'res/.*\.xml',  # Resource id isn't same
    r'assets/.*\.pak')  # All pak files are not same as Monochrome
# Parse the output of unzip -lv, like
# 2384 Defl:N 807 66% 2001-01-01 00:00 2f2d9fce res/xml/privacy.xml
ZIP_ENTRY = re.compile(
"^ *[0-9]+ +\S+ +[0-9]+ +(?P<cmpr>[0-9]{1,2})% +\S+ +\S+ +"
"(?P<crc>[0-9a-fA-F]+) +(?P<name>\S+)"
)
class APKEntry:
def __init__(self, filename, crc, uncompressed):
self.filename = filename
self.CRC = crc
self.uncompressed = uncompressed
def DumpAPK(apk):
args = ['unzip', '-lv']
args.append(apk)
content = subprocess.check_output(args)
apk_entries = []
with closing(StringIO.StringIO(content)) as f:
for line in f:
match = ZIP_ENTRY.match(line)
if match:
apk_entries.append(
APKEntry(
match.group('name'), match.group('crc'),
match.group('cmpr') == 0))
return apk_entries
def VerifySameFile(monochrome_dict, apk, changes):
  """Verify apk file content matches same files in monochrome.

  Raises an exception listing every file from |apk| whose CRC differs from
  the Monochrome entry of the same name, unless the file is expected to
  differ (its name matches the |changes| regex).
  """
  mismatched = []
  for entry in apk:
    # File may not exists due to exists_in_some_form().
    counterpart = monochrome_dict.get(entry.filename)
    if counterpart is None:
      continue
    if counterpart.CRC == entry.CRC or changes.match(counterpart.filename):
      continue
    mismatched.append(entry.filename)
  if mismatched:
    raise Exception("The following files are not same as Monochrome:\n %s" %
                    '\n'.join(mismatched))
def VerifyUncompressed(monochrome, apk):
  """Verify uncompressed files in apk are a subset of those in monochrome.

  Verify files not being compressed in apk are also uncompressed in
  Monochrome APK. Raises an exception listing the violations.
  """
  # Use a set for membership tests: the old list-based lookup made this
  # check O(len(apk) * len(monochrome)).
  monochrome_uncompressed = set(
      i.filename for i in monochrome if i.uncompressed)
  violations = [i.filename for i in apk
                if i.uncompressed and i.filename not in monochrome_uncompressed]
  if violations:
    raise Exception("The following files are compressed in Monochrome:\n %s" %
                    '\n'.join(violations))
def SuperSetOf(monochrome, apk):
  """Verify Monochrome is super set of apk.

  A res/ file also counts as present when any Monochrome path ends with
  its basename, because resources may live under a different -vNN
  qualifier directory in Monochrome (minSdkVersion difference,
  https://crbug.com/794438). E.g.:
    apk could have: res/drawable/foo.png, res/drawable-v23/foo.png
    monochrome (minSdkVersion=24) would need only: res/drawable-v23/foo.png
  """
  def exists_in_some_form(path):
    if path in monochrome:
      return True
    if not path.startswith('res/'):
      return False
    suffix = '/' + posixpath.basename(path)
    return any(candidate.endswith(suffix) for candidate in monochrome)

  missing = [path for path in apk if not exists_in_some_form(path)]
  if missing:
    raise Exception('The following files are missing in Monochrome:\n %s' %
                    '\n'.join(missing))
def RemoveSpecific(apk_entries, specific):
  """Return |apk_entries| without entries whose filename matches |specific|."""
  kept = []
  for entry in apk_entries:
    if not specific.search(entry.filename):
      kept.append(entry)
  return kept
def LoadPathmap(pathmap_path):
  """Load the pathmap of obfuscated resource paths.

  Returns: A dict mapping from obfuscated paths to original paths or an
           empty dict if passed a None |pathmap_path|.
  """
  if pathmap_path is None:
    return {}

  mapping = {}
  with open(pathmap_path, 'r') as pathmap_file:
    for raw_line in pathmap_file:
      entry = raw_line.strip()
      # Skip blank lines and comments.
      if not entry or entry.startswith('#'):
        continue
      original, renamed = entry.split(' -> ')
      mapping[renamed] = original
  return mapping
def DeobfuscateFilename(obfuscated_filename, pathmap):
  """Return the original path for |obfuscated_filename| (itself if unmapped)."""
  if obfuscated_filename in pathmap:
    return pathmap[obfuscated_filename]
  return obfuscated_filename
def ParseArgs(args):
  """Parse command line options.

  Returns:
    An argparse.Namespace with the parsed arguments.
  """
  parser = argparse.ArgumentParser(prog='monochrome_apk_checker')
  parser.add_argument('--monochrome-apk',
                      required=True,
                      help='The monochrome APK path.')
  parser.add_argument('--monochrome-pathmap',
                      help='The monochrome APK resources pathmap path.')
  parser.add_argument('--chrome-apk',
                      required=True,
                      help='The chrome APK path.')
  parser.add_argument('--chrome-pathmap',
                      help='The chrome APK resources pathmap path.')
  parser.add_argument('--system-webview-apk',
                      required=True,
                      help='The system webview APK path.')
  parser.add_argument('--system-webview-pathmap',
                      help='The system webview APK resources pathmap path.')
  return parser.parse_args(args)
def main():
  """Check the Chrome and WebView APKs against the Monochrome APK."""
  options = ParseArgs(sys.argv[1:])
  monochrome = DumpAPK(options.monochrome_apk)
  monochrome_pathmap = LoadPathmap(options.monochrome_pathmap)
  # De-obfuscate resource paths so all comparisons use original names.
  monochrome_files = [
      DeobfuscateFilename(f.filename, monochrome_pathmap) for f in monochrome
  ]
  monochrome_dict = dict([(DeobfuscateFilename(i.filename, monochrome_pathmap),
                           i) for i in monochrome])
  # Drop files expected to exist only in the standalone APKs before comparing.
  chrome = RemoveSpecific(DumpAPK(options.chrome_apk),
                          CHROME_SPECIFIC)
  if len(chrome) == 0:
    raise Exception('Chrome should have common files with Monochrome')
  webview = RemoveSpecific(DumpAPK(options.system_webview_apk),
                           WEBVIEW_SPECIFIC)
  if len(webview) == 0:
    raise Exception('WebView should have common files with Monochrome')

  def check_apk(apk, pathmap):
    # Monochrome must contain (some form of) every file, and must not
    # compress anything the standalone APK stores uncompressed.
    apk_files = [DeobfuscateFilename(f.filename, pathmap) for f in apk]
    SuperSetOf(monochrome_files, apk_files)
    VerifyUncompressed(monochrome, apk)

  VerifySameFile(monochrome_dict, chrome, CHROME_CHANGES)
  VerifySameFile(monochrome_dict, webview, WEBVIEW_CHANGES)
  chrome_pathmap = LoadPathmap(options.chrome_pathmap)
  check_apk(chrome, chrome_pathmap)
  webview_pathmap = LoadPathmap(options.system_webview_pathmap)
  check_apk(webview, webview_pathmap)


if __name__ == '__main__':
  # main() returns None on success, so this exits with status 0; any check
  # failure raises and exits non-zero.
  sys.exit(main())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from optparse import OptionParser
import os
import re
import sys
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
# Make sure we're using Babel source, and not some previously installed version
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..'))
from babel import dates, numbers
from babel.plural import PluralRule
from babel.localedata import Alias
from babel._compat import pickle, text_type
# Shorthand: the CLDR data files are parsed as plain XML.
parse = ElementTree.parse

# CLDR weekday abbreviations mapped to integers, with Monday == 0.
weekdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5,
            'sun': 6}
def _text(elem):
buf = [elem.text or '']
for child in elem:
buf.append(_text(child))
buf.append(elem.tail or '')
return u''.join(filter(None, buf)).strip()
# Plain element name in an alias path.
NAME_RE = re.compile(r"^\w+$")
# Path step with a type attribute, e.g. "era[@type='0']"; group 1 is the type.
TYPE_ATTR_RE = re.compile(r"^\w+\[@type='(.*?)'\]$")

# CLDR element names translated to the key names used in Babel's locale data.
NAME_MAP = {
    'dateFormats': 'date_formats',
    'dateTimeFormats': 'datetime_formats',
    'eraAbbr': 'abbreviated',
    'eraNames': 'wide',
    'eraNarrow': 'narrow',
    'timeFormats': 'time_formats'
}
def log(message, *args):
    """Write *message* (optionally %-formatted with *args*) to stderr."""
    text = message % args if args else message
    sys.stderr.write(text + '\r\n')
    sys.stderr.flush()
def error(message, *args):
    """Log *message* (optionally %-formatted with *args*) with an ERROR prefix."""
    log('ERROR: %s' % message, *args)
def need_conversion(dst_filename, data_dict, source_filename):
    """Decide whether *source_filename* must be (re)converted.

    Reads the CLDR ``$Revision`` number from the first 4 KiB of the source
    XML, records it as ``data_dict['_version']``, and returns True when the
    pickled destination file is missing or was built from another revision.
    """
    with open(source_filename, 'rb') as source:
        header = source.read(4096)
    version = int(re.search(b'version number="\\$Revision: (\\d+)',
                            header).group(1))
    data_dict['_version'] = version

    if not os.path.isfile(dst_filename):
        return True
    with open(dst_filename, 'rb') as dst:
        existing = pickle.load(dst)
    return existing.get('_version') != version
def _translate_alias(ctxt, path):
    """Resolve a CLDR alias *path* relative to the key context *ctxt*.

    Returns the list of keys addressing the alias target; '..' steps pop
    one key, "name[@type='x']" steps append 'x', and plain names are
    appended after translation through NAME_MAP.
    """
    keys = ctxt[:]
    for part in path.split('/'):
        if part == '..':
            keys.pop()
            continue
        match = TYPE_ATTR_RE.match(part)
        if match:
            keys.append(match.group(1))
        else:
            assert NAME_RE.match(part)
            keys.append(NAME_MAP.get(part, part))
    return keys
def _parse_currency_date(s):
if not s:
return None
parts = s.split('-', 2)
return tuple(map(int, parts + [1] * (3 - len(parts))))
def _currency_sort_key(tup):
code, start, end, tender = tup
return int(not tender), start or (1, 1, 1)
def main():
    """Import CLDR XML data into Babel's pickled locale-data files.

    Expects the path of an extracted CLDR distribution as the single
    command-line argument.  Writes ``global.dat`` (supplemental data) plus
    one ``locale-data/<locale>.dat`` per CLDR locale, skipping any output
    whose pickle already matches the CLDR revision (see need_conversion).
    """
    parser = OptionParser(usage='%prog path/to/cldr')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('incorrect number of arguments')
    srcdir = args[0]
    destdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                           '..', 'babel')
    sup_filename = os.path.join(srcdir, 'supplemental', 'supplementalData.xml')
    bcp47_timezone = parse(os.path.join(srcdir, 'bcp47', 'timezone.xml'))
    sup_windows_zones = parse(os.path.join(srcdir, 'supplemental',
                                           'windowsZones.xml'))
    sup_metadata = parse(os.path.join(srcdir, 'supplemental',
                                      'supplementalMetadata.xml'))
    sup_likely = parse(os.path.join(srcdir, 'supplemental',
                                    'likelySubtags.xml'))
    sup = parse(sup_filename)

    # Import global data from the supplemental files
    global_path = os.path.join(destdir, 'global.dat')
    global_data = {}
    if need_conversion(global_path, global_data, sup_filename):
        territory_zones = global_data.setdefault('territory_zones', {})
        zone_aliases = global_data.setdefault('zone_aliases', {})
        zone_territories = global_data.setdefault('zone_territories', {})
        win_mapping = global_data.setdefault('windows_zone_mapping', {})
        language_aliases = global_data.setdefault('language_aliases', {})
        territory_aliases = global_data.setdefault('territory_aliases', {})
        script_aliases = global_data.setdefault('script_aliases', {})
        variant_aliases = global_data.setdefault('variant_aliases', {})
        likely_subtags = global_data.setdefault('likely_subtags', {})
        territory_currencies = global_data.setdefault('territory_currencies',
                                                      {})
        parent_exceptions = global_data.setdefault('parent_exceptions', {})
        currency_fractions = global_data.setdefault('currency_fractions', {})

        # create auxiliary zone->territory map from the windows zones (we
        # don't set the 'zones_territories' map directly here, because there
        # are some zone aliases listed and we defer the decision of which
        # ones to choose to the 'bcp47' data)
        _zone_territory_map = {}
        for map_zone in sup_windows_zones.findall(
                './/windowsZones/mapTimezones/mapZone'):
            if map_zone.attrib.get('territory') == '001':
                win_mapping[map_zone.attrib['other']] = \
                    map_zone.attrib['type'].split()[0]
            for tzid in text_type(map_zone.attrib['type']).split():
                _zone_territory_map[tzid] = \
                    text_type(map_zone.attrib['territory'])

        for key_elem in bcp47_timezone.findall('.//keyword/key'):
            if key_elem.attrib['name'] == 'tz':
                for elem in key_elem.findall('type'):
                    if 'deprecated' not in elem.attrib:
                        # The first alias is the canonical tzid; the rest
                        # become aliases pointing at it.
                        aliases = text_type(elem.attrib['alias']).split()
                        tzid = aliases.pop(0)
                        territory = _zone_territory_map.get(tzid, '001')
                        territory_zones.setdefault(territory, []).append(tzid)
                        zone_territories[tzid] = territory
                        for alias in aliases:
                            zone_aliases[alias] = tzid
                break

        # Import Metazone mapping
        meta_zones = global_data.setdefault('meta_zones', {})
        tzsup = parse(os.path.join(srcdir, 'supplemental', 'metaZones.xml'))
        for elem in tzsup.findall('.//timezone'):
            for child in elem.findall('usesMetazone'):
                if 'to' not in child.attrib:  # FIXME: support old mappings
                    meta_zones[elem.attrib['type']] = child.attrib['mzone']

        # Language aliases
        for alias in sup_metadata.findall('.//alias/languageAlias'):
            # We don't have a use for those at the moment.  They don't
            # pass our parser anyways.
            if '_' in alias.attrib['type']:
                continue
            language_aliases[alias.attrib['type']] = \
                alias.attrib['replacement']

        # Territory aliases
        for alias in sup_metadata.findall('.//alias/territoryAlias'):
            territory_aliases[alias.attrib['type']] = \
                alias.attrib['replacement'].split()

        # Script aliases
        for alias in sup_metadata.findall('.//alias/scriptAlias'):
            script_aliases[alias.attrib['type']] = alias.attrib['replacement']

        # Variant aliases
        for alias in sup_metadata.findall('.//alias/variantAlias'):
            repl = alias.attrib.get('replacement')
            if repl:
                variant_aliases[alias.attrib['type']] = repl

        # Likely subtags
        for likely_subtag in sup_likely.findall(
                './/likelySubtags/likelySubtag'):
            likely_subtags[likely_subtag.attrib['from']] = \
                likely_subtag.attrib['to']

        # Currencies in territories
        for region in sup.findall('.//currencyData/region'):
            region_code = region.attrib['iso3166']
            region_currencies = []
            for currency in region.findall('./currency'):
                cur_start = _parse_currency_date(currency.attrib.get('from'))
                cur_end = _parse_currency_date(currency.attrib.get('to'))
                region_currencies.append((currency.attrib['iso4217'],
                                          cur_start, cur_end,
                                          currency.attrib.get(
                                              'tender', 'true') == 'true'))
            region_currencies.sort(key=_currency_sort_key)
            territory_currencies[region_code] = region_currencies

        # Explicit parent locales
        for paternity in sup.findall('.//parentLocales/parentLocale'):
            parent = paternity.attrib['parent']
            for child in paternity.attrib['locales'].split():
                parent_exceptions[child] = parent

        # Currency decimal and rounding digits
        for fraction in sup.findall('.//currencyData/fractions/info'):
            cur_code = fraction.attrib['iso4217']
            cur_digits = int(fraction.attrib['digits'])
            cur_rounding = int(fraction.attrib['rounding'])
            cur_cdigits = int(fraction.attrib.get('cashDigits', cur_digits))
            cur_crounding = int(fraction.attrib.get('cashRounding',
                                                    cur_rounding))
            currency_fractions[cur_code] = (cur_digits, cur_rounding,
                                            cur_cdigits, cur_crounding)

        outfile = open(global_path, 'wb')
        try:
            pickle.dump(global_data, outfile, 2)
        finally:
            outfile.close()

    # build a territory containment mapping for inheritance
    regions = {}
    for elem in sup.findall('.//territoryContainment/group'):
        regions[elem.attrib['type']] = elem.attrib['contains'].split()

    # Resolve territory containment
    territory_containment = {}
    region_items = sorted(regions.items())
    for group, territory_list in region_items:
        for territory in territory_list:
            containers = territory_containment.setdefault(territory, set([]))
            if group in territory_containment:
                containers |= territory_containment[group]
            containers.add(group)

    # prepare the per-locale plural rules definitions
    plural_rules = {}
    prsup = parse(os.path.join(srcdir, 'supplemental', 'plurals.xml'))
    for elem in prsup.findall('.//plurals/pluralRules'):
        rules = []
        for rule in elem.findall('pluralRule'):
            rules.append((rule.attrib['count'], text_type(rule.text)))
        pr = PluralRule(rules)
        for locale in elem.attrib['locales'].split():
            plural_rules[locale] = pr

    filenames = os.listdir(os.path.join(srcdir, 'main'))
    filenames.remove('root.xml')
    filenames.sort(key=len)
    # root.xml must be processed first so inheriting locales find their base.
    filenames.insert(0, 'root.xml')

    for filename in filenames:
        stem, ext = os.path.splitext(filename)
        if ext != '.xml':
            continue

        full_filename = os.path.join(srcdir, 'main', filename)
        data_filename = os.path.join(destdir, 'locale-data', stem + '.dat')

        data = {}
        if not need_conversion(data_filename, data, full_filename):
            continue

        tree = parse(full_filename)

        language = None
        elem = tree.find('.//identity/language')
        if elem is not None:
            language = elem.attrib['type']

        territory = None
        elem = tree.find('.//identity/territory')
        if elem is not None:
            territory = elem.attrib['type']
        else:
            territory = '001'  # world
        regions = territory_containment.get(territory, [])

        log('Processing %s (Language = %s; Territory = %s)',
            filename, language, territory)

        # plural rules
        locale_id = '_'.join(filter(None, [
            language,
            territory != '001' and territory or None
        ]))
        if locale_id in plural_rules:
            data['plural_form'] = plural_rules[locale_id]

        # <localeDisplayNames>
        # NOTE: for all of these maps, draft/alt entries only fill gaps and
        # never override an already-imported value.
        territories = data.setdefault('territories', {})
        for elem in tree.findall('.//territories/territory'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in territories:
                continue
            territories[elem.attrib['type']] = _text(elem)

        languages = data.setdefault('languages', {})
        for elem in tree.findall('.//languages/language'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in languages:
                continue
            languages[elem.attrib['type']] = _text(elem)

        variants = data.setdefault('variants', {})
        for elem in tree.findall('.//variants/variant'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in variants:
                continue
            variants[elem.attrib['type']] = _text(elem)

        scripts = data.setdefault('scripts', {})
        for elem in tree.findall('.//scripts/script'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in scripts:
                continue
            scripts[elem.attrib['type']] = _text(elem)

        # <dates>
        week_data = data.setdefault('week_data', {})
        supelem = sup.find('.//weekData')

        # A rule applies when it names this territory directly or any of its
        # containing regions.  (Note: `territories` is reused here as a local
        # list of territory codes, clobbering the display-name dict above.)
        for elem in supelem.findall('minDays'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories
                                                for r in regions]):
                week_data['min_days'] = int(elem.attrib['count'])

        for elem in supelem.findall('firstDay'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories
                                                for r in regions]):
                week_data['first_day'] = weekdays[elem.attrib['day']]

        for elem in supelem.findall('weekendStart'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories
                                                for r in regions]):
                week_data['weekend_start'] = weekdays[elem.attrib['day']]

        for elem in supelem.findall('weekendEnd'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories
                                                for r in regions]):
                week_data['weekend_end'] = weekdays[elem.attrib['day']]

        zone_formats = data.setdefault('zone_formats', {})
        for elem in tree.findall('.//timeZoneNames/gmtFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['gmt'] = text_type(elem.text).replace('{0}', '%s')
                break
        for elem in tree.findall('.//timeZoneNames/regionFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['region'] = text_type(elem.text).replace('{0}',
                                                                      '%s')
                break
        for elem in tree.findall('.//timeZoneNames/fallbackFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['fallback'] = text_type(elem.text) \
                    .replace('{0}', '%(0)s').replace('{1}', '%(1)s')
                break
        for elem in tree.findall('.//timeZoneNames/fallbackRegionFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['fallback_region'] = text_type(elem.text) \
                    .replace('{0}', '%(0)s').replace('{1}', '%(1)s')
                break

        time_zones = data.setdefault('time_zones', {})
        for elem in tree.findall('.//timeZoneNames/zone'):
            info = {}
            city = elem.findtext('exemplarCity')
            if city:
                info['city'] = text_type(city)
            for child in elem.findall('long/*'):
                info.setdefault('long', {})[child.tag] = text_type(child.text)
            for child in elem.findall('short/*'):
                info.setdefault('short', {})[child.tag] = text_type(child.text)
            time_zones[elem.attrib['type']] = info

        meta_zones = data.setdefault('meta_zones', {})
        for elem in tree.findall('.//timeZoneNames/metazone'):
            info = {}
            city = elem.findtext('exemplarCity')
            if city:
                info['city'] = text_type(city)
            for child in elem.findall('long/*'):
                info.setdefault('long', {})[child.tag] = text_type(child.text)
            for child in elem.findall('short/*'):
                info.setdefault('short', {})[child.tag] = text_type(child.text)
            meta_zones[elem.attrib['type']] = info

        for calendar in tree.findall('.//calendars/calendar'):
            if calendar.attrib['type'] != 'gregorian':
                # TODO: support other calendar types
                continue

            months = data.setdefault('months', {})
            for ctxt in calendar.findall('months/monthContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = months.setdefault(ctxt_type, {})
                for width in ctxt.findall('monthWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'month':
                            if ('draft' in elem.attrib or
                                    'alt' in elem.attrib) \
                                    and int(elem.attrib['type']) in widths:
                                continue
                            widths[int(elem.attrib.get('type'))] = \
                                text_type(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['months', ctxt_type,
                                                  width_type],
                                                 elem.attrib['path'])
                            )

            days = data.setdefault('days', {})
            for ctxt in calendar.findall('days/dayContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = days.setdefault(ctxt_type, {})
                for width in ctxt.findall('dayWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'day':
                            dtype = weekdays[elem.attrib['type']]
                            # Skip draft/alt variants once a value exists,
                            # matching the month/quarter/era loops.  (The
                            # check previously read `'alt' not in`, which
                            # inverted the rule and let alt variants clobber
                            # base day names.)
                            if ('draft' in elem.attrib or
                                    'alt' in elem.attrib) \
                                    and dtype in widths:
                                continue
                            widths[dtype] = text_type(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['days', ctxt_type,
                                                  width_type],
                                                 elem.attrib['path'])
                            )

            quarters = data.setdefault('quarters', {})
            for ctxt in calendar.findall('quarters/quarterContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = quarters.setdefault(ctxt.attrib['type'], {})
                for width in ctxt.findall('quarterWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'quarter':
                            if ('draft' in elem.attrib or
                                    'alt' in elem.attrib) \
                                    and int(elem.attrib['type']) in widths:
                                continue
                            widths[int(elem.attrib['type'])] = \
                                text_type(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['quarters', ctxt_type,
                                                  width_type],
                                                 elem.attrib['path']))

            eras = data.setdefault('eras', {})
            for width in calendar.findall('eras/*'):
                width_type = NAME_MAP[width.tag]
                widths = eras.setdefault(width_type, {})
                for elem in width.getiterator():
                    if elem.tag == 'era':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and int(elem.attrib['type']) in widths:
                            continue
                        widths[int(elem.attrib.get('type'))] = \
                            text_type(elem.text)
                    elif elem.tag == 'alias':
                        eras[width_type] = Alias(
                            _translate_alias(['eras', width_type],
                                             elem.attrib['path'])
                        )

            # AM/PM
            periods = data.setdefault('periods', {})
            for day_period_width in calendar.findall(
                    'dayPeriods/dayPeriodContext/dayPeriodWidth'):
                if day_period_width.attrib['type'] == 'wide':
                    for day_period in day_period_width.findall('dayPeriod'):
                        if 'alt' not in day_period.attrib:
                            periods[day_period.attrib['type']] = text_type(
                                day_period.text)

            date_formats = data.setdefault('date_formats', {})
            for format in calendar.findall('dateFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'dateFormatLength':
                        if 'draft' in elem.attrib and \
                                elem.attrib.get('type') in date_formats:
                            continue
                        try:
                            date_formats[elem.attrib.get('type')] = \
                                dates.parse_pattern(text_type(
                                    elem.findtext('dateFormat/pattern')))
                        except ValueError as e:
                            error(e)
                    elif elem.tag == 'alias':
                        date_formats = Alias(_translate_alias(
                            ['date_formats'], elem.attrib['path'])
                        )

            time_formats = data.setdefault('time_formats', {})
            for format in calendar.findall('timeFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'timeFormatLength':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and elem.attrib.get('type') in time_formats:
                            continue
                        try:
                            time_formats[elem.attrib.get('type')] = \
                                dates.parse_pattern(text_type(
                                    elem.findtext('timeFormat/pattern')))
                        except ValueError as e:
                            error(e)
                    elif elem.tag == 'alias':
                        time_formats = Alias(_translate_alias(
                            ['time_formats'], elem.attrib['path'])
                        )

            datetime_formats = data.setdefault('datetime_formats', {})
            for format in calendar.findall('dateTimeFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'dateTimeFormatLength':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and elem.attrib.get('type') \
                                in datetime_formats:
                            continue
                        try:
                            datetime_formats[elem.attrib.get('type')] = \
                                text_type(
                                    elem.findtext('dateTimeFormat/pattern'))
                        except ValueError as e:
                            error(e)
                    elif elem.tag == 'alias':
                        datetime_formats = Alias(_translate_alias(
                            ['datetime_formats'], elem.attrib['path'])
                        )

        # <numbers>
        number_symbols = data.setdefault('number_symbols', {})
        for elem in tree.findall('.//numbers/symbols/*'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib):
                continue
            number_symbols[elem.tag] = text_type(elem.text)

        decimal_formats = data.setdefault('decimal_formats', {})
        for elem in tree.findall('.//decimalFormats/decimalFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in decimal_formats:
                continue
            if elem.findall('./alias'):
                # TODO map the alias to its target
                continue
            pattern = text_type(elem.findtext('./decimalFormat/pattern'))
            decimal_formats[elem.attrib.get('type')] = \
                numbers.parse_pattern(pattern)

        scientific_formats = data.setdefault('scientific_formats', {})
        for elem in tree.findall(
                './/scientificFormats/scientificFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in scientific_formats:
                continue
            pattern = text_type(elem.findtext('scientificFormat/pattern'))
            scientific_formats[elem.attrib.get('type')] = \
                numbers.parse_pattern(pattern)

        currency_formats = data.setdefault('currency_formats', {})
        for elem in tree.findall(
                './/currencyFormats/currencyFormatLength/currencyFormat'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in currency_formats:
                continue
            for child in elem.getiterator():
                if child.tag == 'alias':
                    currency_formats[elem.attrib.get('type')] = Alias(
                        _translate_alias(['currency_formats',
                                          elem.attrib['type']],
                                         child.attrib['path'])
                    )
                elif child.tag == 'pattern':
                    pattern = text_type(child.text)
                    currency_formats[elem.attrib.get('type')] = \
                        numbers.parse_pattern(pattern)

        percent_formats = data.setdefault('percent_formats', {})
        for elem in tree.findall('.//percentFormats/percentFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in percent_formats:
                continue
            pattern = text_type(elem.findtext('percentFormat/pattern'))
            percent_formats[elem.attrib.get('type')] = \
                numbers.parse_pattern(pattern)

        currency_names = data.setdefault('currency_names', {})
        currency_names_plural = data.setdefault('currency_names_plural', {})
        currency_symbols = data.setdefault('currency_symbols', {})
        for elem in tree.findall('.//currencies/currency'):
            code = elem.attrib['type']
            for name in elem.findall('displayName'):
                if ('draft' in name.attrib) and code in currency_names:
                    continue
                if 'count' in name.attrib:
                    currency_names_plural.setdefault(code, {})[
                        name.attrib['count']] = text_type(name.text)
                else:
                    currency_names[code] = text_type(name.text)
            # TODO: support choice patterns for currency symbol selection
            symbol = elem.find('symbol')
            if symbol is not None and 'draft' not in symbol.attrib \
                    and 'choice' not in symbol.attrib:
                currency_symbols[code] = text_type(symbol.text)

        # <units>
        unit_patterns = data.setdefault('unit_patterns', {})
        for elem in tree.findall('.//units/unitLength'):
            unit_length_type = elem.attrib['type']
            for unit in elem.findall('unit'):
                unit_type = unit.attrib['type']
                for pattern in unit.findall('unitPattern'):
                    # Keyed as "<unit type>:<length>" (e.g. "duration-day:long").
                    box = unit_type
                    box += ':' + unit_length_type
                    unit_patterns.setdefault(box, {})[
                        pattern.attrib['count']] = text_type(pattern.text)

        date_fields = data.setdefault('date_fields', {})
        for elem in tree.findall('.//dates/fields/field'):
            field_type = elem.attrib['type']
            date_fields.setdefault(field_type, {})
            for rel_time in elem.findall('relativeTime'):
                rel_time_type = rel_time.attrib['type']
                for pattern in rel_time.findall('relativeTimePattern'):
                    date_fields[field_type].setdefault(rel_time_type, {})[
                        pattern.attrib['count']] = text_type(pattern.text)

        outfile = open(data_filename, 'wb')
        try:
            pickle.dump(data, outfile, 2)
        finally:
            outfile.close()
# Run the CLDR importer when executed directly as a script.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# Copyright (c) 2003 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Parallel Coordinates traffic grapher.
#
# This grapher uses the pcap library to listen for packets in transit
# over the specified interface. The returned packages can be filtered
# according to a BPF filter (see tcpdump(3) for further information on
# BPF filters). The packets are displayed on a parallel coordinates
# graph that allows the user to visualize the traffic flow on the
# network in real-time.
#
# The graphing part requires Tk support. Note that the user might need
# special permissions to be able to use pcap.
#
# Authors:
# Gerardo Richarte <gera@coresecurity.com>
# Javier Kohen <jkohen@coresecurity.com>
#
# Reference for:
# pcapy: findalldevs, open_live.
# ImpactPacket.
# ImpactDecoder.
## Some tunable variables follow.
# Period (in ms.) to wait between pcap polls.
POLL_PERIOD = 250
# Period (in ms.) to wait between screen refreshes.
REFRESH_PERIOD = 1000
# Refresh screen after receiving new packets (1 = enabled, 0 = disabled).
# You might want to turn off fast_draws if it consumes too much CPU,
# for instance, when used under X-Window over a network link.
fast_draws = 1
## End of user configurable section.
import socket
import sys
import time
import Tkinter
import pcapy
from pcapy import open_live, findalldevs, PcapError
from impacket.ImpactDecoder import EthDecoder, LinuxSLLDecoder
class NumericAxis:
    """A linear axis mapping values in [low, high] onto canvas pixels.

    The axis keeps a 5-pixel margin at each end of the canvas and widens
    its limits as new values are observed via addValue().
    """

    def __init__(self, canvas, name, low=0, high=0, direction='vertical'):
        self.canvas = canvas
        self.name = name
        self.setLowerLimit(low)
        self.setHigherLimit(high)
        self.direction = direction

    def screenLength(self):
        """Pixel span available for plotting (canvas size minus margins)."""
        if self.direction == 'vertical':
            size = self.canvas.winfo_height()
        else:
            size = self.canvas.winfo_width()
        return size - 10

    def scaleLength(self):
        """Width of the value range; never 0, so divisions stay safe."""
        span = self.getHigherLimit() - self.getLowerLimit()
        return span if span else span + 1

    def unscale(self, coord):
        """Inverse of scale(): map a screen coordinate back to a value."""
        offset = coord - 5
        value = offset * self.scaleLength() / self.screenLength()
        return int(value + self.getLowerLimit())

    def scale(self, value):
        """Map an axis value to a screen coordinate inside the margins."""
        span_px = self.screenLength()
        return (value - self.getLowerLimit()) * span_px / self.scaleLength() + 5

    def setLowerLimit(self, limit):
        # None means "leave unchanged"; 0 is a legitimate limit.
        if limit is not None:
            self._lowerLimit = limit

    def setHigherLimit(self, limit):
        if limit is not None:
            self._higherLimit = limit

    def getLowerLimit(self):
        return self._lowerLimit

    def getHigherLimit(self):
        return self._higherLimit

    def addValue(self, value):
        """Widen the limits so that *value* falls inside the axis range."""
        if value < self.getLowerLimit():
            self.setLowerLimit(value)
        if value > self.getHigherLimit():
            self.setHigherLimit(value)
class SymbolicAxis(NumericAxis):
    """An axis over an ordered list of discrete (symbolic) values.

    Integer positions on the underlying NumericAxis index into the
    ``values`` list; values unknown to scale() are registered on demand.
    """

    def __init__(self, canvas, name, values=None, direction='vertical'):
        # `values=None` avoids the shared-mutable-default pitfall of the
        # original `values=[]`; the caller's list is copied so later
        # addValue() calls never mutate it.
        if values is None:
            values = []
        NumericAxis.__init__(self, canvas, name, 0, len(values) - 1,
                             direction)
        self.values = list(values)

    def addValue(self, value, sort=1):
        """Insert *value* if unseen, keeping the list sorted unless sort=0."""
        if value in self.values:
            return
        self.values.append(value)
        if sort:
            self.values.sort()
        self.setHigherLimit(len(self.getValues()) - 1)

    def unscale(self, value):
        """Map a screen coordinate to a symbolic value, or None."""
        try:
            i = NumericAxis.unscale(self, value)
            if i < 0:
                return None
            return self.getValues()[i]
        except Exception:
            # An out-of-range index (or a degenerate screen size) means
            # "nothing under the pointer" rather than an error.
            return None

    def scale(self, value):
        """Map a symbolic value to a screen coordinate, registering it
        (sorted) first when it has not been seen before."""
        try:
            return NumericAxis.scale(self, self.getValues().index(value))
        except ValueError:
            self.addValue(value)
            return NumericAxis.scale(self, self.values.index(value))

    def getValues(self):
        return self.values
class ParallelCoordinates(Tkinter.Canvas):
    """Canvas that draws each observed value as a line across parallel axes.

    Every value is a dict keyed by axis name; duplicates bump a '_counter'
    field which (by default) drives the line colour.  Lines fade out and
    are dropped as their 'timestamp' ages (see fade()).
    """
    def __init__(self, master=None, cnf={}, **kw):
        # py2 idiom: forward constructor arguments to the Tk base class.
        apply(Tkinter.Canvas.__init__, (self, master, cnf), kw)
        self.lastSelection = None
        self.lastSelectionOval = None
        self._onSelection = None
        # Observed colour-value range; widened as values arrive.
        self.minColor = None
        self.maxColor = None
        # Name of the per-value field that drives the line colour.
        self.colorAxis = '_counter'
        self.values=[]
        # The horizontal main axis positions the individual vertical axes.
        self.mainAxis=SymbolicAxis(self,'mainAxis',[],'horizontal')
        master.bind('<Visibility>',self.draw)
        master.bind('<Motion>',self.buttonDown)
        master.bind('<1>',self.buttonDown)
        master.bind('<ButtonRelease-1>',self.buttonUp)
    def addAxis(self,axis):
        # sort=0 keeps axes in insertion order.
        self.mainAxis.addValue(axis,0)
    def sameValue(self,a,b):
        # Two values are the same when they agree on every axis field.
        for axis in self.mainAxis.getValues():
            if not a[axis.name] == b[axis.name]:
                return 0
        return 1
    def addValue(self,value):
        # Merge duplicates: bump the counter and refresh the timestamp.
        for each in self.values:
            if self.sameValue(value,each):
                each['_counter'] += 1
                each['timestamp'] = value['timestamp']
                value = each
                break
        else:
            # First sighting: register the value on every axis.
            value['_counter'] = 1
            for axis in self.mainAxis.getValues():
                axis.addValue(value[axis.name])
            self.values.append(value)
        # Track the observed colour range for normalization in color().
        color = value[self.colorAxis]
        if None == self.minColor or self.minColor > color:
            self.minColor = color
        if None == self.maxColor or self.maxColor < color:
            self.maxColor = color
    def removeValue(self, value):
        self.values.remove(value)
    def basicColor(self,val,fade = 1):
        # color scale is linear going through green -> yellow -> red
        # (lower to higher)
        if val < 0.5:
            val += val # val *= 2 (scale from 0 to 1)
            # between green - yellow
            red = 64*(1-val) + 255*val
            green = 200*(1-val) + 255*val
            blue = 64*(1-val) + 0
        else:
            # between yellow - red
            val -= 0.5
            val += val
            red = 255*(1-val) + 255*val
            green = 255*(1-val) + 64*val
            blue = 0 + 0
        return '#%02x%02x%02x' % (int(red*fade), int(green*fade), int(blue*fade))
    def fade(self,value):
        # Linear fade from 1.0 (fresh) to 0.0 once a value is 120s old.
        return max(0,(120.0-time.time()+value['timestamp'])/120.0)
    def color(self,value,fade = 1):
        # color scale is linear going through green -> yellow -> red (lower to higher)
        val = float(value[self.colorAxis]-self.minColor)/(self.maxColor-self.minColor+1)
        return self.basicColor(val,fade)
    def drawValueLine(self,value):
        # Draw one value as a segmented line; fully-faded values are removed.
        x = -1
        y = -1
        fade = self.fade(value)
        if not fade:
            self.removeValue(value)
            return
        color = self.color(value,fade)
        for axis in self.mainAxis.getValues():
            px = x
            py = y
            x = self.mainAxis.scale(axis)
            y = axis.scale(value[axis.name])
            if not px == -1:
                self.create_line(px,py,x,y,fill = color)
    def draw(self,event = None):
        # draw axis
        for i in self.find_all():
            self.delete(i)
        for axis in self.mainAxis.getValues():
            x = self.mainAxis.scale(axis)
            self.create_line(x,5,x,int(self.winfo_height())-5,fill = 'white')
        for value in self.values:
            self.drawValueLine(value)
        # draw color range
        # for i in range(200):
        # c = self.basicColor((i+0.0)/200)
        # self.create_line(0,i,100,i,fill = c)
    def buttonDown(self,event):
        # Accept pointer motion with a button held (state mask 0x0100) or a
        # button press (event.type '4'); locate the element under the pointer.
        if (event.state & 0x0100) or (event.type == '4'):
            axis = self.mainAxis.unscale(event.x)
            if not axis: return
            element = axis.unscale(event.y)
            if not element: return
            x = self.mainAxis.scale(axis)
            y = axis.scale(element)
            if self.lastSelectionOval:
                self.delete(self.lastSelectionOval)
            # Highlight the selection with a small yellow marker.
            self.lastSelectionOval = self.create_oval(x-3,y-3,x+3,y+3,fill = "yellow")
            if not self.lastSelection == (axis,element):
                self.lastSelection = (axis,element)
                if self._onSelection:
                    self._onSelection(self.lastSelection)
    def buttonUp(self,event):
        # Releasing the button clears the marker and notifies the listener.
        if self.lastSelectionOval:
            self.delete(self.lastSelectionOval)
            self.lastSelectionOval = None
            self.lastSelection = None
            if self._onSelection:
                self._onSelection(None)
    def onSelection(self,_onSelection):
        # Register the callback invoked with (axis, element) or None.
        self._onSelection = _onSelection
class Tracer:
    """Live pcap capture feeding a ParallelCoordinates traffic display.

    One axis is created per flow attribute (protocol, source/destination
    host and port); every decoded IP packet becomes a value on the graph.
    """

    def __init__(self, interface = 'eth0', filter = ''):
        # Parenthesized single-argument print is valid py2 and py3 alike.
        print("Tracing interface %s with filter `%s'." % (interface, filter))
        self.tk = Tkinter.Tk()
        self.pc = ParallelCoordinates(self.tk, background = "black")
        self.pc.pack(expand=1, fill="both")
        self.status = Tkinter.Label(self.tk)
        self.status.pack()
        self.tk.tkraise()
        self.tk.title('Personal SIDRA (IP-Tracer)')
        # NOTE(review): 256 binds to NumericAxis's `low` parameter (high
        # stays at its default 0); presumably the upper bound was intended
        # -- confirm before changing.
        self.pc.addAxis(NumericAxis(self.pc, 'proto', 256))
        self.pc.addAxis(SymbolicAxis(self.pc, 'shost'))
        self.pc.addAxis(SymbolicAxis(self.pc, 'sport'))
        self.pc.addAxis(SymbolicAxis(self.pc, 'dport'))
        self.pc.addAxis(SymbolicAxis(self.pc, 'dhost'))
        self.pc.onSelection(self.newSelection)
        self.interface = interface
        self.filter = filter

    def timerDraw(self, event = None):
        """Periodic full redraw; re-arms itself on every call."""
        self.pc.draw()
        self.tk.after(REFRESH_PERIOD, self.timerDraw)

    def start(self):
        """Open the capture, pick a link-layer decoder, run the Tk loop."""
        self.p = open_live(self.interface, 1600, 0, 100)
##        self.p.setnonblock(1)
        if self.filter:
            self.p.setfilter(self.filter)
        # Query the type of the link and instantiate a decoder accordingly.
        datalink = self.p.datalink()
        if pcapy.DLT_EN10MB == datalink:
            self.decoder = EthDecoder()
        elif pcapy.DLT_LINUX_SLL == datalink:
            self.decoder = LinuxSLLDecoder()
        else:
            # The original built this message as ("..." % datalink) with no
            # %-specifier, which raised TypeError instead of this Exception.
            raise Exception("Datalink type not supported: %s" % datalink)
        self.tk.after(POLL_PERIOD, self.poll)
        self.tk.after(REFRESH_PERIOD, self.timerDraw)
        self.tk.bind('q', self.quit)
        self.tk.mainloop()

    def quit(self, event):
        self.tk.quit()

    def poll(self, event = None):
        """Drain every packet pcap has buffered, then re-arm the timer."""
        self.tk.after(POLL_PERIOD, self.poll)
        received = 0
        while 1:
            try:
                hdr, data = self.p.next()
            except PcapError:
                # No more packets available right now.
                break
            self.newPacket(hdr.getcaplen(), data, hdr.getts()[0])
            received = 1
        if received and fast_draws:
            self.pc.draw()

    def newPacket(self, len, data, timestamp):
        """Decode one captured packet and add it to the graph.

        Frames that cannot be decoded or that are not IP are silently
        dropped.  (`len` is unused; kept for interface compatibility.)
        """
        try:
            p = self.decoder.decode(data)
        except Exception:
            # Undecodable frame: drop it.  (The original passed here and
            # relied on a NameError below to bail out.)
            return
        value = {}
        try:
            value['timestamp'] = timestamp
            value['shost'] = p.child().get_ip_src()
            value['dhost'] = p.child().get_ip_dst()
            value['proto'] = p.child().child().protocol
            value['sport'] = -1
            value['dport'] = -1
        except Exception:
            # Not an IP packet (or unexpected layout): ignore it.
            return
        try:
            if value['proto'] == socket.IPPROTO_TCP:
                value['dport'] = p.child().child().get_th_dport()
                value['sport'] = p.child().child().get_th_sport()
            elif value['proto'] == socket.IPPROTO_UDP:
                value['dport'] = p.child().child().get_uh_dport()
                value['sport'] = p.child().child().get_uh_sport()
        except Exception:
            # Ports stay at -1 when the transport layer cannot be read.
            pass
        self.pc.addValue(value)

    def setStatus(self, status):
        self.status.configure(text = status)

    def newSelection(self, selection):
        """Show the clicked (axis, element) pair in the status bar."""
        if selection:
            self.setStatus('%s:%s' % (selection[0].name, selection[1]))
        else:
            self.setStatus('')
def getInterfaces():
    """Return the pcap-capable interface names, or an error string.

    The current user will be able to listen on every returned interface
    (using open_live to open them).
    """
    ifs = findalldevs()
    if not ifs:
        # pcap found nothing this user may open; most likely a permissions issue.
        return "You don't have enough permissions to open any interface on this system."
    return ifs
def printUsage():
print """Usage: %s [interface [filter]]
Interface is the name of a local network interface, see the list of available interfaces below.
Filter is a BPF filter, as described in tcpdump(3)'s man page.
Available interfaces for this user: %s
""" % (sys.argv[0], getInterfaces())
def main():
    """Parse the command line, build a Tracer, and start capturing."""
    argc = len(sys.argv)
    if argc == 1:
        # No interface given: show the usage text, then fall back to the
        # default interface and empty filter.
        printUsage()
        graph = Tracer()
    elif argc == 2:
        graph = Tracer(sys.argv[1])
    elif argc == 3:
        graph = Tracer(sys.argv[1], sys.argv[2])
    else:
        printUsage()
        sys.exit(1)
    graph.start()
main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Backend classes should provide common interface
"""
import uuid
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from django.core.mail import EmailMessage
from django.http import Http404
from django.shortcuts import render, redirect
from django.template import Context, loader
from django.utils.translation import ugettext as _
from ..models import get_user_model
from ..utils import create_organization
from ..utils import model_field_attr
from .forms import UserRegistrationForm, OrganizationRegistrationForm
from .tokens import RegistrationTokenGenerator
class BaseBackend(object):
    """
    Base backend class for registering and inviting users to an organization
    """
    # Organization model class this backend operates on; instances override
    # it through the `org_model` argument to __init__ (None selects the
    # default organization model, see activate_organizations).
    org_model = None
def __init__(self, org_model=None, *args, **kwargs):
self.user_model = get_user_model()
self.org_model = org_model
    def get_urls(self):
        """Return the URL patterns contributed by this backend (abstract)."""
        raise NotImplementedError
    def get_success_url(self):
        """Will return the class's `success_url` attribute unless overridden"""
        # Abstract here; concrete backends supply the post-registration URL.
        raise NotImplementedError
def get_form(self, **kwargs):
"""Returns the form for registering or inviting a user"""
if not hasattr(self, 'form_class'):
raise AttributeError(_("You must define a form_class"))
return self.form_class(**kwargs)
def get_token(self, user, **kwargs):
"""Returns a unique token for the given user"""
return RegistrationTokenGenerator().make_token(user)
def get_username(self):
"""
Returns a UUID-based 'random' and unique username.
This is required data for user models with a username field.
"""
return str(uuid.uuid4())[:model_field_attr(self.user_model, 'username', 'max_length')]
def activate_organizations(self, user):
"""
Activates the related organizations for the user.
It only activates the related organizations by model type - that is, if
there are multiple types of organizations then only organizations in
the provided model class are activated.
"""
try:
relation_name = self.org_model().user_relation_name
except TypeError:
# No org_model specified, raises a TypeError because NoneType is
# not callable. Thiis the most sensible default
relation_name = "organizations_organization"
organization_set = getattr(user, relation_name)
for org in organization_set.filter(is_active=False):
org.is_active = True
org.save()
def activate_view(self, request, user_id, token):
"""
View function that activates the given User by setting `is_active` to
true if the provided information is verified.
"""
try:
user = self.user_model.objects.get(id=user_id, is_active=False)
except self.user_model.DoesNotExist:
raise Http404(_("Your URL may have expired."))
if not RegistrationTokenGenerator().check_token(user, token):
raise Http404(_("Your URL may have expired."))
form = self.get_form(data=request.POST or None, instance=user)
if form.is_valid():
form.instance.is_active = True
user = form.save()
user.set_password(form.cleaned_data['password'])
user.save()
self.activate_organizations(user)
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
login(request, user)
return redirect(self.get_success_url())
return render(request, 'organizations/register_form.html',
{'form': form})
def send_reminder(self, user, sender=None, **kwargs):
"""Sends a reminder email to the specified user"""
if user.is_active:
return False
token = RegistrationTokenGenerator().make_token(user)
kwargs.update({'token': token})
self._send_email(user, self.reminder_subject, self.reminder_body,
sender, **kwargs)
# This could be replaced with a more channel agnostic function, most likely
# in a custom backend.
def _send_email(self, user, subject_template, body_template,
sender=None, **kwargs):
"""Utility method for sending emails to new users"""
if sender:
from_email = "%s %s <%s>" % (sender.first_name, sender.last_name,
settings.DEFAULT_FROM_EMAIL)
reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name,
sender.email)
else:
from_email = settings.DEFAULT_FROM_EMAIL
reply_to = from_email
headers = {'Reply-To': reply_to}
kwargs.update({'sender': sender, 'user': user})
ctx = Context(kwargs, autoescape=False)
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(ctx).strip() # Remove stray newline characters
body = body_template.render(ctx)
return EmailMessage(subject, body, from_email, [user.email],
headers=headers).send()
class RegistrationBackend(BaseBackend):
    """
    A backend for allowing new users to join the site by creating a new user
    associated with a new organization.
    """
    # NOTE this backend stands to be simplified further, as email verification
    # should be beyond the purview of this app
    activation_subject = 'organizations/email/activation_subject.txt'
    activation_body = 'organizations/email/activation_body.html'
    reminder_subject = 'organizations/email/reminder_subject.txt'
    reminder_body = 'organizations/email/reminder_body.html'
    form_class = UserRegistrationForm
    def get_success_url(self):
        """URL shown after a successful activation."""
        return reverse('registration_success')
    def get_urls(self):
        """URL patterns for the registration flow: success, activate, create."""
        return patterns('',
            url(r'^complete/$', view=self.success_view,
                name="registration_success"),
            url(r'^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
                view=self.activate_view, name="registration_register"),
            url(r'^$', view=self.create_view, name="registration_create"),
        )
    def register_by_email(self, email, sender=None, request=None, **kwargs):
        """
        Returns a User object filled with dummy data and not active, and sends
        an invitation email.
        """
        try:
            user = self.user_model.objects.get(email=email)
        except self.user_model.DoesNotExist:
            # No such user yet: create an inactive placeholder account with a
            # random username/password; real values come in during activation.
            user = self.user_model.objects.create(username=self.get_username(),
                    email=email, password=self.user_model.objects.make_random_password())
            user.is_active = False
            user.save()
        self.send_activation(user, sender, **kwargs)
        return user
    def send_activation(self, user, sender=None, **kwargs):
        """
        Invites a user to join the site
        """
        # Already-active users need no activation email.
        if user.is_active:
            return False
        token = self.get_token(user)
        kwargs.update({'token': token})
        self._send_email(user, self.activation_subject, self.activation_body,
                sender, **kwargs)
    def create_view(self, request):
        """
        Initiates the organization and user account creation process
        """
        # Logged-in users should use the normal "add organization" flow.
        if request.user.is_authenticated():
            return redirect("organization_add")
        form = OrganizationRegistrationForm(request.POST or None)
        if form.is_valid():
            try:
                user = self.user_model.objects.get(email=form.cleaned_data['email'])
            except self.user_model.DoesNotExist:
                # Fresh email: create an inactive placeholder user.
                user = self.user_model.objects.create(username=self.get_username(),
                        email=form.cleaned_data['email'],
                        password=self.user_model.objects.make_random_password())
                user.is_active = False
                user.save()
            else:
                # Email already registered: send them to the logged-in flow.
                return redirect("organization_add")
            organization = create_organization(user, form.cleaned_data['name'],
                    form.cleaned_data['slug'], is_active=False)
            return render(request, 'organizations/register_success.html',
                    {'user': user, 'organization': organization})
        return render(request, 'organizations/register_form.html',
                {'form': form})
    def success_view(self, request):
        """Simple confirmation page after registration completes."""
        return render(request, 'organizations/register_success.html', {})
class InvitationBackend(BaseBackend):
    """
    A backend for inviting new users to join the site as members of an
    organization.
    """
    invitation_subject = 'organizations/email/invitation_subject.txt'
    invitation_body = 'organizations/email/invitation_body.html'
    reminder_subject = 'organizations/email/reminder_subject.txt'
    reminder_body = 'organizations/email/reminder_body.html'
    form_class = UserRegistrationForm
    def get_success_url(self):
        # TODO get this url name from an attribute
        return reverse('organization_list')
    def get_urls(self):
        # TODO enable naming based on a model?
        return patterns('',
            url(r'^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
                view=self.activate_view, name="invitations_register"),
        )
    def invite_by_email(self, email, sender=None, request=None, **kwargs):
        """Creates an inactive user with the information we know and then sends
        an invitation email for that user to complete registration.
        If your project uses email in a different way then you should make
        sure to extend this method as it only checks the `email` attribute
        for Users.
        """
        try:
            user = self.user_model.objects.get(email=email)
        except self.user_model.DoesNotExist:
            # TODO break out user creation process
            # Unknown email: create an inactive placeholder account.
            user = self.user_model.objects.create(username=self.get_username(),
                    email=email, password=self.user_model.objects.make_random_password())
            user.is_active = False
            user.save()
        self.send_invitation(user, sender, **kwargs)
        return user
    def send_invitation(self, user, sender=None, **kwargs):
        """An intermediary function for sending an invitation email that
        selects the templates, generating the token, and ensuring that the user
        has not already joined the site.
        """
        # Active users have already joined; do not invite them again.
        if user.is_active:
            return False
        token = self.get_token(user)
        kwargs.update({'token': token})
        self._send_email(user, self.invitation_subject, self.invitation_body,
                sender, **kwargs)
| |
# -*- coding: utf-8 -*-
import unittest
from mock import Mock
import cloud4rpi
from cloud4rpi.errors import InvalidConfigError
from cloud4rpi.errors import UnexpectedVariableTypeError
from cloud4rpi.errors import UnexpectedVariableValueTypeError
class ApiClientMock(object):
    """Test double for the cloud4rpi API client.

    The publish_* channels are ``Mock`` objects so tests can assert on the
    payloads a Device hands to the API, and ``raise_on_command`` simulates
    the broker delivering an incoming command to the registered handler.
    """
    def __init__(self):
        # Every publish channel records its calls for later assertions.
        for channel in ('publish_config', 'publish_data', 'publish_diag'):
            setattr(self, channel, Mock())
        # Default command handler is a no-op; Device wiring replaces it.
        self.on_command = lambda cmd: None
    def assert_publish_data_called_with(self, expected):
        """Assert the most recent data publish carried *expected* as 'cr' data."""
        return self.publish_data.assert_called_with(expected, data_type='cr')
    def raise_on_command(self, cmd):
        """Deliver *cmd* to whatever handler is currently registered."""
        self.on_command(cmd)
class MockSensor(object):
    """Fake sensor that stores a single value.

    ``read`` is a ``Mock`` returning the initial value, mimicking a
    sensor-read callable; the get_* methods exercise bound-method binding.
    """
    def __init__(self, value=42):
        self.read = Mock(return_value=value)
        self._state = value
    def get_state(self):
        """Return the stored value without changing it."""
        return self._state
    def get_updated_state(self, value):
        """Replace the stored value with *value* and return the new value."""
        self._state = value
        return self._state
    def get_incremented_state(self, value):
        """Return stored value + *value*; the stored value is untouched."""
        return self._state + value
class TestDevice(unittest.TestCase):
    """Tests for cloud4rpi.Device declare/read/publish behavior.

    Each test wires a Device to an ApiClientMock and asserts the exact
    payloads forwarded to the API client.
    """
    def testDeclareVariables(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'CPUTemp': {
                'type': 'numeric',
                'bind': MockSensor()
            }
        })
        cfg = device.read_config()
        self.assertEqual(cfg, [{'name': 'CPUTemp', 'type': 'numeric'}])
    def testDeclareVariablesValidation(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        # 'number' is not a valid variable type ('numeric' is).
        with self.assertRaises(UnexpectedVariableTypeError):
            device.declare({
                'CPUTemp': {
                    'type': 'number',
                    'bind': MockSensor()
                }
            })
    def testDeclareDiag(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare_diag({
            'IPAddress': '8.8.8.8',
            'Host': 'hostname',
        })
        diag = device.read_diag()
        self.assertEqual(diag, {'IPAddress': '8.8.8.8', 'Host': 'hostname'})
    def testReadConfig(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        # A second declare() replaces the first: only CPUTemp survives.
        device.declare({
            'SomeVar': {
                'type': 'string'
            }
        })
        device.declare({
            'CPUTemp': {
                'type': 'numeric',
                'bind': MockSensor()
            }
        })
        cfg = device.read_config()
        self.assertEqual(cfg, [{'name': 'CPUTemp', 'type': 'numeric'}])
    def testReadConfigIfNotDeclared(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        self.assertEqual(device.read_config(), [])
    def testReadVariables(self):
        # A non-callable bind (dict) leaves the declared value untouched;
        # a sensor object bind is read for the current value.
        handler = {}
        temperature_sensor = MockSensor(73)
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': handler
            },
            'Temperature': {
                'type': 'numeric',
                'value': True,
                'bind': temperature_sensor
            }
        })
        data = device.read_data()
        self.assertEqual(data, {
            'LEDOn': False,
            'Temperature': 73
        })
    def testReadVariablesDoesNotContainsEmptyVars(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        self.assertEqual(device.read_data(), {})
    def testReadVariablesFromClassMethod(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        sensor = MockSensor(10)
        device.declare({
            'MyParam': {
                'type': 'numeric',
                'bind': sensor.get_state
            },
        })
        data = device.read_data()
        self.assertEqual(data, {
            'MyParam': 10,
        })
    def testReadVariablesFromClassMethodWithCurrent(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        sensor = MockSensor(10)
        # Bound method receives the current value (1): 10 + 1 == 11.
        device.declare({
            'MyParam': {
                'type': 'numeric',
                'value': 1,
                'bind': sensor.get_incremented_state
            },
        })
        data = device.read_data()
        self.assertEqual(data, {
            'MyParam': 11,
        })
    def testReadDiag(self):
        # Diag entries may be sensor objects, callables, or plain values.
        temperature_sensor = MockSensor(73)
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare_diag({
            'CPUTemperature': temperature_sensor,
            'IPAddress': lambda x: '8.8.8.8',
            'OSName': lambda x: 'Linux',
            'Host': 'weather_station'
        })
        diag = device.read_diag()
        self.assertEqual(diag, {
            'CPUTemperature': 73,
            'IPAddress': '8.8.8.8',
            'OSName': 'Linux',
            'Host': 'weather_station'
        })
    def testPublishConfig(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        cfg = [
            {'name': 'CPUTemp', 'type': 'numeric'},
            {'name': 'Cooler', 'type': 'bool'}
        ]
        device.publish_config(cfg)
        api.publish_config.assert_called_with(cfg)
    def testReadBeforePublishConfig(self):
        # publish_config() without arguments publishes the declared config.
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'CPUTemp': {
                'type': 'numeric',
                'bind': MockSensor()
            }
        })
        device.publish_config()
        cfg = [{'name': 'CPUTemp', 'type': 'numeric'}]
        api.publish_config.assert_called_with(cfg)
    def testPublishConfigFail_NotAnArray(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        cfg = {'name': 'CPUTemp', 'type': 'numeric'}
        with self.assertRaises(InvalidConfigError):
            device.publish_config(cfg)
        api.publish_config.assert_not_called()
    def testPublishConfigFail_UnexpectedVariableType(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        cfg = [{'name': 'CPUTemp', 'type': 'number'}]
        with self.assertRaises(UnexpectedVariableTypeError):
            device.publish_config(cfg)
        api.publish_config.assert_not_called()
    def testPublishDiag(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        diag = {
            'IPAddress': '8.8.8.8',
            'Host': 'hostname'
        }
        device.publish_diag(diag)
        api.publish_diag.assert_called_with(diag)
    def testReadBeforePublishDiag(self):
        # publish_diag() without arguments reads and publishes declared diag.
        temperature_sensor = MockSensor(24)
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare_diag({
            'CPUTemperature': temperature_sensor,
            'IPAddress': lambda x: '8.8.8.8',
        })
        device.publish_diag()
        diag = {'IPAddress': '8.8.8.8', 'CPUTemperature': 24}
        api.publish_diag.assert_called_with(diag)
    def testPublishVariablesOnlyData(self):
        # Undeclared keys ('TheAnswer') are filtered out of the publish.
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'Temperature': {
                'type': 'numeric'
            },
            'Cooler': {
                'type': 'bool',
            }
        })
        data = {
            'Temperature': 36.6,
            'Cooler': True,
            'TheAnswer': 42
        }
        device.publish_data(data)
        api.publish_data.assert_called_with({
            'Temperature': 36.6,
            'Cooler': True
        })
    def testPublishNotDeclaredVariables(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        data = {
            'Temperature': 36.6,
            'Cooler': True,
            'TheAnswer': 42
        }
        device.publish_data(data)
        api.publish_data.assert_called_with({})
    def testReadBeforePublishData(self):
        # publish_data() without arguments reads the bound sensors first.
        temperature_sensor = MockSensor(24)
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'Temperature': {
                'type': 'numeric',
                'value': True,
                'bind': temperature_sensor
            }
        })
        device.publish_data()
        data = {'Temperature': 24}
        api.publish_data.assert_called_with(data)
    def testDataReadValidation_Bool(self):
        # Values read from bindings are coerced to the declared type.
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'CoolerOn': {
                'type': 'bool',
                'value': True,
                'bind': lambda x: 100
            }
        })
        device.publish_data()
        data = {'CoolerOn': True}
        api.publish_data.assert_called_with(data)
    def testDataReadValidation_Numeric(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'ReadyState': {
                'type': 'numeric',
                'value': True,
                'bind': lambda x: True
            }
        })
        device.publish_data()
        data = {'ReadyState': 1}
        api.publish_data.assert_called_with(data)
    def testDataReadValidation_String(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'ReadyState': {
                'type': 'string',
                'value': True,
                'bind': lambda x: True
            }
        })
        device.publish_data()
        data = {'ReadyState': 'true'}
        api.publish_data.assert_called_with(data)
    def testDataReadValidation_Location(self):
        api = ApiClientMock()
        device = cloud4rpi.Device(api)
        device.declare({
            'MyLocation': {
                'type': 'location',
                'value': True,
                'bind': lambda x: {'lat': 37.89, 'lng': 75.43}
            }
        })
        device.publish_data()
        data = {'MyLocation': {'lat': 37.89, 'lng': 75.43}}
        api.publish_data.assert_called_with(data)
class CommandHandling(unittest.TestCase):
    """Tests for Device handling of incoming commands.

    A command arriving via the API handler invokes the matching variable's
    binding, updates the value, and publishes the result back as 'cr' data.
    """
    def setUp(self):
        super(CommandHandling, self).setUp()
        self.api = ApiClientMock()
        self.device = cloud4rpi.Device(self.api)
    def testCallsBoundFunction(self):
        handler = Mock(return_value=True)
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': handler
            }
        })
        self.api.raise_on_command({'LEDOn': True})
        handler.assert_called_with(True)
    def testCallsBoundFunctionWithAnArgument(self):
        sensor = MockSensor(0)
        self.device.declare({
            'Status': {
                'type': 'numeric',
                'value': 10,
                'bind': sensor.get_updated_state
            }
        })
        self.api.raise_on_command({'Status': 20})
        self.api.assert_publish_data_called_with({'Status': 20})
    def testBindIsNotCallableFunction(self):
        # A non-callable bind: the command value is stored directly.
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': 'this is not a function'
            }
        })
        expected = {'LEDOn': True}
        self.api.raise_on_command(expected)
        self.api.assert_publish_data_called_with(expected)
        data = self.device.read_data()
        self.assertEqual(data, expected)
    def testDirectUpdateVariableValue(self):
        # No bind at all: the command still updates and publishes the value.
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
            }
        })
        expected = {'LEDOn': True}
        self.api.raise_on_command(expected)
        self.api.assert_publish_data_called_with(expected)
        data = self.device.read_data()
        self.assertEqual(data, expected)
    def testSkipUnknownVariable(self):
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': lambda x: x
            }
        })
        # Commands for undeclared variables publish nothing.
        self.api.raise_on_command({'Other': True})
        self.api.publish_data.assert_not_called()
    def testAllowPublishNullValue(self):
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': lambda x: None
            }
        })
        self.api.raise_on_command({'LEDOn': True})
        self.api.assert_publish_data_called_with({'LEDOn': None})
    def testValidateCommandValueForBool(self):
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': lambda x: x
            }
        })
        # A string is not a valid bool command value.
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.api.raise_on_command({'LEDOn': 'false'})
    def testValidateCommandValueStringToNumeric(self):
        self.device.declare({
            'Status': {
                'type': 'numeric',
                'value': 0,
                'bind': lambda x: x
            }
        })
        # Numeric strings are converted before dispatch.
        self.api.raise_on_command({'Status': '100'})
        self.api.assert_publish_data_called_with({'Status': 100})
    def testValidateCommandValueUnicodeToNumeric(self):
        self.device.declare({
            'Status': {
                'type': 'numeric',
                'value': 0,
                'bind': lambda x: x
            }
        })
        unicode_val = u'38.5'
        self.api.raise_on_command({'Status': unicode_val})
        self.api.assert_publish_data_called_with({'Status': 38.5})
    def testValidateCommandValueBoolToNumeric(self):
        self.device.declare({
            'Status': {
                'type': 'numeric',
                'value': 0,
                'bind': lambda x: x
            }
        })
        self.api.raise_on_command({'Status': True})
        self.api.assert_publish_data_called_with({'Status': 1})
    def testValidateCommandValueUnicodeToString(self):
        self.device.declare({
            'Percent': {
                'type': 'string',
                'value': 0,
                'bind': lambda x: x
            }
        })
        unicode_val = u'38.5%'
        self.api.raise_on_command({'Percent': unicode_val})
        self.api.assert_publish_data_called_with({'Percent': '38.5%'})
    def testPublishBackUpdatedVariableValues(self):
        # Each commanded variable publishes what its binding returned, not
        # necessarily the commanded value (see 'Status' -> 42).
        sensor = MockSensor(36.6)
        self.device.declare({
            'LEDOn': {
                'type': 'bool',
                'value': False,
                'bind': lambda x: x
            },
            'Cooler': {
                'type': 'bool',
                'value': True,
                'bind': lambda x: x
            },
            'Status': {
                'type': 'numeric',
                'value': 0,
                'bind': lambda x: 42
            },
            'Temp': {
                'type': 'numeric',
                'value': 24.4,
                'bind': sensor
            }
        })
        self.api.raise_on_command({'LEDOn': True,
                'Cooler': False,
                'Status': 2,
                'Temp': 36.6})
        expected = {
            'Cooler': False,
            'Status': 42,
            'LEDOn': True,
            'Temp': 36.6
        }
        self.api.assert_publish_data_called_with(expected)
    def testPublishBackOnlyCommandVariables(self):
        # Variables not named in the command are not published back.
        self.device.declare({
            'Actuator': {
                'type': 'string',
                'value': 'to be updated and published',
                'bind': lambda x: x
            },
            'Sensor': {
                'type': 'string',
                'value': None,
                'bind': 'do not updated by a command'
            },
        })
        self.api.raise_on_command({'Actuator': 'ON'})
        self.api.assert_publish_data_called_with({'Actuator': 'ON'})
class PayloadValidation(unittest.TestCase):
    """Tests for per-type coercion/validation of published payload values.

    Pins the coercion rules: numerics accept strings/bools and map NaN/Inf
    to None; bools use truthiness; strings use str() formatting; locations
    require 'lat'/'lng' keys and drop extra keys.
    """
    def setUp(self):
        super(PayloadValidation, self).setUp()
        self.api = ApiClientMock()
        self.device = cloud4rpi.Device(self.api)
    def testNumeric(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': 36.3})
        self.api.publish_data.assert_called_with({'Temp': 36.3})
    def testNumericAsNull(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': None})
        self.api.publish_data.assert_called_with({'Temp': None})
    def testNumericAsInt(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': 36})
        self.api.publish_data.assert_called_with({'Temp': 36})
    def testNumericAsFloat(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': 36.6})
        self.api.publish_data.assert_called_with({'Temp': 36.6})
    def testNumericAsString(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': "36.6"})
        self.api.publish_data.assert_called_with({'Temp': 36.6})
    def testNumericAsBool(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': True})
        self.api.publish_data.assert_called_with({'Temp': 1.0})
    def testNumericAsNaN(self):
        # Non-finite numerics are normalized to None for the wire format.
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': float('NaN')})
        self.api.publish_data.assert_called_with({'Temp': None})
    def testNumericAsPositiveInfinity(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': float('Inf')})
        self.api.publish_data.assert_called_with({'Temp': None})
    def testNumericAsNegativeInfinity(self):
        self.device.declare({'Temp': {'type': 'numeric'}})
        self.device.publish_data({'Temp': -float('Inf')})
        self.api.publish_data.assert_called_with({'Temp': None})
    def testBool(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': True})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testBoolAsNull(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': None})
        self.api.publish_data.assert_called_with({'PowerOn': None})
    def testBoolAsString(self):
        # Strings are rejected for bools rather than parsed.
        self.device.declare({'PowerOn': {'type': 'bool'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'PowerOn': "True"})
    def testBoolAsPositiveNumber(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': 24.1})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testBoolAsNegativeNumber(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': -10.1})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testBoolAsZeroNumber(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': 0})
        self.api.publish_data.assert_called_with({'PowerOn': False})
    def testBoolAsNaN(self):
        # NaN is truthy in Python, so it maps to True.
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': float('NaN')})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testBoolAsPositiveInfinity(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': float('Inf')})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testBoolAsNegativeInfinity(self):
        self.device.declare({'PowerOn': {'type': 'bool'}})
        self.device.publish_data({'PowerOn': -float('Inf')})
        self.api.publish_data.assert_called_with({'PowerOn': True})
    def testString(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': '100'})
        self.api.publish_data.assert_called_with({'Status': '100'})
    def testStringAsNull(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': None})
        self.api.publish_data.assert_called_with({'Status': None})
    def testStringAsNumeric(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': 100.100})
        self.api.publish_data.assert_called_with({'Status': '100.1'})
    def testStringAsNaN(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': float('NaN')})
        self.api.publish_data.assert_called_with({'Status': 'nan'})
    def testStringAsPositiveInfinity(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': float('Inf')})
        self.api.publish_data.assert_called_with({'Status': 'inf'})
    def testStringAsNegativeInfinity(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': -float('Inf')})
        self.api.publish_data.assert_called_with({'Status': '-inf'})
    def testStringAsInt(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': 100})
        self.api.publish_data.assert_called_with({'Status': '100'})
    def testStringAsBool(self):
        self.device.declare({'Status': {'type': 'string'}})
        self.device.publish_data({'Status': True})
        self.api.publish_data.assert_called_with({'Status': 'true'})
    def testLocation(self):
        location = {'lat': 37.89, 'lng': 75.43}
        self.device.declare({'Pos': {'type': 'location'}})
        self.device.publish_data({'Pos': location})
        self.api.publish_data.assert_called_with({'Pos': location})
    def testLocation_Filtering(self):
        # Extra keys are stripped; only lat/lng survive.
        obj = {'some': 'foo', 'lng': 75.43, 'lat': 37.89, 'other': 42}
        self.device.declare({'Pos': {'type': 'location'}})
        self.device.publish_data({'Pos': obj})
        location = {'lat': 37.89, 'lng': 75.43}
        self.api.publish_data.assert_called_with({'Pos': location})
    def testLocationAsNull(self):
        self.device.declare({'Pos': {'type': 'location'}})
        self.device.publish_data({'Pos': None})
        self.api.publish_data.assert_called_with({'Pos': None})
    def testLocationAsNaN(self):
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': float('NaN')})
    def testLocationAsInfinity(self):
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': float('Inf')})
    def testLocationAsEmptyObject(self):
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': {}})
    def testLocationWithIncorrectFields(self):
        # Key names are case/spelling sensitive: must be exactly lat/lng.
        location = {'Latitude': 37.89, 'LNG': 75.43}
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': location})
    def testLocationWithoutLatitude(self):
        location = {'lng': 75.43}
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': location})
    def testLocationWithoutLongitude(self):
        location = {'lat': 37.89}
        self.device.declare({'Pos': {'type': 'location'}})
        with self.assertRaises(UnexpectedVariableValueTypeError):
            self.device.publish_data({'Pos': location})
| |
"""Demo platform for the cover component."""
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP_TILT,
CoverEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.event import async_track_utc_time_change
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Demo covers."""
    # Position-only covers with a spread of initial positions.
    entities = [
        DemoCover(hass, "cover_1", "Kitchen Window"),
        DemoCover(hass, "cover_2", "Hall Window", 10),
        DemoCover(hass, "cover_3", "Living Room Window", 70, 50),
    ]
    # Garage door: open/close only, no intermediate positions.
    entities.append(
        DemoCover(
            hass,
            "cover_4",
            "Garage Door",
            device_class="garage",
            supported_features=(SUPPORT_OPEN | SUPPORT_CLOSE),
        )
    )
    # Tilt-only cover exercising the tilt feature flags.
    tilt_features = (
        SUPPORT_OPEN_TILT
        | SUPPORT_STOP_TILT
        | SUPPORT_CLOSE_TILT
        | SUPPORT_SET_TILT_POSITION
    )
    entities.append(
        DemoCover(
            hass,
            "cover_5",
            "Pergola Roof",
            tilt_position=60,
            supported_features=tilt_features,
        )
    )
    async_add_entities(entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Demo config entry.

    Delegates to the platform setup with an empty config so the config-entry
    path creates the same demo covers as YAML-based setup.
    """
    await async_setup_platform(hass, {}, async_add_entities)
class DemoCover(CoverEntity):
"""Representation of a demo cover."""
def __init__(
self,
hass,
unique_id,
name,
position=None,
tilt_position=None,
device_class=None,
supported_features=None,
):
"""Initialize the cover."""
self.hass = hass
self._unique_id = unique_id
self._name = name
self._position = position
self._device_class = device_class
self._supported_features = supported_features
self._set_position = None
self._set_tilt_position = None
self._tilt_position = tilt_position
self._requested_closing = True
self._requested_closing_tilt = True
self._unsub_listener_cover = None
self._unsub_listener_cover_tilt = None
self._is_opening = False
self._is_closing = False
if position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def device_info(self) -> DeviceInfo:
"""Return device info."""
return DeviceInfo(
identifiers={
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
name=self.name,
)
@property
def unique_id(self):
"""Return unique ID for cover."""
return self._unique_id
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def current_cover_tilt_position(self):
"""Return the current tilt position of the cover."""
return self._tilt_position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._device_class
@property
def supported_features(self):
"""Flag supported features."""
if self._supported_features is not None:
return self._supported_features
return super().supported_features
async def async_close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
if self._position is None:
self._closed = True
self.async_write_ha_state()
return
self._is_closing = True
self._listen_cover()
self._requested_closing = True
self.async_write_ha_state()
async def async_close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
if self._tilt_position in (0, None):
return
self._listen_cover_tilt()
self._requested_closing_tilt = True
async def async_open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100:
return
if self._position is None:
self._closed = False
self.async_write_ha_state()
return
self._is_opening = True
self._listen_cover()
self._requested_closing = False
self.async_write_ha_state()
async def async_open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
if self._tilt_position in (100, None):
return
self._listen_cover_tilt()
self._requested_closing_tilt = False
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs.get(ATTR_POSITION)
self._set_position = round(position, -1)
if self._position == position:
return
self._listen_cover()
self._requested_closing = position < self._position
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover til to a specific position."""
tilt_position = kwargs.get(ATTR_TILT_POSITION)
self._set_tilt_position = round(tilt_position, -1)
if self._tilt_position == tilt_position:
return
self._listen_cover_tilt()
self._requested_closing_tilt = tilt_position < self._tilt_position
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
self._is_closing = False
self._is_opening = False
if self._position is None:
return
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
self._set_position = None
async def async_stop_cover_tilt(self, **kwargs):
"""Stop the cover tilt."""
if self._tilt_position is None:
return
if self._unsub_listener_cover_tilt is not None:
self._unsub_listener_cover_tilt()
self._unsub_listener_cover_tilt = None
self._set_tilt_position = None
@callback
def _listen_cover(self):
"""Listen for changes in cover."""
if self._unsub_listener_cover is None:
self._unsub_listener_cover = async_track_utc_time_change(
self.hass, self._time_changed_cover
)
async def _time_changed_cover(self, now):
"""Track time changes."""
if self._requested_closing:
self._position -= 10
else:
self._position += 10
if self._position in (100, 0, self._set_position):
await self.async_stop_cover()
self._closed = self.current_cover_position <= 0
self.async_write_ha_state()
@callback
def _listen_cover_tilt(self):
"""Listen for changes in cover tilt."""
if self._unsub_listener_cover_tilt is None:
self._unsub_listener_cover_tilt = async_track_utc_time_change(
self.hass, self._time_changed_cover_tilt
)
async def _time_changed_cover_tilt(self, now):
    """Advance the simulated tilt position on each time tick."""
    step = -10 if self._requested_closing_tilt else 10
    self._tilt_position += step
    # Stop at either end of travel or at the requested tilt position.
    if self._tilt_position in (0, 100, self._set_tilt_position):
        await self.async_stop_cover_tilt()
    self.async_write_ha_state()
| |
"""
ulmo.twc.kbdi.core
~~~~~~~~~~~~~~~~~~~~~
This module provides direct access to `Texas Weather Connection`_ `Daily
Keetch-Byram Drought Index (KBDI)`_ dataset.
.. _Texas Weather Connection: http://twc.tamu.edu/
.. _Daily Keetch-Byram Drought Index (KBDI): http://twc.tamu.edu/drought/kbdi
"""
import datetime
import os
import numpy as np
import pandas
from ulmo import util
def get_data(county=None, start=None, end=None, as_dataframe=False, data_dir=None):
    """Retreives data.

    Parameters
    ----------
    county : ``None`` or str
        If specified, results will be limited to the county corresponding to the
        given 5-character Texas county fips code i.e. 48???.
    end : ``None`` or date (see :ref:`dates-and-times`)
        Results will be limited to data on or before this date. Default is the
        current date.
    start : ``None`` or date (see :ref:`dates-and-times`)
        Results will be limited to data on or after this date. Default is the
        start of the calendar year for the end date.
    as_dataframe: bool
        If ``False`` (default), a dict with a nested set of dicts will be
        returned with data indexed by 5-character Texas county FIPS code. If ``True``
        then a pandas.DataFrame object will be returned. The pandas dataframe
        is used internally, so setting this to ``True`` is a little bit faster
        as it skips a serialization step.
    data_dir : ``None`` or directory path
        Directory for holding downloaded data files. If no path is provided
        (default), then a user-specific directory for holding application data
        will be used (the directory will depend on the platform/operating
        system).

    Returns
    -------
    data : dict or pandas.Dataframe
        A dict or pandas.DataFrame representing the data. See the
        ``as_dataframe`` parameter for more.
    """
    if end is None:
        end_date = datetime.date.today()
    else:
        end_date = util.convert_date(end)
    if start is None:
        # Default to the start of the calendar year of the end date.
        start_date = datetime.date(end_date.year, 1, 1)
    else:
        start_date = util.convert_date(start)
    if data_dir is None:
        data_dir = os.path.join(util.get_ulmo_dir(), 'twc/kbdi')
    # One downloaded file per day in the requested range; concatenate them.
    # NOTE: pandas.tools.merge.concat was removed in pandas 0.20 — use the
    # public pandas.concat instead.
    df = pandas.concat([
        _date_dataframe(date, data_dir)
        for date in pandas.period_range(start_date, end_date, freq='D')
    ], ignore_index=True)
    # Attach FIPS codes by joining on the (upper-case) county name.
    fips_df = _fips_dataframe()
    df = pandas.merge(df, fips_df, left_on='county', right_on='name')
    del df['name']
    if county:
        df = df[df['fips'] == county]
    if as_dataframe:
        return df
    else:
        return _as_data_dict(df)
def _as_data_dict(df):
df['date'] = df['date'].map(str)
county_dict = {}
for county in df['fips'].unique():
county_df = df[df['fips'] == county]
county_data = county_df.T.drop(['fips'])
values = [v.to_dict() for k, v in county_data.iteritems()]
county_dict[county] = values
return county_dict
def _date_dataframe(date, data_dir):
    """Fetch and parse the KBDI summary file for a single *date*."""
    url = _get_data_url(date)
    with _open_data_file(url, data_dir) as data_file:
        frame = _parse_data_file(data_file)
    # Tag every row with the day it belongs to.
    frame['date'] = pandas.Period(date, freq='D')
    return frame
def _fips_dataframe():
# fips codes from http://www.census.gov/geo/www/ansi/national.txt
# with names adjusted to match twc kbdi: DEWITT --> DE WITT
codes = (
('ANDERSON', 48001),
('ANDREWS', 48003),
('ANGELINA', 48005),
('ARANSAS', 48007),
('ARCHER', 48009),
('ARMSTRONG', 48011),
('ATASCOSA', 48013),
('AUSTIN', 48015),
('BAILEY', 48017),
('BANDERA', 48019),
('BASTROP', 48021),
('BAYLOR', 48023),
('BEE', 48025),
('BELL', 48027),
('BEXAR', 48029),
('BLANCO', 48031),
('BORDEN', 48033),
('BOSQUE', 48035),
('BOWIE', 48037),
('BRAZORIA', 48039),
('BRAZOS', 48041),
('BREWSTER', 48043),
('BRISCOE', 48045),
('BROOKS', 48047),
('BROWN', 48049),
('BURLESON', 48051),
('BURNET', 48053),
('CALDWELL', 48055),
('CALHOUN', 48057),
('CALLAHAN', 48059),
('CAMERON', 48061),
('CAMP', 48063),
('CARSON', 48065),
('CASS', 48067),
('CASTRO', 48069),
('CHAMBERS', 48071),
('CHEROKEE', 48073),
('CHILDRESS', 48075),
('CLAY', 48077),
('COCHRAN', 48079),
('COKE', 48081),
('COLEMAN', 48083),
('COLLIN', 48085),
('COLLINGSWORTH', 48087),
('COLORADO', 48089),
('COMAL', 48091),
('COMANCHE', 48093),
('CONCHO', 48095),
('COOKE', 48097),
('CORYELL', 48099),
('COTTLE', 48101),
('CRANE', 48103),
('CROCKETT', 48105),
('CROSBY', 48107),
('CULBERSON', 48109),
('DALLAM', 48111),
('DALLAS', 48113),
('DAWSON', 48115),
('DE WITT', 48123),
('DEAF SMITH', 48117),
('DELTA', 48119),
('DENTON', 48121),
('DEWITT', 48123),
('DICKENS', 48125),
('DIMMIT', 48127),
('DONLEY', 48129),
('DUVAL', 48131),
('EASTLAND', 48133),
('ECTOR', 48135),
('EDWARDS', 48137),
('EL PASO', 48141),
('ELLIS', 48139),
('ERATH', 48143),
('FALLS', 48145),
('FANNIN', 48147),
('FAYETTE', 48149),
('FISHER', 48151),
('FLOYD', 48153),
('FOARD', 48155),
('FORT BEND', 48157),
('FRANKLIN', 48159),
('FREESTONE', 48161),
('FRIO', 48163),
('GAINES', 48165),
('GALVESTON', 48167),
('GARZA', 48169),
('GILLESPIE', 48171),
('GLASSCOCK', 48173),
('GOLIAD', 48175),
('GONZALES', 48177),
('GRAY', 48179),
('GRAYSON', 48181),
('GREGG', 48183),
('GRIMES', 48185),
('GUADALUPE', 48187),
('HALE', 48189),
('HALL', 48191),
('HAMILTON', 48193),
('HANSFORD', 48195),
('HARDEMAN', 48197),
('HARDIN', 48199),
('HARRIS', 48201),
('HARRISON', 48203),
('HARTLEY', 48205),
('HASKELL', 48207),
('HAYS', 48209),
('HEMPHILL', 48211),
('HENDERSON', 48213),
('HIDALGO', 48215),
('HILL', 48217),
('HOCKLEY', 48219),
('HOOD', 48221),
('HOPKINS', 48223),
('HOUSTON', 48225),
('HOWARD', 48227),
('HUDSPETH', 48229),
('HUNT', 48231),
('HUTCHINSON', 48233),
('IRION', 48235),
('JACK', 48237),
('JACKSON', 48239),
('JASPER', 48241),
('JEFF DAVIS', 48243),
('JEFFERSON', 48245),
('JIM HOGG', 48247),
('JIM WELLS', 48249),
('JOHNSON', 48251),
('JONES', 48253),
('KARNES', 48255),
('KAUFMAN', 48257),
('KENDALL', 48259),
('KENEDY', 48261),
('KENT', 48263),
('KERR', 48265),
('KIMBLE', 48267),
('KING', 48269),
('KINNEY', 48271),
('KLEBERG', 48273),
('KNOX', 48275),
('LA SALLE', 48283),
('LAMAR', 48277),
('LAMB', 48279),
('LAMPASAS', 48281),
('LAVACA', 48285),
('LEE', 48287),
('LEON', 48289),
('LIBERTY', 48291),
('LIMESTONE', 48293),
('LIPSCOMB', 48295),
('LIVE OAK', 48297),
('LLANO', 48299),
('LOVING', 48301),
('LUBBOCK', 48303),
('LYNN', 48305),
('MADISON', 48313),
('MARION', 48315),
('MARTIN', 48317),
('MASON', 48319),
('MATAGORDA', 48321),
('MAVERICK', 48323),
('MCCULLOCH', 48307),
('MCLENNAN', 48309),
('MCMULLEN', 48311),
('MEDINA', 48325),
('MENARD', 48327),
('MIDLAND', 48329),
('MILAM', 48331),
('MILLS', 48333),
('MITCHELL', 48335),
('MONTAGUE', 48337),
('MONTGOMERY', 48339),
('MOORE', 48341),
('MORRIS', 48343),
('MOTLEY', 48345),
('NACOGDOCHES', 48347),
('NAVARRO', 48349),
('NEWTON', 48351),
('NOLAN', 48353),
('NUECES', 48355),
('OCHILTREE', 48357),
('OLDHAM', 48359),
('ORANGE', 48361),
('PALO PINTO', 48363),
('PANOLA', 48365),
('PARKER', 48367),
('PARMER', 48369),
('PECOS', 48371),
('POLK', 48373),
('POTTER', 48375),
('PRESIDIO', 48377),
('RAINS', 48379),
('RANDALL', 48381),
('REAGAN', 48383),
('REAL', 48385),
('RED RIVER', 48387),
('REEVES', 48389),
('REFUGIO', 48391),
('ROBERTS', 48393),
('ROBERTSON', 48395),
('ROCKWALL', 48397),
('RUNNELS', 48399),
('RUSK', 48401),
('SABINE', 48403),
('SAN AUGUSTINE', 48405),
('SAN JACINTO', 48407),
('SAN PATRICIO', 48409),
('SAN SABA', 48411),
('SCHLEICHER', 48413),
('SCURRY', 48415),
('SHACKELFORD', 48417),
('SHELBY', 48419),
('SHERMAN', 48421),
('SMITH', 48423),
('SOMERVELL', 48425),
('STARR', 48427),
('STEPHENS', 48429),
('STERLING', 48431),
('STONEWALL', 48433),
('SUTTON', 48435),
('SWISHER', 48437),
('TARRANT', 48439),
('TAYLOR', 48441),
('TERRELL', 48443),
('TERRY', 48445),
('THROCKMORTON', 48447),
('TITUS', 48449),
('TOM GREEN', 48451),
('TRAVIS', 48453),
('TRINITY', 48455),
('TYLER', 48457),
('UPSHUR', 48459),
('UPTON', 48461),
('UVALDE', 48463),
('VAL VERDE', 48465),
('VAN ZANDT', 48467),
('VICTORIA', 48469),
('WALKER', 48471),
('WALLER', 48473),
('WARD', 48475),
('WASHINGTON', 48477),
('WEBB', 48479),
('WHARTON', 48481),
('WHEELER', 48483),
('WICHITA', 48485),
('WILBARGER', 48487),
('WILLACY', 48489),
('WILLIAMSON', 48491),
('WILSON', 48493),
('WINKLER', 48495),
('WISE', 48497),
('WOOD', 48499),
('YOAKUM', 48501),
('YOUNG', 48503),
('ZAPATA', 48505),
('ZAVALA', 48507),
)
df = pandas.DataFrame(np.array(codes))
df = df.rename(columns={0: 'name', 1: 'fips'})
df['fips'] = df['fips'].astype(int)
return df
def _get_data_url(date):
return 'http://twc.tamu.edu/weather_images/summ/summ%s.txt' % date.strftime('%Y%m%d')
def _parse_data_file(data_file):
"""
example:
COUNTY KBDI_AVG KBDI_MAX KBDI_MIN
----------------------------------------------------------------
ANDERSON 262 485 47
ANDREWS 485 614 357
...
"""
dtype = [
('county', '|S15'),
('avg', 'i4'),
('max', 'i4'),
('min', 'i4'),
]
data_array = np.genfromtxt(
data_file, delimiter=[31, 11, 11, 11], dtype=dtype, skip_header=2,
skip_footer=1, autostrip=True)
dataframe = pandas.DataFrame(data_array)
return dataframe
def _open_data_file(url, data_dir):
    """Return an open file handle for a data file, downloading it first
    unless an up-to-date copy is already cached in *data_dir*.
    """
    # The cache file is named after the last path segment of the URL.
    file_name = url.split('/')[-1]
    cache_path = os.path.join(data_dir, file_name)
    return util.open_file_for_url(url, cache_path, check_modified=True)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import tempfile
import os
from django import forms
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.conf.urls import patterns, url
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse
from django.contrib.admin import BooleanFieldListFilter
from .models import (Article, Chapter, Account, Media, Child, Parent, Picture,
Widget, DooHickey, Grommet, Whatsit, FancyDoodad, Category, Link,
PrePopulatedPost, PrePopulatedSubPost, CustomArticle, Section,
ModelWithStringPrimaryKey, Color, Thing, Actor, Inquisition, Sketch, Person,
Persona, Subscriber, ExternalSubscriber, OldSubscriber, Vodcast, EmptyModel,
Fabric, Gallery, Language, Recommendation, Recommender, Collector, Post,
Gadget, Villain, SuperVillain, Plot, PlotDetails, CyclicOne, CyclicTwo,
WorkHour, Reservation, FoodDelivery, RowLevelChangePermissionModel, Paper,
CoverLetter, Story, OtherStory, Book, Promo, ChapterXtra1, Pizza, Topping,
Album, Question, Answer, ComplexSortedPerson, PrePopulatedPostLargeSlug,
AdminOrderedField, AdminOrderedModelMethod, AdminOrderedAdminMethod,
AdminOrderedCallable, Report, Color2, UnorderedObject, MainPrepopulated,
RelatedPrepopulated, UndeletableObject, UserMessenger, Simple, Choice,
ShortMessage, Telegram)
def callable_year(dt_value):
    """Return the ``year`` attribute of *dt_value*, or None if it has none."""
    return getattr(dt_value, 'year', None)
# Makes the callable column sortable by the underlying 'date' field.
callable_year.admin_order_field = 'date'
# Inline Article editor with a prepopulated title and the same two
# fieldsets (collapsed / wide) as ArticleAdmin below.
class ArticleInline(admin.TabularInline):
    model = Article
    prepopulated_fields = {
        'title' : ('content',)
    }
    fieldsets=(
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section')
        })
    )


class ChapterInline(admin.TabularInline):
    model = Chapter


# Exercises list_filter entries that traverse ForeignKey/OneToOne relations
# to several depths.
class ChapterXtra1Admin(admin.ModelAdmin):
    list_filter = ('chap',
        'chap__title',
        'chap__book',
        'chap__book__name',
        'chap__book__promo',
        'chap__book__promo__name',)


class ArticleAdmin(admin.ModelAdmin):
    # list_display mixes field names, a module-level callable, a model
    # method ('model_year') and a ModelAdmin method ('modeladmin_year').
    list_display = ('content', 'date', callable_year, 'model_year', 'modeladmin_year')
    list_filter = ('date', 'section')
    fieldsets=(
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section')
        })
    )

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(ArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )

    def modeladmin_year(self, obj):
        # Computed column; short_description is deliberately None so tests
        # can exercise the header-name fallback.
        return obj.date.year
    modeladmin_year.admin_order_field = 'date'
    modeladmin_year.short_description = None

    def delete_model(self, request, obj):
        # Sends a mail as an observable side effect so tests can verify the
        # hook ran; strings below are asserted on elsewhere — do not edit.
        EmailMessage(
            'Greetings from a deleted object',
            'I hereby inform you that some user deleted me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).delete_model(request, obj)

    def save_model(self, request, obj, form, change=True):
        # NOTE(review): 'change' defaults to True here although the base
        # method has no default — presumably intentional for tests; confirm.
        EmailMessage(
            'Greetings from a created object',
            'I hereby inform you that some user created me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).save_model(request, obj, form, change)


class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        """ Only allow changing objects with even id number """
        return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)


class CustomArticleAdmin(admin.ModelAdmin):
    """
    Tests various hooks for using custom templates and contexts.
    """
    change_list_template = 'custom_admin/change_list.html'
    change_form_template = 'custom_admin/change_form.html'
    add_form_template = 'custom_admin/add_form.html'
    object_history_template = 'custom_admin/object_history.html'
    delete_confirmation_template = 'custom_admin/delete_confirmation.html'
    delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(CustomArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )
# Filters across a ForeignKey ('color__...') plus a local field.
class ThingAdmin(admin.ModelAdmin):
    list_filter = ('color__warm', 'color__value', 'pub_date',)


class InquisitionAdmin(admin.ModelAdmin):
    list_display = ('leader', 'country', 'expected')


# Exercises raw_id_fields widgets for a relation.
class SketchAdmin(admin.ModelAdmin):
    raw_id_fields = ('inquisition',)


class FabricAdmin(admin.ModelAdmin):
    list_display = ('surface',)
    list_filter = ('surface',)


# Changelist formset with custom validation: refuses to mark the row for
# "Grace Hopper" as alive (used to test formset-level errors).
class BasePersonModelFormSet(BaseModelFormSet):
    def clean(self):
        for person_dict in self.cleaned_data:
            person = person_dict.get('id')
            alive = person_dict.get('alive')
            if person and alive and person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")


class PersonAdmin(admin.ModelAdmin):
    list_display = ('name', 'gender', 'alive')
    list_editable = ('gender', 'alive')
    list_filter = ('gender',)
    search_fields = ('^name',)
    save_as = True

    def get_changelist_formset(self, request, **kwargs):
        # Plug in the validating formset defined above.
        return super(PersonAdmin, self).get_changelist_formset(request,
            formset=BasePersonModelFormSet, **kwargs)

    def get_queryset(self, request):
        # Order by a field that isn't in list display, to be able to test
        # whether ordering is preserved.
        return super(PersonAdmin, self).get_queryset(request).order_by('age')


# Proxy-style Account subclasses distinguished only by servicename.
class FooAccount(Account):
    """A service-specific account of type Foo."""
    servicename = 'foo'


class BarAccount(Account):
    """A service-specific account of type Bar."""
    servicename = 'bar'


class FooAccountAdmin(admin.StackedInline):
    model = FooAccount
    extra = 1


class BarAccountAdmin(admin.StackedInline):
    model = BarAccount
    extra = 1


# Persona edits both account subclasses through two stacked inlines.
class PersonaAdmin(admin.ModelAdmin):
    inlines = (
        FooAccountAdmin,
        BarAccountAdmin
    )
# Admin with a method-based action that sends a mail (observable side
# effect for action tests).
class SubscriberAdmin(admin.ModelAdmin):
    actions = ['mail_admin']

    def mail_admin(self, request, selected):
        EmailMessage(
            'Greetings from a ModelAdmin action',
            'This is the test email from a admin action',
            'from@example.com',
            ['to@example.com']
        ).send()


# Function-based admin action (registered on ExternalSubscriberAdmin below).
def external_mail(modeladmin, request, selected):
    EmailMessage(
        'Greetings from a function action',
        'This is the test email from a function action',
        'from@example.com',
        ['to@example.com']
    ).send()
external_mail.short_description = 'External mail (Another awesome action)'


# Action that returns a redirect instead of rendering the changelist.
def redirect_to(modeladmin, request, selected):
    from django.http import HttpResponseRedirect
    return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'


class ExternalSubscriberAdmin(admin.ModelAdmin):
    actions = [redirect_to, external_mail]


# Model defined here (not in .models) so its Meta.ordering can be
# overridden by PodcastAdmin.ordering below.
class Podcast(Media):
    release_date = models.DateField()

    class Meta:
        ordering = ('release_date',) # overridden in PodcastAdmin


class PodcastAdmin(admin.ModelAdmin):
    list_display = ('name', 'release_date')
    list_editable = ('release_date',)
    date_hierarchy = 'release_date'
    ordering = ('name',)


class VodcastAdmin(admin.ModelAdmin):
    list_display = ('name', 'released')
    list_editable = ('released',)
    ordering = ('name',)


class ChildInline(admin.StackedInline):
    model = Child


class ParentAdmin(admin.ModelAdmin):
    model = Parent
    inlines = [ChildInline]
    list_editable = ('name',)

    def save_related(self, request, form, formsets, change):
        # After saving, propagate the parent's last name to any child whose
        # name has no surname yet.
        super(ParentAdmin, self).save_related(request, form, formsets, change)
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + ' ' + last_name
                child.save()


class EmptyModelAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        # Hide pk=1 so the changelist can be tested against a queryset that
        # excludes existing rows.
        return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)


# actions=None disables the action dropdown entirely.
class OldSubscriberAdmin(admin.ModelAdmin):
    actions = None


# File-upload fixtures: a throwaway storage rooted in the test temp dir.
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
    model = Picture
    extra = 1


class GalleryAdmin(admin.ModelAdmin):
    inlines = [PictureInline]


class PictureAdmin(admin.ModelAdmin):
    pass


class LanguageAdmin(admin.ModelAdmin):
    list_display = ['iso', 'shortlist', 'english_name', 'name']
    list_editable = ['shortlist']


# Exact-match ('=') search across related translation tables.
class RecommendationAdmin(admin.ModelAdmin):
    search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)


# One stacked inline per collectible type; all attached to CollectorAdmin.
class WidgetInline(admin.StackedInline):
    model = Widget


class DooHickeyInline(admin.StackedInline):
    model = DooHickey


class GrommetInline(admin.StackedInline):
    model = Grommet


class WhatsitInline(admin.StackedInline):
    model = Whatsit


class FancyDoodadInline(admin.StackedInline):
    model = FancyDoodad


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'collector', 'order')
    list_editable = ('order',)


class CategoryInline(admin.StackedInline):
    model = Category


class CollectorAdmin(admin.ModelAdmin):
    inlines = [
        WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
        FancyDoodadInline, CategoryInline
    ]


class LinkInline(admin.TabularInline):
    model = Link
    extra = 1
    # Mixes a model field and a callable in readonly_fields.
    readonly_fields = ("posted", "multiline")

    def multiline(self, instance):
        return "InlineMultiline\ntest\nstring"


class SubPostInline(admin.TabularInline):
    model = PrePopulatedSubPost
    prepopulated_fields = {
        'subslug' : ('subtitle',)
    }

    def get_readonly_fields(self, request, obj=None):
        # Published posts lock their slug.
        if obj and obj.published:
            return ('subslug',)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        # No prepopulation once the post is published.
        if obj and obj.published:
            return {}
        return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
    list_display = ['title', 'slug']
    prepopulated_fields = {
        'slug' : ('title',)
    }
    inlines = [SubPostInline]

    def get_readonly_fields(self, request, obj=None):
        # Published posts lock their slug.
        if obj and obj.published:
            return ('slug',)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        # No prepopulation once the post is published.
        if obj and obj.published:
            return {}
        return self.prepopulated_fields


class PostAdmin(admin.ModelAdmin):
    list_display = ['title', 'public']
    # readonly_fields mixes model fields, admin methods and an anonymous
    # callable to exercise every supported readonly variant.
    readonly_fields = (
        'posted', 'awesomeness_level', 'coolness', 'value', 'multiline',
        lambda obj: "foo"
    )
    inlines = [
        LinkInline
    ]

    def coolness(self, instance):
        if instance.pk:
            return "%d amount of cool." % instance.pk
        else:
            # NOTE(review): "Unkown" typo is in a runtime string that tests
            # may assert on; left unchanged.
            return "Unkown coolness."

    def value(self, instance):
        return 1000

    def multiline(self, instance):
        return "Multiline\ntest\nstring"
    value.short_description = 'Value in $US'


# ChangeList whose queryset matches nothing (pk=9999 does not exist).
class CustomChangeList(ChangeList):
    def get_queryset(self, request):
        return self.root_queryset.filter(pk=9999) # Does not exist


class GadgetAdmin(admin.ModelAdmin):
    def get_changelist(self, request, **kwargs):
        return CustomChangeList


class PizzaAdmin(admin.ModelAdmin):
    readonly_fields = ('toppings',)


class WorkHourAdmin(admin.ModelAdmin):
    list_display = ('datum', 'employee')
    list_filter = ('employee',)


class FoodDeliveryAdmin(admin.ModelAdmin):
    list_display=('reference', 'driver', 'restaurant')
    list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing CoverLetter
    instances.

    Note that the CoverLetter model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')


class PaperAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Paper
    instances.

    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(PaperAdmin, self).get_queryset(request).only('title')


class ShortMessageAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing ShortMessage
    instances.

    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')


class TelegramAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Telegram
    instances.

    Note that the Telegram model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(TelegramAdmin, self).get_queryset(request).only('title')
# Form that hides the title input (exercises hidden widgets in admin forms).
class StoryForm(forms.ModelForm):
    class Meta:
        widgets = {'title': forms.HiddenInput}


class StoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'content')
    list_display_links = ('title',) # 'id' not in list_display_links
    list_editable = ('content', )
    form = StoryForm
    ordering = ["-pk"]


class OtherStoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'content')
    list_display_links = ('title', 'id') # 'id' in list_display_links
    list_editable = ('content', )
    ordering = ["-pk"]


class ComplexSortedPersonAdmin(admin.ModelAdmin):
    list_display = ('name', 'age', 'is_employee', 'colored_name')
    ordering = ('name',)

    def colored_name(self, obj):
        # HTML-rendering computed column, sortable via admin_order_field.
        return '<span style="color: #%s;">%s</span>' % ('ff00ff', obj.name)
    colored_name.allow_tags = True
    colored_name.admin_order_field = 'name'


class AlbumAdmin(admin.ModelAdmin):
    list_filter = ['title']


# NOTE(review): duplicate of the WorkHourAdmin defined earlier in this file;
# this redefinition shadows the first. Confirm the duplication is intended.
class WorkHourAdmin(admin.ModelAdmin):
    list_display = ('datum', 'employee')
    list_filter = ('employee',)


class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
    prepopulated_fields = {
        'slug' : ('title',)
    }
# The four AdminOrdered* admins cover every way a list_display column can
# declare its sort field: model field, model method, admin method, callable.
class AdminOrderedFieldAdmin(admin.ModelAdmin):
    ordering = ('order',)
    list_display = ('stuff', 'order')


class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
    ordering = ('order',)
    list_display = ('stuff', 'some_order')


class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
    def some_admin_order(self, obj):
        return obj.order
    some_admin_order.admin_order_field = 'order'
    ordering = ('order',)
    list_display = ('stuff', 'some_admin_order')


def admin_ordered_callable(obj):
    return obj.order
admin_ordered_callable.admin_order_field = 'order'


class AdminOrderedCallableAdmin(admin.ModelAdmin):
    ordering = ('order',)
    list_display = ('stuff', admin_ordered_callable)


class ReportAdmin(admin.ModelAdmin):
    def extra(self, request):
        return HttpResponse()

    def get_urls(self):
        # Corner case: Don't call parent implementation
        return patterns('',
            url(r'^extra/$',
                self.extra,
                name='cable_extra'),
        )


# List filter rendered with a custom template.
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
    template = 'custom_filter_template.html'


class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
    list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
    fieldsets = (
        (None, {
            'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
        }),
    )
    model = RelatedPrepopulated
    extra = 1
    prepopulated_fields = {'slug1': ['name', 'pubdate'],
        'slug2': ['status', 'name']}


class RelatedPrepopulatedInline2(admin.TabularInline):
    model = RelatedPrepopulated
    extra = 1
    prepopulated_fields = {'slug1': ['name', 'pubdate'],
        'slug2': ['status', 'name']}


class MainPrepopulatedAdmin(admin.ModelAdmin):
    # Same prepopulation rules on the main form and both inline styles.
    inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
    fieldsets = (
        (None, {
            'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
        }),
    )
    prepopulated_fields = {'slug1': ['name', 'pubdate'],
        'slug2': ['status', 'name']}


class UnorderedObjectAdmin(admin.ModelAdmin):
    list_display = ['name']
    list_editable = ['name']
    # Small page size to exercise pagination of an unordered queryset.
    list_per_page = 2


class UndeletableObjectAdmin(admin.ModelAdmin):
    def change_view(self, *args, **kwargs):
        # Hide the delete button on the change form.
        kwargs['extra_context'] = {'show_delete': False}
        return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)


# Callable that raises AttributeError, to test error handling in
# list_display rendering.
def callable_on_unknown(obj):
    return obj.unknown


class AttributeErrorRaisingAdmin(admin.ModelAdmin):
    list_display = [callable_on_unknown, ]
# One action per message level, plus one exercising extra_tags, so tests
# can verify message_user() level handling.
class MessageTestingAdmin(admin.ModelAdmin):
    actions = ["message_debug", "message_info", "message_success",
        "message_warning", "message_error", "message_extra_tags"]

    def message_debug(self, request, selected):
        self.message_user(request, "Test debug", level="debug")

    def message_info(self, request, selected):
        self.message_user(request, "Test info", level="info")

    def message_success(self, request, selected):
        self.message_user(request, "Test success", level="success")

    def message_warning(self, request, selected):
        self.message_user(request, "Test warning", level="warning")

    def message_error(self, request, selected):
        self.message_user(request, "Test error", level="error")

    def message_extra_tags(self, request, selected):
        self.message_user(request, "Test tags", extra_tags="extra_tag")


# The same field appears in list_display, readonly_fields and fields.
class ChoiceList(admin.ModelAdmin):
    list_display = ['choice']
    readonly_fields = ['choice']
    fields = ['choice']
# Dedicated AdminSite instance; every fixture model/admin pair used by the
# admin_views tests is registered below.
site = admin.AdminSite(name="admin")
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
# Options passed directly to register() instead of a ModelAdmin subclass.
site.register(Section, save_as=True, inlines=[ArticleInline])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)

# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
#     related ForeignKey object registered in admin
#     related ForeignKey object not registered in admin
#     related OneToOne object registered in admin
#     related OneToOne object not registered in admin
# when deleting Book so as exercise all four troublesome (w.r.t escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.util's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)

# Register core models we need in our tests
from django.contrib.auth.models import User, Group
from django.contrib.auth.admin import UserAdmin, GroupAdmin
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
| |
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from barf.analysis.codeanalyzer import CodeAnalyzer
from barf.analysis.gadget.gadget import GadgetType
from barf.analysis.gadget.gadgetclassifier import GadgetClassifier
from barf.analysis.gadget.gadgetfinder import GadgetFinder
from barf.analysis.gadget.gadgetverifier import GadgetVerifier
from barf.arch import ARCH_ARM
from barf.arch import ARCH_ARM_MODE_32
from barf.arch.arm.armbase import ArmArchitectureInformation
from barf.arch.arm.armdisassembler import ArmDisassembler
from barf.arch.arm.armtranslator import ArmTranslator
from barf.arch.arm.armtranslator import LITE_TRANSLATION
from barf.core.reil import ReilEmulator
from barf.core.reil import ReilImmediateOperand
from barf.core.reil import ReilRegisterOperand
from barf.core.smt.smtlibv2 import Z3Solver as SmtSolver
from barf.core.smt.smttranslator import SmtTranslator
class ArmGadgetClassifierTests(unittest.TestCase):

    def setUp(self):
        """Wire up the 32-bit ARM analysis stack shared by every test."""
        self._arch_info = ArmArchitectureInformation(ARCH_ARM_MODE_32)
        self._smt_solver = SmtSolver()
        self._smt_translator = SmtTranslator(self._smt_solver, self._arch_info.address_size)
        self._ir_emulator = ReilEmulator(self._arch_info.address_size)
        # Both the REIL emulator and the SMT translator need the
        # architecture's register set/sizes and the register alias mapper.
        self._ir_emulator.set_arch_registers(self._arch_info.registers_gp_all)
        self._ir_emulator.set_arch_registers_size(self._arch_info.registers_size)
        self._ir_emulator.set_reg_access_mapper(self._arch_info.alias_mapper)
        self._smt_translator.set_reg_access_mapper(self._arch_info.alias_mapper)
        self._smt_translator.set_arch_registers_size(self._arch_info.registers_size)
        self._code_analyzer = CodeAnalyzer(self._smt_solver, self._smt_translator)
        self._g_classifier = GadgetClassifier(self._ir_emulator, self._arch_info)
        self._g_verifier = GadgetVerifier(self._code_analyzer, self._arch_info)
def _find_and_classify_gadgets(self, binary):
g_finder = GadgetFinder(ArmDisassembler(), binary, ArmTranslator(translation_mode=LITE_TRANSLATION), ARCH_ARM, ARCH_ARM_MODE_32)
g_candidates = g_finder.find(0x00000000, len(binary), instrs_depth=4)
g_classified = self._g_classifier.classify(g_candidates[0])
# Debug:
# self._print_candidates(g_candidates)
# self._print_classified(g_classified)
return g_candidates, g_classified
def test_move_register_1(self):
# testing : dst_reg <- src_reg
binary = "\x04\x00\xa0\xe1" # 0x00 : (4) mov r0, r4
binary += "\x31\xff\x2f\xe1" # 0x04 : (4) blx r1
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 2)
self.assertEquals(g_classified[0].type, GadgetType.MoveRegister)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 1)
self.assertTrue(ReilRegisterOperand("r14", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_move_register_2(self):
# testing : dst_reg <- src_reg
binary = "\x00\x00\x84\xe2" # 0x00 : (4) add r0, r4, #0
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.MoveRegister)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
# TODO: test_move_register_n: mul r0, r4, #1
def test_load_constant_1(self):
# testing : dst_reg <- constant
binary = "\x0a\x20\xa0\xe3" # 0x00 : (4) mov r2, #10
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadConstant)
self.assertEquals(g_classified[0].sources, [ReilImmediateOperand(10, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r2", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertFalse(ReilRegisterOperand("r2", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_constant_2(self):
# testing : dst_reg <- constant
binary = "\x02\x20\x42\xe0" # 0x00 : (4) sub r2, r2, r2
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadConstant)
self.assertEquals(g_classified[0].sources, [ReilImmediateOperand(0, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r2", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertFalse(ReilRegisterOperand("r2", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_constant_3(self):
# testing : dst_reg <- constant
binary = "\x02\x20\x22\xe0" # 0x00 : (4) eor r2, r2, r2
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadConstant)
self.assertEquals(g_classified[0].sources, [ReilImmediateOperand(0, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r2", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertFalse(ReilRegisterOperand("r2", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_constant_4(self):
# testing : dst_reg <- constant
binary = "\x00\x20\x02\xe2" # 0x00 : (4) and r2, r2, #0
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadConstant)
self.assertEquals(g_classified[0].sources, [ReilImmediateOperand(0, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r2", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertFalse(ReilRegisterOperand("r2", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_constant_5(self):
# testing : dst_reg <- constant
binary = "\x00\x20\x02\xe2" # and r2, r2, #0
binary += "\x21\x20\x82\xe3" # orr r2, r2, #33
binary += "\x1e\xff\x2f\xe1" # bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadConstant)
self.assertEquals(g_classified[0].sources, [ReilImmediateOperand(33, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r2", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertFalse(ReilRegisterOperand("r2", 32) in g_classified[0].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_arithmetic_add_1(self):
# testing : dst_reg <- src1_reg + src2_reg
binary = "\x08\x00\x84\xe0" # 0x00 : (4) add r0, r4, r8
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.Arithmetic)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32), ReilRegisterOperand("r8", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[0].operation, "+")
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_arithmetic_sub_1(self):
# testing : dst_reg <- src1_reg + src2_reg
binary = "\x08\x00\x44\xe0" # 0x00 : (4) sub r0, r4, r8
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.Arithmetic)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32), ReilRegisterOperand("r8", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[0].operation, "-")
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_memory_1(self):
# testing : dst_reg <- m[src_reg]
binary = "\x00\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadMemory)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x0, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r3", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_load_memory_2(self):
# testing : dst_reg <- m[src_reg + offset]
binary = "\x33\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4 + 0x33]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.LoadMemory)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x33, 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r3", 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
# TODO: ARM's ldr rd, [rn, r2] is not a valid classification right now
def test_store_memory_1(self):
# testing : dst_reg <- m[src_reg]
binary = "\x00\x30\x84\xe5" # 0x00 : (4) str r3, [r4]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.StoreMemory)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r3", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x0, 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_store_memory_2(self):
# testing : dst_reg <- m[src_reg + offset]
binary = "\x33\x30\x84\xe5" # 0x00 : (4) str r3, [r4 + 0x33]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 1)
self.assertEquals(g_classified[0].type, GadgetType.StoreMemory)
self.assertEquals(g_classified[0].sources, [ReilRegisterOperand("r3", 32)])
self.assertEquals(g_classified[0].destination, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x33, 32)])
self.assertEquals(len(g_classified[0].modified_registers), 0)
self.assertTrue(self._g_verifier.verify(g_classified[0]))
def test_arithmetic_load_add_1(self):
# testing : dst_reg <- dst_reg + mem[src_reg]
binary = "\x00\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4]
binary += "\x03\x00\x80\xe0" # 0x00 : (4) add r0, r0, r3
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 2)
self.assertEquals(g_classified[1].type, GadgetType.ArithmeticLoad)
self.assertEquals(g_classified[1].sources, [ReilRegisterOperand("r0", 32), ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x0, 32)])
self.assertEquals(g_classified[1].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[1].operation, "+")
self.assertEquals(len(g_classified[1].modified_registers), 1)
self.assertFalse(ReilRegisterOperand("r0", 32) in g_classified[1].modified_registers)
self.assertTrue(ReilRegisterOperand("r3", 32) in g_classified[1].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[1]))
def test_arithmetic_load_add_2(self):
# testing : dst_reg <- dst_reg + mem[src_reg + offset]
binary = "\x22\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4, 0x22]
binary += "\x03\x00\x80\xe0" # 0x00 : (4) add r0, r0, r3
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 2)
self.assertEquals(g_classified[1].type, GadgetType.ArithmeticLoad)
self.assertEquals(g_classified[1].sources, [ReilRegisterOperand("r0", 32), ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x22, 32)])
self.assertEquals(g_classified[1].destination, [ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[1].operation, "+")
self.assertEquals(len(g_classified[1].modified_registers), 1)
self.assertFalse(ReilRegisterOperand("r0", 32) in g_classified[1].modified_registers)
self.assertTrue(ReilRegisterOperand("r3", 32) in g_classified[1].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[1]))
def test_arithmetic_store_add_1(self):
# testing : m[dst_reg] <- m[dst_reg] + src_reg
binary = "\x00\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4]
binary += "\x03\x30\x80\xe0" # 0x00 : (4) add r3, r0, r3
binary += "\x00\x30\x84\xe5" # 0x00 : (4) str r3, [r4]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 2)
self.assertEquals(g_classified[1].type, GadgetType.ArithmeticStore)
self.assertEquals(g_classified[1].sources, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x0, 32), ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[1].destination, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x0, 32)])
self.assertEquals(g_classified[1].operation, "+")
self.assertEquals(len(g_classified[1].modified_registers), 1)
self.assertFalse(ReilRegisterOperand("r4", 32) in g_classified[1].modified_registers)
self.assertTrue(ReilRegisterOperand("r3", 32) in g_classified[1].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[1]))
def test_arithmetic_store_add_2(self):
# testing : dst_reg <- dst_reg + mem[src_reg + offset]
binary = "\x22\x30\x94\xe5" # 0x00 : (4) ldr r3, [r4, 0x22]
binary += "\x03\x30\x80\xe0" # 0x00 : (4) add r3, r0, r3
binary += "\x22\x30\x84\xe5" # 0x00 : (4) str r3, [r4, 0x22]
binary += "\x1e\xff\x2f\xe1" # 0x04 : (4) bx lr
g_candidates, g_classified = self._find_and_classify_gadgets(binary)
self.assertEquals(len(g_candidates), 1)
self.assertEquals(len(g_classified), 2)
self.assertEquals(g_classified[1].type, GadgetType.ArithmeticStore)
self.assertEquals(g_classified[1].sources, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x22, 32), ReilRegisterOperand("r0", 32)])
self.assertEquals(g_classified[1].destination, [ReilRegisterOperand("r4", 32), ReilImmediateOperand(0x22, 32)])
self.assertEquals(g_classified[1].operation, "+")
self.assertEquals(len(g_classified[1].modified_registers), 1)
self.assertFalse(ReilRegisterOperand("r4", 32) in g_classified[1].modified_registers)
self.assertTrue(ReilRegisterOperand("r3", 32) in g_classified[1].modified_registers)
self.assertTrue(self._g_verifier.verify(g_classified[1]))
def _print_candidates(self, candidates):
print "Candidates :"
for gadget in candidates:
print gadget
print "-" * 10
def _print_classified(self, classified):
print "Classified :"
for gadget in classified:
print gadget
print gadget.type
print "-" * 10
def main():
    # Entry point: discover and run every test case defined in this module.
    unittest.main()


if __name__ == '__main__':
    main()
| |
"""Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper

# Matches a PEP 263 encoding declaration ("coding: <name>") in a comment
# near the top of a source file; group(1) captures the encoding name.
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token  # only its names were needed; drop the module reference itself

# Extra token types produced by this module on top of those in token.py.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
# Maps each operator/delimiter spelling to its exact token type.  The plain
# tokenizer labels all of these as OP; TokenInfo.exact_type consults this
# table to recover the specific type.
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@':   AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """One token: (type, string, start, end, line) with a readable repr."""

    def __repr__(self):
        # Show the numeric type together with its symbolic name.
        described = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % self._replace(type=described))

    @property
    def exact_type(self):
        """The specific operator type for OP tokens, else the plain type."""
        if self.type != OP or self.string not in EXACT_TOKEN_TYPES:
            return self.type
        return EXACT_TOKEN_TYPES[self.string]
def group(*choices):
    """Join the alternatives into one parenthesized regex alternation."""
    return '(' + '|'.join(choices) + ')'


def any(*choices):
    """Alternation repeated zero or more times.  (Shadows builtins.any.)"""
    return group(*choices) + '*'


def maybe(*choices):
    """Alternation made optional (zero or one occurrence)."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
# Optional whitespace, backslash-continuations, and a trailing comment.
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

# Integer literal forms.
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
# Floating point and imaginary literal forms.
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
# The pattern actually used by _tokenize(): one token per match of group 1.
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Maps the opening quote of a string (with any prefix) to the regex that
# matches the remainder of that string.  Prefix-only keys map to None: the
# prefix alone does not determine the end pattern.
# Fixed: the "R'''" / 'R"""' pair appeared twice in the literal; the
# redundant duplicate entries were removed (identical values, so the
# resulting dict is unchanged).
endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}
# Every recognized spelling of a triple-quote opener, mapped to itself so
# both membership tests and lookups work.
triple_quoted = {opener: opener for opener in (
    "'''", '"""',
    "r'''", 'r"""', "R'''", 'R"""',
    "b'''", 'b"""', "B'''", 'B"""',
    "br'''", 'br"""', "Br'''", 'Br"""',
    "bR'''", 'bR"""', "BR'''", 'BR"""',
    "rb'''", 'rb"""', "rB'''", 'rB"""',
    "Rb'''", 'Rb"""', "RB'''", 'RB"""',
    "u'''", 'u"""', "U'''", 'U"""',
)}
# Every recognized spelling of a single-quote opener, mapped to itself so
# both membership tests and lookups work.
single_quoted = {opener: opener for opener in (
    "'", '"',
    "r'", 'r"', "R'", 'R"',
    "b'", 'b"', "B'", 'B"',
    "br'", 'br"', "Br'", 'Br"',
    "bR'", 'bR"', "BR'", 'BR"',
    "rb'", 'rb"', "rB'", 'rB"',
    "Rb'", 'Rb"', "RB'", 'RB"',
    "u'", 'u"', "U'", 'U"',
)}
tabsize = 8  # column width of one tab stop when measuring indentation

class TokenError(Exception): pass  # raised for EOF inside a multi-line string/statement

class StopTokenizing(Exception): pass  # NOTE(review): appears unused here; presumably kept for backward compatibility
class Untokenizer:
    """Reconstruct source text from a stream of token tuples.

    Full 5-tuples are laid out at their recorded (row, col) coordinates;
    as soon as a bare 2-tuple (type, string) is seen, processing switches
    to compat(), which only inserts enough spacing for the output to
    tokenize back to the same token stream.
    """

    def __init__(self):
        self.tokens = []        # accumulated output fragments
        self.prev_row = 1       # end position of the previously emitted token
        self.prev_col = 0
        self.encoding = None    # set from an ENCODING token, if one is seen

    def add_whitespace(self, start):
        # Pad with spaces from the previous token's end up to *start*.
        # NOTE(review): rows are never advanced here, only columns; the
        # assert documents the assumption that *start* is not on a later
        # row than the previous token's end.
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                # Bare (type, string) pair: positions are unavailable, so
                # fall back to the lossy mode for the rest of the stream.
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        # Lossy reconstruction: separate tokens with just enough whitespace
        # (and replay INDENT strings) so the result re-tokenizes equally.
        # NOTE(review): the initial *token*'s text is never appended — only
        # its type influences spacing/startline.  Looks intentional for the
        # historical API, but confirm against upstream before relying on it.
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    # Encode only if an ENCODING token was present; otherwise return str.
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Best-effort recovery of the underlying file name for error messages.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        # Treat an exhausted readline iterator the same as EOF.
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        # Return the declared encoding on *line*, or None if there is none.
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    # PEP 263 allows the cookie on the second line as well.
    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    # NOTE: deliberately shadows builtins.open within this module's API;
    # the builtin is still reachable as builtins.open below.
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    # Rewind so the text wrapper sees the whole file, including the lines
    # consumed during encoding detection.
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    # Pad with empty lines so _tokenize always sees EOF as b"".
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
    """Core tokenizer generator.

    Reads one physical line at a time from *readline* and yields TokenInfo
    5-tuples.  If *encoding* is not None, lines are decoded from bytes and
    an ENCODING token is emitted first.  Tracks three pieces of state
    across lines: an unterminated string (contstr), open bracket nesting
    (parenlev), and backslash continuation (continued).
    """
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]   # stack of active indentation column widths

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string whose line did not end in a
                # backslash continuation: report it as an error token.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            # re.compile caches patterns, so re-fetching PseudoToken's
            # compiled form every pass is cheap.
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Pick the end pattern from whichever prefix form
                        # (1-, 2- or 3-char) identifies the quote used.
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize a source reading unicode strings instead of bytes.

    *readline* is called like for ``tokenize()`` but should return str
    lines; passing ``None`` as the encoding skips BOM/coding-cookie
    detection, so no ENCODING token is emitted.
    """
    return _tokenize(readline, None)
def main():
    """Command-line entry point (``python -m tokenize``).

    Tokenizes the file named on the command line (stdin when omitted) and
    prints one line per token: "row,col-row,col:  TYPE  'string'".  With
    -e/--exact, operators are reported with their exact token type names.
    """
    import argparse
    # Helper error handling routines
    def perror(message):
        # Diagnostics go to stderr so stdout stays machine-readable.
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Emit a compiler-style "file:line:col: error: msg" diagnostic
        # (parts are omitted when unknown) and exit with failure status.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            # Open in binary mode so tokenize() can perform its own
            # encoding detection (BOM / PEP 263 coding cookie).
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # Stdin is already decoded text: tokenize lazily with no
            # encoding detection (encoding=None).
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    # NOTE: IndentationError is a subclass of SyntaxError, so it must be
    # handled first to report the more precise location information.
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        # Unexpected failures are reported and re-raised so the full
        # traceback is still shown.
        perror("unexpected error: %s" % err)
        raise
# Run the CLI only when executed directly (e.g. ``python -m tokenize``).
if __name__ == "__main__":
    main()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class to get process information."""
import collections
import os
import SocketServer
import psutil
from plaso.frontend import rpc_proxy
from plaso.lib import timelib
class ProcessInfo(object):
  """Class that provides information about a running process.

  Wraps a psutil.Process object and exposes convenience accessors that
  return None (or a sensible default) when the process has exited.
  """
  # Named tuple returned by GetMemoryInformation(); fields mirror psutil's
  # extended memory information plus the memory usage percentage.
  _MEMORY_INFORMATION = collections.namedtuple(
      'memory_information', 'rss vms shared text lib data dirty percent')
  def __init__(self, pid=None):
    """Initialize the process information object.
    Args:
      pid: Process ID (PID) value of the process to monitor. The default value
           is None in which case the PID of the calling
           process will be used.
    Raises:
      IOError: If the pid does not exist.
    """
    if pid is None:
      self._pid = os.getpid()
    else:
      self._pid = pid
    # Validate the PID before constructing the psutil object so a clear
    # error is raised for a non-existing process.
    if not psutil.pid_exists(self._pid):
      raise IOError(u'Unable to read data from pid: {0:d}'.format(self._pid))
    self._command_line = ''
    self._parent = None
    self._process = psutil.Process(self._pid)
    # TODO: Allow the client proxy object to determined at run time and not
    # a fixed value as here.
    # NOTE(review): the RPC connection is opened eagerly here, so failures
    # in Open() propagate out of the constructor.
    self._rpc_client = rpc_proxy.StandardRpcProxyClient(self._pid)
    self._rpc_client.Open()
  @property
  def pid(self):
    """Return the process ID (PID)."""
    return self._pid
  @property
  def name(self):
    """Return the name of the process."""
    # NOTE(review): attribute-style access is the psutil < 2.0 API; in
    # psutil 2.x name is a method — confirm the pinned psutil version.
    return self._process.name
  @property
  def command_line(self):
    """Return the full command line used to start the process.

    The value is cached after the first successful lookup; None is
    returned if the process no longer exists.
    """
    if self._command_line:
      return self._command_line
    try:
      self._command_line = u' '.join(self._process.cmdline)
    except psutil.NoSuchProcess:
      return
    return self._command_line
  @property
  def parent(self):
    """Return a ProcessInfo object for the parent process.

    The value is cached after the first successful lookup; None is
    returned if the process no longer exists.
    """
    if self._parent is not None:
      return self._parent
    try:
      self._parent = ProcessInfo(pid=self._process.parent.pid)
      return self._parent
    except psutil.NoSuchProcess:
      return
  @property
  def open_files(self):
    """Yield the paths of files the process has open.

    Yields nothing if access is denied or the process has exited.
    """
    try:
      for open_file in self._process.get_open_files():
        yield open_file.path
    except (psutil.AccessDenied, psutil.NoSuchProcess):
      return
  @property
  def children(self):
    """Yield all child processes as ProcessInfo objects."""
    try:
      for child in self._process.get_children():
        yield ProcessInfo(pid=child.pid)
    except psutil.NoSuchProcess:
      # We are creating an empty generator here. Yield or return None
      # individually don't provide that behavior, neither does raising
      # GeneratorExit or StopIteration.
      # pylint: disable-msg=unreachable
      return
      yield
  @property
  def number_of_threads(self):
    """Return back the number of threads this process has.

    Returns 0 if the process no longer exists.
    """
    try:
      return self._process.get_num_threads()
    except psutil.NoSuchProcess:
      return 0
  @property
  def memory_map(self):
    """Yield memory map objects (instance of mmap)."""
    try:
      for memory_map in self._process.get_memory_maps():
        yield memory_map
    except psutil.NoSuchProcess:
      # We are creating an empty generator here. Yield or return None
      # individually don't provide that behavior, neither does raising
      # GeneratorExit or StopIteration.
      # pylint: disable-msg=unreachable
      return
      yield
  @property
  def status(self):
    """Return the process status, or u'exited' if the process is gone."""
    try:
      return self._process.status
    except psutil.NoSuchProcess:
      return u'exited'
  @property
  def start_time(self):
    """Return back the start time of the process.
    Returns:
      An integer representing the number of microseconds since Unix Epoch time
      in UTC.
    """
    return timelib.Timestamp.FromPosixTime(int(self._process.create_time))
  @property
  def io_counters(self):
    """Return back IO Counters for the process (None if it has exited)."""
    try:
      return self._process.get_io_counters()
    except psutil.NoSuchProcess:
      return
  @property
  def cpu_times(self):
    """Return back CPU times for the process (None if it has exited)."""
    try:
      return self._process.get_cpu_times()
    except psutil.NoSuchProcess:
      return
  @property
  def cpu_percent(self):
    """Return back the percent of CPU processing this process consumes."""
    try:
      return self._process.get_cpu_percent()
    except psutil.NoSuchProcess:
      return
  def GetMemoryInformation(self):
    """Return back memory information as a memory_information object.
    Returns:
      Memory information object (instance of memory_information) a named
      tuple that contains the following attributes: rss, vms, shared, text,
      lib, data, dirty, percent. None is returned if the process no
      longer exists.
    """
    try:
      external_information = self._process.get_ext_memory_info()
    except psutil.NoSuchProcess:
      return
    percent = self._process.get_memory_percent()
    # Psutil will return different memory information depending on what is
    # available in that platform. Missing fields default to 0 below.
    # TODO: Not be as strict in what gets returned, have this object more
    # flexible so that the memory information returned reflects the available
    # information in the platform.
    return self._MEMORY_INFORMATION(
        getattr(external_information, 'rss', 0),
        getattr(external_information, 'vms', 0),
        getattr(external_information, 'shared', 0),
        getattr(external_information, 'text', 0),
        getattr(external_information, 'lib', 0),
        getattr(external_information, 'data', 0),
        getattr(external_information, 'dirty', 0), percent)
  def GetProcessStatus(self):
    """Attempt to connect to process via RPC to gather status information.

    Returns:
      A dict with the status information, or None when no RPC client is
      available, the call failed, or a non-dict value was returned.
    """
    if self._rpc_client is None:
      return
    try:
      status = self._rpc_client.GetData('status')
      if isinstance(status, dict):
        return status
    except SocketServer.socket.error:
      return
  def IsAlive(self):
    """Return a boolean value indicating if the process is alive or not."""
    return self._process.is_running()
  def TerminateProcess(self):
    """Terminate the process."""
    # TODO: Make sure the process has really been terminated.
    if self.IsAlive():
      self._process.terminate()
| |
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from os_win import exceptions as os_win_exc
from oslo_config import cfg
from nova import exception
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import volumeops
CONF = cfg.CONF
# Baseline iSCSI connection properties shared by every fake connection_info
# dict in this module; tests override entries via get_fake_connection_info().
connection_data = {'volume_id': 'fake_vol_id',
                   'target_lun': mock.sentinel.fake_lun,
                   'target_iqn': mock.sentinel.fake_iqn,
                   'target_portal': mock.sentinel.fake_portal,
                   'auth_method': 'chap',
                   'auth_username': mock.sentinel.fake_user,
                   'auth_password': mock.sentinel.fake_pass}
def get_fake_block_dev_info():
    """Build a minimal block_device_info dict with one volume mapping."""
    volume_mapping = fake_block_device.AnonFakeDbBlockDeviceDict(
        {'source_type': 'volume'})
    return {'block_device_mapping': [volume_mapping]}
def get_fake_connection_info(**kwargs):
    """Return fake connection info; kwargs override the shared defaults."""
    data = dict(connection_data)
    data.update(kwargs)
    return {'data': data,
            'serial': mock.sentinel.serial}
class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for VolumeOps class.

    The _volutils/_vmutils collaborators are replaced with mocks in
    setUp(), so each test only verifies the calls VolumeOps makes.
    """
    def setUp(self):
        super(VolumeOpsTestCase, self).setUp()
        self._volumeops = volumeops.VolumeOps()
        self._volumeops._volutils = mock.MagicMock()
        self._volumeops._vmutils = mock.Mock()
    def test_get_volume_driver(self):
        # A registered driver_volume_type resolves to its driver object.
        fake_conn_info = {'driver_volume_type': mock.sentinel.fake_driver_type}
        self._volumeops.volume_drivers[mock.sentinel.fake_driver_type] = (
            mock.sentinel.fake_driver)
        result = self._volumeops._get_volume_driver(
            connection_info=fake_conn_info)
        self.assertEqual(mock.sentinel.fake_driver, result)
    def test_get_volume_driver_exception(self):
        # An unknown driver_volume_type raises VolumeDriverNotFound.
        fake_conn_info = {'driver_volume_type': 'fake_driver'}
        self.assertRaises(exception.VolumeDriverNotFound,
                          self._volumeops._get_volume_driver,
                          connection_info=fake_conn_info)
    @mock.patch.object(volumeops.VolumeOps, 'attach_volume')
    def test_attach_volumes(self, mock_attach_volume):
        # attach_volumes delegates each mapping to attach_volume, passing
        # the ebs_root flag through.
        block_device_info = get_fake_block_dev_info()
        self._volumeops.attach_volumes(block_device_info,
                                       mock.sentinel.instance_name,
                                       ebs_root=True)
        mock_attach_volume.assert_called_once_with(
            block_device_info['block_device_mapping'][0]['connection_info'],
            mock.sentinel.instance_name, True)
    def test_fix_instance_volume_disk_paths_empty_bdm(self):
        # An empty block device mapping must short-circuit without touching
        # the VM's physical disk mapping.
        self._volumeops.fix_instance_volume_disk_paths(
            mock.sentinel.instance_name,
            block_device_info={})
        self.assertFalse(
            self._volumeops._vmutils.get_vm_physical_disk_mapping.called)
    @mock.patch.object(volumeops.VolumeOps, 'get_disk_path_mapping')
    def test_fix_instance_volume_disk_paths(self, mock_get_disk_path_mapping):
        block_device_info = get_fake_block_dev_info()
        mock_disk1 = {
            'mounted_disk_path': mock.sentinel.mounted_disk1_path,
            'resource_path': mock.sentinel.resource1_path
        }
        mock_disk2 = {
            'mounted_disk_path': mock.sentinel.mounted_disk2_path,
            'resource_path': mock.sentinel.resource2_path
        }
        mock_vm_disk_mapping = {
            mock.sentinel.disk1_serial: mock_disk1,
            mock.sentinel.disk2_serial: mock_disk2
        }
        # In this case, only the first disk needs to be updated.
        mock_phys_disk_path_mapping = {
            mock.sentinel.disk1_serial: mock.sentinel.actual_disk1_path,
            mock.sentinel.disk2_serial: mock.sentinel.mounted_disk2_path
        }
        vmutils = self._volumeops._vmutils
        vmutils.get_vm_physical_disk_mapping.return_value = (
            mock_vm_disk_mapping)
        mock_get_disk_path_mapping.return_value = mock_phys_disk_path_mapping
        self._volumeops.fix_instance_volume_disk_paths(
            mock.sentinel.instance_name,
            block_device_info)
        vmutils.get_vm_physical_disk_mapping.assert_called_once_with(
            mock.sentinel.instance_name)
        mock_get_disk_path_mapping.assert_called_once_with(
            block_device_info)
        # Only the out-of-date disk (disk1) gets its host resource reset.
        vmutils.set_disk_host_res.assert_called_once_with(
            mock.sentinel.resource1_path,
            mock.sentinel.actual_disk1_path)
    @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
    def test_disconnect_volumes(self, mock_get_volume_driver):
        # disconnect_volumes hands the whole mapping list to the driver.
        block_device_info = get_fake_block_dev_info()
        block_device_mapping = block_device_info['block_device_mapping']
        block_device_mapping[0]['connection_info'] = {
            'driver_volume_type': mock.sentinel.fake_vol_type}
        fake_volume_driver = mock_get_volume_driver.return_value
        self._volumeops.disconnect_volumes(block_device_info)
        fake_volume_driver.disconnect_volumes.assert_called_once_with(
            block_device_mapping)
    @mock.patch('nova.block_device.volume_in_mapping')
    def test_ebs_root_in_block_devices(self, mock_vol_in_mapping):
        block_device_info = get_fake_block_dev_info()
        response = self._volumeops.ebs_root_in_block_devices(block_device_info)
        mock_vol_in_mapping.assert_called_once_with(
            self._volumeops._default_root_device, block_device_info)
        self.assertEqual(mock_vol_in_mapping.return_value, response)
    def test_get_volume_connector(self):
        # The actual instance value is irrelevant to the connector info.
        mock_instance = mock.DEFAULT
        initiator = self._volumeops._volutils.get_iscsi_initiator.return_value
        expected = {'ip': CONF.my_ip,
                    'host': CONF.host,
                    'initiator': initiator}
        response = self._volumeops.get_volume_connector(instance=mock_instance)
        self._volumeops._volutils.get_iscsi_initiator.assert_called_once_with()
        self.assertEqual(expected, response)
    @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
    def test_initialize_volumes_connection(self, mock_get_volume_driver):
        block_device_info = get_fake_block_dev_info()
        self._volumeops.initialize_volumes_connection(block_device_info)
        init_vol_conn = (
            mock_get_volume_driver.return_value.initialize_volume_connection)
        init_vol_conn.assert_called_once_with(
            block_device_info['block_device_mapping'][0]['connection_info'])
    @mock.patch.object(volumeops.VolumeOps,
                       'get_mounted_disk_path_from_volume')
    def test_get_disk_path_mapping(self, mock_get_disk_path):
        # The mapping is keyed by volume serial and valued by disk path.
        block_device_info = get_fake_block_dev_info()
        block_device_mapping = block_device_info['block_device_mapping']
        fake_conn_info = get_fake_connection_info()
        block_device_mapping[0]['connection_info'] = fake_conn_info
        mock_get_disk_path.return_value = mock.sentinel.disk_path
        resulted_disk_path_mapping = self._volumeops.get_disk_path_mapping(
            block_device_info)
        mock_get_disk_path.assert_called_once_with(fake_conn_info)
        expected_disk_path_mapping = {
            mock.sentinel.serial: mock.sentinel.disk_path
        }
        self.assertEqual(expected_disk_path_mapping,
                         resulted_disk_path_mapping)
    def test_group_block_devices_by_type(self):
        block_device_map = get_fake_block_dev_info()['block_device_mapping']
        block_device_map[0]['connection_info'] = {
            'driver_volume_type': 'iscsi'}
        result = self._volumeops._group_block_devices_by_type(
            block_device_map)
        expected = {'iscsi': [block_device_map[0]]}
        self.assertEqual(expected, result)
    @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
    def test_get_mounted_disk_path_from_volume(self, mock_get_volume_driver):
        # The call is forwarded verbatim to the resolved volume driver.
        fake_conn_info = get_fake_connection_info()
        fake_volume_driver = mock_get_volume_driver.return_value
        resulted_disk_path = self._volumeops.get_mounted_disk_path_from_volume(
            fake_conn_info)
        mock_get_volume_driver.assert_called_once_with(
            connection_info=fake_conn_info)
        get_mounted_disk = fake_volume_driver.get_mounted_disk_path_from_volume
        get_mounted_disk.assert_called_once_with(fake_conn_info)
        self.assertEqual(get_mounted_disk.return_value,
                         resulted_disk_path)
class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for Hyper-V ISCSIVolumeDriver class.

    The _vmutils/_volutils collaborators are mocked in setUp(), so each
    test verifies only the calls the driver makes against them.
    """
    def setUp(self):
        super(ISCSIVolumeDriverTestCase, self).setUp()
        self._volume_driver = volumeops.ISCSIVolumeDriver()
        self._volume_driver._vmutils = mock.MagicMock()
        self._volume_driver._volutils = mock.MagicMock()
    def test_login_storage_target_auth_exception(self):
        # Anything other than CHAP auth is rejected.
        connection_info = get_fake_connection_info(
            auth_method='fake_auth_method')
        self.assertRaises(exception.UnsupportedBDMVolumeAuthMethod,
                          self._volume_driver.login_storage_target,
                          connection_info)
    @mock.patch.object(volumeops.ISCSIVolumeDriver,
                       '_get_mounted_disk_from_lun')
    def _check_login_storage_target(self, mock_get_mounted_disk_from_lun,
                                    dev_number):
        # Helper: a zero/None device number means the target is not yet
        # logged in, so a login (and a disk wait) must happen; a nonzero
        # device number means the login is skipped.
        connection_info = get_fake_connection_info()
        login_target = self._volume_driver._volutils.login_storage_target
        get_number = self._volume_driver._volutils.get_device_number_for_target
        get_number.return_value = dev_number
        self._volume_driver.login_storage_target(connection_info)
        get_number.assert_called_once_with(mock.sentinel.fake_iqn,
                                           mock.sentinel.fake_lun)
        if not dev_number:
            login_target.assert_called_once_with(
                mock.sentinel.fake_lun, mock.sentinel.fake_iqn,
                mock.sentinel.fake_portal, mock.sentinel.fake_user,
                mock.sentinel.fake_pass)
            mock_get_mounted_disk_from_lun.assert_called_once_with(
                mock.sentinel.fake_iqn, mock.sentinel.fake_lun, True)
        else:
            self.assertFalse(login_target.called)
    def test_login_storage_target_already_logged(self):
        self._check_login_storage_target(dev_number=1)
    def test_login_storage_target(self):
        self._check_login_storage_target(dev_number=0)
    def _check_logout_storage_target(self, disconnected_luns_count=0):
        # Helper: logout only happens once all of the target's LUNs (one
        # here) have been disconnected.
        self._volume_driver._volutils.get_target_lun_count.return_value = 1
        self._volume_driver.logout_storage_target(
            target_iqn=mock.sentinel.fake_iqn,
            disconnected_luns_count=disconnected_luns_count)
        logout_storage = self._volume_driver._volutils.logout_storage_target
        if disconnected_luns_count:
            logout_storage.assert_called_once_with(mock.sentinel.fake_iqn)
        else:
            self.assertFalse(logout_storage.called)
    def test_logout_storage_target_skip(self):
        self._check_logout_storage_target()
    def test_logout_storage_target(self):
        self._check_logout_storage_target(disconnected_luns_count=1)
    @mock.patch.object(volumeops.ISCSIVolumeDriver,
                       '_get_mounted_disk_from_lun')
    def test_get_mounted_disk_path_from_volume(self,
                                               mock_get_mounted_disk_from_lun):
        connection_info = get_fake_connection_info()
        resulted_disk_path = (
            self._volume_driver.get_mounted_disk_path_from_volume(
                connection_info))
        mock_get_mounted_disk_from_lun.assert_called_once_with(
            connection_info['data']['target_iqn'],
            connection_info['data']['target_lun'],
            wait_for_device=True)
        self.assertEqual(mock_get_mounted_disk_from_lun.return_value,
                         resulted_disk_path)
    @mock.patch.object(volumeops.ISCSIVolumeDriver,
                       '_get_mounted_disk_from_lun')
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target')
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target')
    def test_attach_volume_exception(self, mock_login_storage_target,
                                     mock_logout_storage_target,
                                     mock_get_mounted_disk):
        # A failed attach must roll back by logging out of the target.
        connection_info = get_fake_connection_info()
        mock_get_mounted_disk.side_effect = os_win_exc.HyperVException
        self.assertRaises(os_win_exc.HyperVException,
                          self._volume_driver.attach_volume, connection_info,
                          mock.sentinel.instance_name)
        mock_logout_storage_target.assert_called_with(mock.sentinel.fake_iqn)
    @mock.patch.object(volumeops.ISCSIVolumeDriver,
                       '_get_mounted_disk_from_lun')
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target')
    def _check_attach_volume(self, mock_login_storage_target,
                             mock_get_mounted_disk_from_lun, ebs_root):
        # Helper: a boot (ebs_root) volume goes on IDE controller slot 0,
        # any other volume on the next free SCSI controller slot.
        connection_info = get_fake_connection_info()
        get_ide_path = self._volume_driver._vmutils.get_vm_ide_controller
        get_scsi_path = self._volume_driver._vmutils.get_vm_scsi_controller
        fake_ide_path = get_ide_path.return_value
        fake_scsi_path = get_scsi_path.return_value
        fake_mounted_disk_path = mock_get_mounted_disk_from_lun.return_value
        attach_vol = self._volume_driver._vmutils.attach_volume_to_controller
        get_free_slot = self._volume_driver._vmutils.get_free_controller_slot
        get_free_slot.return_value = 1
        self._volume_driver.attach_volume(
            connection_info=connection_info,
            instance_name=mock.sentinel.instance_name,
            ebs_root=ebs_root)
        mock_login_storage_target.assert_called_once_with(connection_info)
        mock_get_mounted_disk_from_lun.assert_called_once_with(
            mock.sentinel.fake_iqn,
            mock.sentinel.fake_lun,
            wait_for_device=True)
        if ebs_root:
            get_ide_path.assert_called_once_with(
                mock.sentinel.instance_name, 0)
            attach_vol.assert_called_once_with(mock.sentinel.instance_name,
                                               fake_ide_path, 0,
                                               fake_mounted_disk_path,
                                               serial=mock.sentinel.serial)
        else:
            get_scsi_path.assert_called_once_with(mock.sentinel.instance_name)
            get_free_slot.assert_called_once_with(fake_scsi_path)
            attach_vol.assert_called_once_with(mock.sentinel.instance_name,
                                               fake_scsi_path, 1,
                                               fake_mounted_disk_path,
                                               serial=mock.sentinel.serial)
    def test_attach_volume_ebs(self):
        self._check_attach_volume(ebs_root=True)
    def test_attach_volume(self):
        self._check_attach_volume(ebs_root=False)
    @mock.patch.object(volumeops.ISCSIVolumeDriver,
                       '_get_mounted_disk_from_lun')
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target')
    def test_detach_volume(self, mock_logout_storage_target,
                           mock_get_mounted_disk_from_lun):
        # Detaching removes the VM disk, then logs out of the target.
        connection_info = get_fake_connection_info()
        self._volume_driver.detach_volume(connection_info,
                                          mock.sentinel.instance_name)
        mock_get_mounted_disk_from_lun.assert_called_once_with(
            mock.sentinel.fake_iqn,
            mock.sentinel.fake_lun,
            wait_for_device=True)
        self._volume_driver._vmutils.detach_vm_disk.assert_called_once_with(
            mock.sentinel.instance_name,
            mock_get_mounted_disk_from_lun.return_value)
        mock_logout_storage_target.assert_called_once_with(
            mock.sentinel.fake_iqn)
    def test_get_mounted_disk_from_lun(self):
        with test.nested(
            mock.patch.object(self._volume_driver._volutils,
                              'get_device_number_for_target'),
            mock.patch.object(self._volume_driver._vmutils,
                              'get_mounted_disk_by_drive_number')
            ) as (mock_get_device_number_for_target,
                  mock_get_mounted_disk_by_drive_number):
            mock_get_device_number_for_target.return_value = 0
            mock_get_mounted_disk_by_drive_number.return_value = (
                mock.sentinel.disk_path)
            disk = self._volume_driver._get_mounted_disk_from_lun(
                mock.sentinel.target_iqn,
                mock.sentinel.target_lun)
            self.assertEqual(mock.sentinel.disk_path, disk)
    def test_get_target_from_disk_path(self):
        result = self._volume_driver.get_target_from_disk_path(
            mock.sentinel.physical_drive_path)
        mock_get_target = (
            self._volume_driver._volutils.get_target_from_disk_path)
        mock_get_target.assert_called_once_with(
            mock.sentinel.physical_drive_path)
        self.assertEqual(mock_get_target.return_value, result)
    @mock.patch('time.sleep')
    def test_get_mounted_disk_from_lun_failure(self, fake_sleep):
        # With one retry allowed and no usable device number returned,
        # the lookup must give up with NotFound. time.sleep is patched
        # so the retry loop doesn't slow down the test.
        self.flags(mounted_disk_query_retry_count=1, group='hyperv')
        with mock.patch.object(self._volume_driver._volutils,
                               'get_device_number_for_target') as m_device_num:
            m_device_num.side_effect = [None, -1]
            self.assertRaises(exception.NotFound,
                              self._volume_driver._get_mounted_disk_from_lun,
                              mock.sentinel.target_iqn,
                              mock.sentinel.target_lun)
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target')
    def test_disconnect_volumes(self, mock_logout_storage_target):
        block_device_info = get_fake_block_dev_info()
        connection_info = get_fake_connection_info()
        block_device_mapping = block_device_info['block_device_mapping']
        block_device_mapping[0]['connection_info'] = connection_info
        self._volume_driver.disconnect_volumes(block_device_mapping)
        mock_logout_storage_target.assert_called_once_with(
            mock.sentinel.fake_iqn, 1)
    def test_get_target_lun_count(self):
        result = self._volume_driver.get_target_lun_count(
            mock.sentinel.target_iqn)
        mock_get_lun_count = self._volume_driver._volutils.get_target_lun_count
        mock_get_lun_count.assert_called_once_with(mock.sentinel.target_iqn)
        self.assertEqual(mock_get_lun_count.return_value, result)
    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target')
    def test_initialize_volume_connection(self, mock_login_storage_target):
        self._volume_driver.initialize_volume_connection(
            mock.sentinel.connection_info)
        mock_login_storage_target.assert_called_once_with(
            mock.sentinel.connection_info)
class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V SMBFSVolumeDriver class.

    The _vmutils/_smbutils/_volutils collaborators are mocked in setUp().
    """
    # Fake SMB export and the same path with Windows-style separators.
    _FAKE_SHARE = '//1.2.3.4/fake_share'
    _FAKE_SHARE_NORMALIZED = _FAKE_SHARE.replace('/', '\\')
    _FAKE_DISK_NAME = 'fake_volume_name.vhdx'
    _FAKE_USERNAME = 'fake_username'
    _FAKE_PASSWORD = 'fake_password'
    # Mount options string in the form the driver's credential parser expects.
    _FAKE_SMB_OPTIONS = '-o username=%s,password=%s' % (_FAKE_USERNAME,
                                                        _FAKE_PASSWORD)
    _FAKE_CONNECTION_INFO = {'data': {'export': _FAKE_SHARE,
                                      'name': _FAKE_DISK_NAME,
                                      'options': _FAKE_SMB_OPTIONS,
                                      'volume_id': 'fake_vol_id'}}
    def setUp(self):
        super(SMBFSVolumeDriverTestCase, self).setUp()
        self._volume_driver = volumeops.SMBFSVolumeDriver()
        self._volume_driver._vmutils = mock.MagicMock()
        self._volume_driver._smbutils = mock.MagicMock()
        self._volume_driver._volutils = mock.MagicMock()
    @mock.patch.object(volumeops.SMBFSVolumeDriver,
                       '_get_disk_path')
    def test_get_mounted_disk_path_from_volume(self, mock_get_disk_path):
        disk_path = self._volume_driver.get_mounted_disk_path_from_volume(
            mock.sentinel.conn_info)
        self.assertEqual(mock_get_disk_path.return_value, disk_path)
        mock_get_disk_path.assert_called_once_with(mock.sentinel.conn_info)
    @mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted')
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
    def _check_attach_volume(self, mock_get_disk_path,
                             mock_ensure_share_mounted, ebs_root=False):
        # Helper: a boot (ebs_root) volume goes on IDE controller slot 0,
        # any other volume on a free SCSI controller slot; in both cases
        # the share is mounted before the drive is attached.
        mock_get_disk_path.return_value = mock.sentinel.disk_path
        self._volume_driver.attach_volume(
            self._FAKE_CONNECTION_INFO,
            mock.sentinel.instance_name,
            ebs_root)
        if ebs_root:
            get_vm_ide_controller = (
                self._volume_driver._vmutils.get_vm_ide_controller)
            get_vm_ide_controller.assert_called_once_with(
                mock.sentinel.instance_name, 0)
            ctrller_path = get_vm_ide_controller.return_value
            slot = 0
        else:
            get_vm_scsi_controller = (
                self._volume_driver._vmutils.get_vm_scsi_controller)
            get_vm_scsi_controller.assert_called_once_with(
                mock.sentinel.instance_name)
            get_free_controller_slot = (
                self._volume_driver._vmutils.get_free_controller_slot)
            get_free_controller_slot.assert_called_once_with(
                get_vm_scsi_controller.return_value)
            ctrller_path = get_vm_scsi_controller.return_value
            slot = get_free_controller_slot.return_value
        mock_ensure_share_mounted.assert_called_once_with(
            self._FAKE_CONNECTION_INFO)
        mock_get_disk_path.assert_called_once_with(self._FAKE_CONNECTION_INFO)
        self._volume_driver._vmutils.attach_drive.assert_called_once_with(
            mock.sentinel.instance_name, mock.sentinel.disk_path,
            ctrller_path, slot)
    def test_attach_volume_ide(self):
        self._check_attach_volume(ebs_root=True)
    def test_attach_volume_scsi(self):
        self._check_attach_volume()
    @mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted')
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
    def test_attach_non_existing_image(self, mock_get_disk_path,
                                       mock_ensure_share_mounted):
        # A HyperVException during attach_drive surfaces as
        # VolumeAttachFailed.
        self._volume_driver._vmutils.attach_drive.side_effect = (
            os_win_exc.HyperVException)
        self.assertRaises(exception.VolumeAttachFailed,
                          self._volume_driver.attach_volume,
                          self._FAKE_CONNECTION_INFO,
                          mock.sentinel.instance_name)
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
    def test_detach_volume(self, mock_get_disk_path):
        mock_get_disk_path.return_value = (
            mock.sentinel.disk_path)
        self._volume_driver.detach_volume(self._FAKE_CONNECTION_INFO,
                                          mock.sentinel.instance_name)
        self._volume_driver._vmutils.detach_vm_disk.assert_called_once_with(
            mock.sentinel.instance_name, mock.sentinel.disk_path,
            is_physical=False)
    def test_parse_credentials(self):
        username, password = self._volume_driver._parse_credentials(
            self._FAKE_SMB_OPTIONS)
        self.assertEqual(self._FAKE_USERNAME, username)
        self.assertEqual(self._FAKE_PASSWORD, password)
    def test_get_export_path(self):
        # The export path is normalized to Windows-style separators.
        result = self._volume_driver._get_export_path(
            self._FAKE_CONNECTION_INFO)
        expected = self._FAKE_SHARE.replace('/', '\\')
        self.assertEqual(expected, result)
    def test_get_disk_path(self):
        expected = os.path.join(self._FAKE_SHARE_NORMALIZED,
                                self._FAKE_DISK_NAME)
        disk_path = self._volume_driver._get_disk_path(
            self._FAKE_CONNECTION_INFO)
        self.assertEqual(expected, disk_path)
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
    def _test_ensure_mounted(self, mock_parse_credentials, is_mounted=False):
        # Helper: the share is only mounted when no SMB mapping exists yet.
        mock_mount_smb_share = self._volume_driver._smbutils.mount_smb_share
        self._volume_driver._smbutils.check_smb_mapping.return_value = (
            is_mounted)
        mock_parse_credentials.return_value = (
            self._FAKE_USERNAME, self._FAKE_PASSWORD)
        self._volume_driver.ensure_share_mounted(
            self._FAKE_CONNECTION_INFO)
        if is_mounted:
            self.assertFalse(
                mock_mount_smb_share.called)
        else:
            mock_mount_smb_share.assert_called_once_with(
                self._FAKE_SHARE_NORMALIZED,
                username=self._FAKE_USERNAME,
                password=self._FAKE_PASSWORD)
    def test_ensure_mounted_new_share(self):
        self._test_ensure_mounted()
    def test_ensure_already_mounted(self):
        self._test_ensure_mounted(is_mounted=True)
    def test_disconnect_volumes(self):
        block_device_mapping = [
            {'connection_info': self._FAKE_CONNECTION_INFO}]
        self._volume_driver.disconnect_volumes(block_device_mapping)
        mock_unmount_share = self._volume_driver._smbutils.unmount_smb_share
        mock_unmount_share.assert_called_once_with(
            self._FAKE_SHARE_NORMALIZED)
| |
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, engines, pickleable, assert_raises_message
import datetime
import os
from sqlalchemy import Table, Column, MetaData, Float, \
Integer, String, Boolean, TIMESTAMP, Sequence, Numeric, select, \
Date, Time, DateTime, DefaultClause, PickleType, text, Text, \
UnicodeText, LargeBinary
from sqlalchemy import types, schema
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql.base import TIME, _MSDate
from sqlalchemy.dialects.mssql.base import MS_2005_VERSION, MS_2008_VERSION
from sqlalchemy.testing import fixtures, \
AssertsExecutionResults, ComparesTables
from sqlalchemy import testing
from sqlalchemy.testing import emits_warning_on
import decimal
from sqlalchemy.util import b
class TimeTypeTest(fixtures.TestBase):
    """Tests for the MSSQL TIME type's string-to-time result processor."""

    def _make_processor(self):
        # Fresh TIME result processor; the dialect/coltype arguments are
        # not needed for these conversions.
        return TIME().result_processor(None, None)

    def _assert_result_processor(self, expected, value):
        process = self._make_processor()
        eq_(expected, process(value))

    def test_result_processor_no_microseconds(self):
        self._assert_result_processor(
            datetime.time(12, 34, 56), '12:34:56')

    def test_result_processor_too_many_microseconds(self):
        # microsecond must be in 0..999999, should truncate (6 vs 7 digits)
        self._assert_result_processor(
            datetime.time(12, 34, 56, 123456), '12:34:56.1234567')

    def test_result_processor_invalid(self):
        process = self._make_processor()
        assert_raises_message(
            ValueError,
            "could not parse 'abc' as a time value",
            process, 'abc')
class MSDateTypeTest(fixtures.TestBase):
    """Tests for the MSSQL _MSDate type's string-to-date result processor."""

    def _make_processor(self):
        # Fresh _MSDate result processor; the dialect/coltype arguments
        # are not needed for these conversions.
        return _MSDate().result_processor(None, None)

    def _assert_result_processor(self, expected, value):
        process = self._make_processor()
        eq_(expected, process(value))

    def test_result_processor(self):
        self._assert_result_processor(
            datetime.date(2000, 1, 2), '2000-01-02')

    def test_result_processor_invalid(self):
        process = self._make_processor()
        assert_raises_message(
            ValueError,
            "could not parse 'abc' as a date value",
            process, 'abc')
class TypeDDLTest(fixtures.TestBase):
    """Verify the DDL string the MSSQL dialect emits for each column type.

    Each test builds a Table from a list of
    (type, constructor args, constructor kwargs, expected DDL) specs, then
    compiles every column and compares it against the expectation.  No
    database connection is required.
    """

    def test_boolean(self):
        "Exercise type specification for boolean type."
        columns = [
            # column type, args, kwargs, expected ddl
            (Boolean, [], {},
             'BIT'),
        ]
        metadata = MetaData()
        table_args = ['test_mssql_boolean', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(
                Column('c%s' % index, type_(*args, **kw), nullable=None))
        boolean_table = Table(*table_args)
        dialect = mssql.dialect()
        gen = dialect.ddl_compiler(dialect, schema.CreateTable(boolean_table))
        for col in boolean_table.c:
            # column names are 'c0', 'c1', ... so the suffix indexes `columns`
            index = int(col.name[1:])
            testing.eq_(
                gen.get_column_specification(col),
                "%s %s" % (col.name, columns[index][3]))
            self.assert_(repr(col))

    def test_numeric(self):
        "Exercise type specification and options for numeric types."
        columns = [
            # column type, args, kwargs, expected ddl
            (types.NUMERIC, [], {},
             'NUMERIC'),
            (types.NUMERIC, [None], {},
             'NUMERIC'),
            (types.NUMERIC, [12, 4], {},
             'NUMERIC(12, 4)'),
            (types.Float, [], {},
             'FLOAT'),
            (types.Float, [None], {},
             'FLOAT'),
            (types.Float, [12], {},
             'FLOAT(12)'),
            (mssql.MSReal, [], {},
             'REAL'),
            (types.Integer, [], {},
             'INTEGER'),
            (types.BigInteger, [], {},
             'BIGINT'),
            (mssql.MSTinyInteger, [], {},
             'TINYINT'),
            (types.SmallInteger, [], {},
             'SMALLINT'),
        ]
        metadata = MetaData()
        table_args = ['test_mssql_numeric', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(
                Column('c%s' % index, type_(*args, **kw), nullable=None))
        numeric_table = Table(*table_args)
        dialect = mssql.dialect()
        gen = dialect.ddl_compiler(dialect, schema.CreateTable(numeric_table))
        for col in numeric_table.c:
            index = int(col.name[1:])
            testing.eq_(
                gen.get_column_specification(col),
                "%s %s" % (col.name, columns[index][3]))
            self.assert_(repr(col))

    def test_char(self):
        """Exercise COLLATE-ish options on string types."""
        columns = [
            (mssql.MSChar, [], {},
             'CHAR'),
            (mssql.MSChar, [1], {},
             'CHAR(1)'),
            (mssql.MSChar, [1], {'collation': 'Latin1_General_CI_AS'},
             'CHAR(1) COLLATE Latin1_General_CI_AS'),
            (mssql.MSNChar, [], {},
             'NCHAR'),
            (mssql.MSNChar, [1], {},
             'NCHAR(1)'),
            (mssql.MSNChar, [1], {'collation': 'Latin1_General_CI_AS'},
             'NCHAR(1) COLLATE Latin1_General_CI_AS'),
            (mssql.MSString, [], {},
             'VARCHAR(max)'),
            (mssql.MSString, [1], {},
             'VARCHAR(1)'),
            (mssql.MSString, [1], {'collation': 'Latin1_General_CI_AS'},
             'VARCHAR(1) COLLATE Latin1_General_CI_AS'),
            (mssql.MSNVarchar, [], {},
             'NVARCHAR(max)'),
            (mssql.MSNVarchar, [1], {},
             'NVARCHAR(1)'),
            (mssql.MSNVarchar, [1], {'collation': 'Latin1_General_CI_AS'},
             'NVARCHAR(1) COLLATE Latin1_General_CI_AS'),
            (mssql.MSText, [], {},
             'TEXT'),
            (mssql.MSText, [], {'collation': 'Latin1_General_CI_AS'},
             'TEXT COLLATE Latin1_General_CI_AS'),
            (mssql.MSNText, [], {},
             'NTEXT'),
            (mssql.MSNText, [], {'collation': 'Latin1_General_CI_AS'},
             'NTEXT COLLATE Latin1_General_CI_AS'),
        ]
        metadata = MetaData()
        table_args = ['test_mssql_charset', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(
                Column('c%s' % index, type_(*args, **kw), nullable=None))
        charset_table = Table(*table_args)
        dialect = mssql.dialect()
        gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table))
        for col in charset_table.c:
            index = int(col.name[1:])
            testing.eq_(
                gen.get_column_specification(col),
                "%s %s" % (col.name, columns[index][3]))
            self.assert_(repr(col))

    def test_dates(self):
        "Exercise type specification for date types."
        # Specs carry a fifth element: the server version for which the
        # expectation applies (None -> check against the 2008 dialect,
        # MS_2005_VERSION -> check against the 2005 dialect, which lacks
        # DATE/TIME and falls back to DATETIME).
        columns = [
            # column type, args, kwargs, expected ddl
            (mssql.MSDateTime, [], {},
             'DATETIME', None),
            (types.DATE, [], {},
             'DATE', None),
            (types.Date, [], {},
             'DATE', None),
            (types.Date, [], {},
             'DATETIME', MS_2005_VERSION),
            (mssql.MSDate, [], {},
             'DATE', None),
            (mssql.MSDate, [], {},
             'DATETIME', MS_2005_VERSION),
            (types.TIME, [], {},
             'TIME', None),
            (types.Time, [], {},
             'TIME', None),
            (mssql.MSTime, [], {},
             'TIME', None),
            (mssql.MSTime, [1], {},
             'TIME(1)', None),
            (types.Time, [], {},
             'DATETIME', MS_2005_VERSION),
            (mssql.MSTime, [], {},
             'TIME', None),
            (mssql.MSSmallDateTime, [], {},
             'SMALLDATETIME', None),
            (mssql.MSDateTimeOffset, [], {},
             'DATETIMEOFFSET', None),
            (mssql.MSDateTimeOffset, [1], {},
             'DATETIMEOFFSET(1)', None),
            (mssql.MSDateTime2, [], {},
             'DATETIME2', None),
            (mssql.MSDateTime2, [0], {},
             'DATETIME2(0)', None),
            (mssql.MSDateTime2, [1], {},
             'DATETIME2(1)', None),
            (mssql.MSTime, [0], {},
             'TIME(0)', None),
            (mssql.MSDateTimeOffset, [0], {},
             'DATETIMEOFFSET(0)', None),
        ]
        metadata = MetaData()
        table_args = ['test_mssql_dates', metadata]
        for index, spec in enumerate(columns):
            # only type_/args/kw are used while building the table; res and
            # server_version are re-read from `columns` in the check loop
            type_, args, kw, res, server_version = spec
            table_args.append(
                Column('c%s' % index, type_(*args, **kw), nullable=None))
        date_table = Table(*table_args)
        dialect = mssql.dialect()
        dialect.server_version_info = MS_2008_VERSION
        ms_2005_dialect = mssql.dialect()
        ms_2005_dialect.server_version_info = MS_2005_VERSION
        gen = dialect.ddl_compiler(dialect, schema.CreateTable(date_table))
        gen2005 = ms_2005_dialect.ddl_compiler(
            ms_2005_dialect, schema.CreateTable(date_table))
        for col in date_table.c:
            index = int(col.name[1:])
            server_version = columns[index][4]
            if not server_version:
                testing.eq_(
                    gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
            else:
                testing.eq_(
                    gen2005.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
            self.assert_(repr(col))

    def test_large_type_deprecation(self):
        # deprecate_large_types may be set explicitly, or derived from the
        # server version (>= 11 implies the VARCHAR(max) family).
        d1 = mssql.dialect(deprecate_large_types=True)
        d2 = mssql.dialect(deprecate_large_types=False)
        d3 = mssql.dialect()
        d3.server_version_info = (11, 0)
        d3._setup_version_attributes()
        d4 = mssql.dialect()
        d4.server_version_info = (10, 0)
        d4._setup_version_attributes()
        for dialect in (d1, d3):
            eq_(
                str(Text().compile(dialect=dialect)),
                "VARCHAR(max)"
            )
            eq_(
                str(UnicodeText().compile(dialect=dialect)),
                "NVARCHAR(max)"
            )
            eq_(
                str(LargeBinary().compile(dialect=dialect)),
                "VARBINARY(max)"
            )
        for dialect in (d2, d4):
            eq_(
                str(Text().compile(dialect=dialect)),
                "TEXT"
            )
            eq_(
                str(UnicodeText().compile(dialect=dialect)),
                "NTEXT"
            )
            eq_(
                str(LargeBinary().compile(dialect=dialect)),
                "IMAGE"
            )

    def test_timestamp(self):
        """Exercise TIMESTAMP column."""
        dialect = mssql.dialect()
        metadata = MetaData()
        spec, expected = (TIMESTAMP, 'TIMESTAMP')
        t = Table(
            'mssql_ts', metadata,
            Column('id', Integer, primary_key=True),
            Column('t', spec, nullable=None))
        gen = dialect.ddl_compiler(dialect, schema.CreateTable(t))
        testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected)
        self.assert_(repr(t.c.t))

    def test_money(self):
        """Exercise type specification for money types."""
        columns = [(mssql.MSMoney, [], {}, 'MONEY'),
                   (mssql.MSSmallMoney, [], {}, 'SMALLMONEY')]
        metadata = MetaData()
        table_args = ['test_mssql_money', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(Column('c%s' % index, type_(*args, **kw),
                              nullable=None))
        money_table = Table(*table_args)
        dialect = mssql.dialect()
        gen = dialect.ddl_compiler(dialect,
                                   schema.CreateTable(money_table))
        for col in money_table.c:
            index = int(col.name[1:])
            testing.eq_(gen.get_column_specification(col), '%s %s'
                        % (col.name, columns[index][3]))
            self.assert_(repr(col))

    def test_binary(self):
        "Exercise type specification for binary types."
        columns = [
            # column type, args, kwargs, expected ddl
            (mssql.MSBinary, [], {},
             'BINARY'),
            (mssql.MSBinary, [10], {},
             'BINARY(10)'),
            (types.BINARY, [], {},
             'BINARY'),
            (types.BINARY, [10], {},
             'BINARY(10)'),
            (mssql.MSVarBinary, [], {},
             'VARBINARY(max)'),
            (mssql.MSVarBinary, [10], {},
             'VARBINARY(10)'),
            (types.VARBINARY, [10], {},
             'VARBINARY(10)'),
            (types.VARBINARY, [], {},
             'VARBINARY(max)'),
            (mssql.MSImage, [], {},
             'IMAGE'),
            (mssql.IMAGE, [], {},
             'IMAGE'),
            (types.LargeBinary, [], {},
             'IMAGE'),
        ]
        metadata = MetaData()
        table_args = ['test_mssql_binary', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(Column('c%s' % index, type_(*args, **kw),
                              nullable=None))
        binary_table = Table(*table_args)
        dialect = mssql.dialect()
        gen = dialect.ddl_compiler(dialect,
                                   schema.CreateTable(binary_table))
        for col in binary_table.c:
            index = int(col.name[1:])
            testing.eq_(gen.get_column_specification(col), '%s %s'
                        % (col.name, columns[index][3]))
            self.assert_(repr(col))
metadata = None
class TypeRoundTripTest(
        fixtures.TestBase, AssertsExecutionResults, ComparesTables):
    """Round-trip type tests executed against a live MSSQL database.

    Uses the module-level ``metadata`` global, bound to the test engine in
    ``setup_class`` and dropped after each test.
    """

    __only_on__ = 'mssql'

    @classmethod
    def setup_class(cls):
        global metadata
        metadata = MetaData(testing.db)

    def teardown(self):
        metadata.drop_all()

    @testing.fails_on_everything_except('mssql+pyodbc')
    def test_decimal_notation(self):
        """Insert a wide range of Decimal values and read them back exactly."""
        numeric_table = Table(
            'numeric_table', metadata,
            Column(
                'id', Integer,
                Sequence('numeric_id_seq', optional=True), primary_key=True),
            Column(
                'numericcol',
                Numeric(precision=38, scale=20, asdecimal=True)))
        metadata.create_all()
        test_items = [decimal.Decimal(d) for d in (
            '1500000.00000000000000000000',
            '-1500000.00000000000000000000',
            '1500000',
            '0.0000000000000000002',
            '0.2',
            '-0.0000000000000000002',
            '-2E-2',
            '156666.458923543',
            '-156666.458923543',
            '1',
            '-1',
            '-1234',
            '1234',
            '2E-12',
            '4E8',
            '3E-6',
            '3E-7',
            '4.1',
            '1E-1',
            '1E-2',
            '1E-3',
            '1E-4',
            '1E-5',
            '1E-6',
            '1E-7',
            '1E-1',
            '1E-8',
            '0.2732E2',
            '-0.2432E2',
            '4.35656E2',
            '-02452E-2',
            '45125E-2',
            '1234.58965E-2',
            '1.521E+15',
            '-1E-25',
            '1E-25',
            '1254E-25',
            '-1203E-25',
            '0',
            '-0.00',
            '-0',
            '4585E12',
            '000000000000000000012',
            '000000000000.32E12',
            '00000000000000.1E+12',
            '000000000000.2E-32',
        )]
        for value in test_items:
            numeric_table.insert().execute(numericcol=value)
        for value in select([numeric_table.c.numericcol]).execute():
            assert value[0] in test_items, "%r not in test_items" % value[0]

    def test_float(self):
        """Float values should insert without error.

        The original wrapped the insert loop in ``try: ... except Exception
        as e: raise e`` — a no-op handler that only obscured the traceback;
        it has been removed.
        """
        float_table = Table(
            'float_table', metadata,
            Column(
                'id', Integer,
                Sequence('numeric_id_seq', optional=True), primary_key=True),
            Column('floatcol', Float()))
        metadata.create_all()
        test_items = [float(d) for d in (
            '1500000.00000000000000000000',
            '-1500000.00000000000000000000',
            '1500000',
            '0.0000000000000000002',
            '0.2',
            '-0.0000000000000000002',
            '156666.458923543',
            '-156666.458923543',
            '1',
            '-1',
            '1234',
            '2E-12',
            '4E8',
            '3E-6',
            '3E-7',
            '4.1',
            '1E-1',
            '1E-2',
            '1E-3',
            '1E-4',
            '1E-5',
            '1E-6',
            '1E-7',
            '1E-8',
        )]
        for value in test_items:
            float_table.insert().execute(floatcol=value)

    # todo this should suppress warnings, but it does not
    @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*')
    def test_dates(self):
        "Exercise type specification for date types."
        # Fifth element: server-version requirement for the column to be
        # created at all; optional sixth element is unused here.
        columns = [
            # column type, args, kwargs, expected ddl
            (mssql.MSDateTime, [], {},
             'DATETIME', []),
            (types.DATE, [], {},
             'DATE', ['>=', (10,)]),
            (types.Date, [], {},
             'DATE', ['>=', (10,)]),
            (types.Date, [], {},
             'DATETIME', ['<', (10,)], mssql.MSDateTime),
            (mssql.MSDate, [], {},
             'DATE', ['>=', (10,)]),
            (mssql.MSDate, [], {},
             'DATETIME', ['<', (10,)], mssql.MSDateTime),
            (types.TIME, [], {},
             'TIME', ['>=', (10,)]),
            (types.Time, [], {},
             'TIME', ['>=', (10,)]),
            (mssql.MSTime, [], {},
             'TIME', ['>=', (10,)]),
            (mssql.MSTime, [1], {},
             'TIME(1)', ['>=', (10,)]),
            (types.Time, [], {},
             'DATETIME', ['<', (10,)], mssql.MSDateTime),
            (mssql.MSTime, [], {},
             'TIME', ['>=', (10,)]),
            (mssql.MSSmallDateTime, [], {},
             'SMALLDATETIME', []),
            (mssql.MSDateTimeOffset, [], {},
             'DATETIMEOFFSET', ['>=', (10,)]),
            (mssql.MSDateTimeOffset, [1], {},
             'DATETIMEOFFSET(1)', ['>=', (10,)]),
            (mssql.MSDateTime2, [], {},
             'DATETIME2', ['>=', (10,)]),
            (mssql.MSDateTime2, [0], {},
             'DATETIME2(0)', ['>=', (10,)]),
            (mssql.MSDateTime2, [1], {},
             'DATETIME2(1)', ['>=', (10,)]),
        ]
        table_args = ['test_mssql_dates', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res, requires = spec[0:5]
            # NOTE: relies on `and` binding tighter than `or`:
            # (requires and excluded) or (not requires)
            if requires and \
                    testing._is_excluded('mssql', *requires) or not requires:
                c = Column('c%s' % index, type_(*args, **kw), nullable=None)
                testing.db.dialect.type_descriptor(c.type)
                table_args.append(c)
        dates_table = Table(*table_args)
        gen = testing.db.dialect.ddl_compiler(
            testing.db.dialect,
            schema.CreateTable(dates_table))
        for col in dates_table.c:
            index = int(col.name[1:])
            testing.eq_(gen.get_column_specification(col), '%s %s'
                        % (col.name, columns[index][3]))
            self.assert_(repr(col))
        dates_table.create(checkfirst=True)
        reflected_dates = Table('test_mssql_dates',
                                MetaData(testing.db), autoload=True)
        for col in reflected_dates.c:
            self.assert_types_base(col, dates_table.c[col.key])

    def test_date_roundtrip(self):
        """DATE/TIME/DATETIME values survive an insert/select round trip."""
        t = Table(
            'test_dates', metadata,
            Column('id', Integer,
                   Sequence('datetest_id_seq', optional=True),
                   primary_key=True),
            Column('adate', Date),
            Column('atime', Time),
            Column('adatetime', DateTime))
        metadata.create_all()
        d1 = datetime.date(2007, 10, 30)
        t1 = datetime.time(11, 2, 32)
        d2 = datetime.datetime(2007, 10, 30, 11, 2, 32)
        t.insert().execute(adate=d1, adatetime=d2, atime=t1)
        # a datetime is also accepted for date/time columns (coerced)
        t.insert().execute(adate=d2, adatetime=d2, atime=d2)
        x = t.select().execute().fetchall()[0]
        self.assert_(x.adate.__class__ == datetime.date)
        self.assert_(x.atime.__class__ == datetime.time)
        self.assert_(x.adatetime.__class__ == datetime.datetime)
        t.delete().execute()
        t.insert().execute(adate=d1, adatetime=d2, atime=t1)
        eq_(select([t.c.adate, t.c.atime, t.c.adatetime], t.c.adate
            == d1).execute().fetchall(), [(d1, t1, d2)])

    @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*')
    @testing.provide_metadata
    def _test_binary_reflection(self, deprecate_large_types):
        "Exercise type specification for binary types."
        columns = [
            # column type, args, kwargs, expected ddl from reflected
            (mssql.MSBinary, [], {},
             'BINARY(1)'),
            (mssql.MSBinary, [10], {},
             'BINARY(10)'),
            (types.BINARY, [], {},
             'BINARY(1)'),
            (types.BINARY, [10], {},
             'BINARY(10)'),
            (mssql.MSVarBinary, [], {},
             'VARBINARY(max)'),
            (mssql.MSVarBinary, [10], {},
             'VARBINARY(10)'),
            (types.VARBINARY, [10], {},
             'VARBINARY(10)'),
            (types.VARBINARY, [], {},
             'VARBINARY(max)'),
            (mssql.MSImage, [], {},
             'IMAGE'),
            (mssql.IMAGE, [], {},
             'IMAGE'),
            (types.LargeBinary, [], {},
             'IMAGE' if not deprecate_large_types else 'VARBINARY(max)'),
        ]
        metadata = self.metadata
        metadata.bind = engines.testing_engine(
            options={"deprecate_large_types": deprecate_large_types})
        table_args = ['test_mssql_binary', metadata]
        for index, spec in enumerate(columns):
            type_, args, kw, res = spec
            table_args.append(Column('c%s' % index, type_(*args, **kw),
                              nullable=None))
        binary_table = Table(*table_args)
        metadata.create_all()
        reflected_binary = Table('test_mssql_binary',
                                 MetaData(testing.db), autoload=True)
        for col, spec in zip(reflected_binary.c, columns):
            eq_(
                str(col.type), spec[3],
                "column %s %s != %s" % (col.key, str(col.type), spec[3])
            )
            c1 = testing.db.dialect.type_descriptor(col.type).__class__
            c2 = \
                testing.db.dialect.type_descriptor(
                    binary_table.c[col.name].type).__class__
            assert issubclass(c1, c2), \
                'column %s: %r is not a subclass of %r' \
                % (col.key, c1, c2)
            if binary_table.c[col.name].type.length:
                testing.eq_(col.type.length,
                            binary_table.c[col.name].type.length)

    def test_binary_reflection_legacy_large_types(self):
        self._test_binary_reflection(False)

    @testing.only_on('mssql >= 11')
    def test_binary_reflection_sql2012_large_types(self):
        self._test_binary_reflection(True)

    def test_autoincrement(self):
        """Only the intended primary-key column becomes the IDENTITY column."""
        Table(
            'ai_1', metadata,
            Column('int_y', Integer, primary_key=True),
            Column(
                'int_n', Integer, DefaultClause('0'),
                primary_key=True, autoincrement=False))
        Table(
            'ai_2', metadata,
            Column('int_y', Integer, primary_key=True),
            Column('int_n', Integer, DefaultClause('0'),
                   primary_key=True, autoincrement=False))
        Table(
            'ai_3', metadata,
            Column('int_n', Integer, DefaultClause('0'),
                   primary_key=True, autoincrement=False),
            Column('int_y', Integer, primary_key=True))
        Table(
            'ai_4', metadata,
            Column('int_n', Integer, DefaultClause('0'),
                   primary_key=True, autoincrement=False),
            Column('int_n2', Integer, DefaultClause('0'),
                   primary_key=True, autoincrement=False))
        Table(
            'ai_5', metadata,
            Column('int_y', Integer, primary_key=True),
            Column('int_n', Integer, DefaultClause('0'),
                   primary_key=True, autoincrement=False))
        Table(
            'ai_6', metadata,
            Column('o1', String(1), DefaultClause('x'),
                   primary_key=True),
            Column('int_y', Integer, primary_key=True))
        Table(
            'ai_7', metadata,
            Column('o1', String(1), DefaultClause('x'),
                   primary_key=True),
            Column('o2', String(1), DefaultClause('x'),
                   primary_key=True),
            Column('int_y', Integer, primary_key=True))
        Table(
            'ai_8', metadata,
            Column('o1', String(1), DefaultClause('x'),
                   primary_key=True),
            Column('o2', String(1), DefaultClause('x'),
                   primary_key=True))
        metadata.create_all()
        table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4',
                       'ai_5', 'ai_6', 'ai_7', 'ai_8']
        mr = MetaData(testing.db)
        for name in table_names:
            tbl = Table(name, mr, autoload=True)
            tbl = metadata.tables[name]
            for c in tbl.c:
                if c.name.startswith('int_y'):
                    assert c.autoincrement, name
                    assert tbl._autoincrement_column is c, name
                elif c.name.startswith('int_n'):
                    assert not c.autoincrement, name
                    assert tbl._autoincrement_column is not c, name
            # mxodbc can't handle scope_identity() with DEFAULT VALUES
            if testing.db.driver == 'mxodbc':
                eng = \
                    [engines.testing_engine(options={
                        'implicit_returning': True})]
            else:
                eng = \
                    [engines.testing_engine(options={
                        'implicit_returning': False}),
                     engines.testing_engine(options={
                         'implicit_returning': True})]
            for counter, engine in enumerate(eng):
                engine.execute(tbl.insert())
                if 'int_y' in tbl.c:
                    assert engine.scalar(select([tbl.c.int_y])) \
                        == counter + 1
                    assert list(
                        engine.execute(tbl.select()).first()).\
                        count(counter + 1) == 1
                else:
                    assert 1 \
                        not in list(engine.execute(tbl.select()).first())
                engine.execute(tbl.delete())
class MonkeyPatchedBinaryTest(fixtures.TestBase):
    """Check that the patched pymssql Binary() passes str/bytes through."""

    __only_on__ = 'mssql+pymssql'

    def test_unicode(self):
        pymssql = __import__('pymssql')
        eq_(pymssql.Binary('foo'), 'foo')

    def test_bytes(self):
        pymssql = __import__('pymssql')
        payload = b('\x80\x03]q\x00X\x03\x00\x00\x00oneq\x01a.')
        eq_(pymssql.Binary(payload), payload)
# Module-level placeholders; rebound by BinaryTest (setup_class / _fixture).
binary_table = None
MyPickleType = None
class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
    """Test the Binary and VarBinary types.

    Round-trips raw byte streams and pickled objects through VARBINARY,
    IMAGE and fixed-width BINARY columns.
    """

    __only_on__ = 'mssql'

    @classmethod
    def setup_class(cls):
        global MyPickleType

        class MyPickleType(types.TypeDecorator):
            impl = PickleType

            def process_bind_param(self, value, dialect):
                # mutate on the way in so the round trip is observable
                if value:
                    value.stuff = 'this is modified stuff'
                return value

            def process_result_value(self, value, dialect):
                if value:
                    value.stuff = 'this is the right stuff'
                return value

    def teardown(self):
        self.binary_table.drop(testing.db)

    def _fixture(self, engine):
        """Create (and remember for teardown) the table exercised below."""
        self.binary_table = binary_table = Table(
            'binary_table',
            MetaData(),
            Column('primary_id', Integer, Sequence('binary_id_seq',
                   optional=True), primary_key=True),
            Column('data', mssql.MSVarBinary(8000)),
            Column('data_image', mssql.MSImage),
            Column('data_slice', types.BINARY(100)),
            Column('misc', String(30)),
            Column('pickled', PickleType),
            Column('mypickle', MyPickleType),
        )
        binary_table.create(engine)
        return binary_table

    def test_binary_legacy_types(self):
        self._test_binary(False)

    @testing.only_on('mssql >= 11')
    def test_binary_updated_types(self):
        self._test_binary(True)

    def test_binary_none_legacy_types(self):
        self._test_binary_none(False)

    @testing.only_on('mssql >= 11')
    def test_binary_none_updated_types(self):
        self._test_binary_none(True)

    def _test_binary(self, deprecate_large_types):
        testobj1 = pickleable.Foo('im foo 1')
        testobj2 = pickleable.Foo('im foo 2')
        testobj3 = pickleable.Foo('im foo 3')
        stream1 = self._load_stream('binary_data_one.dat')
        stream2 = self._load_stream('binary_data_two.dat')
        engine = engines.testing_engine(
            options={"deprecate_large_types": deprecate_large_types})
        binary_table = self._fixture(engine)
        with engine.connect() as conn:
            conn.execute(
                binary_table.insert(),
                primary_id=1,
                misc='binary_data_one.dat',
                data=stream1,
                data_image=stream1,
                data_slice=stream1[0:100],
                pickled=testobj1,
                mypickle=testobj3,
            )
            conn.execute(
                binary_table.insert(),
                primary_id=2,
                misc='binary_data_two.dat',
                data=stream2,
                data_image=stream2,
                data_slice=stream2[0:99],
                pickled=testobj2,
            )
        # check both a Core select and a textual statement with an
        # explicit typemap
        for stmt in \
                binary_table.select(order_by=binary_table.c.primary_id), \
                text(
                    'select * from binary_table order by '
                    'binary_table.primary_id',
                    typemap=dict(
                        data=mssql.MSVarBinary(8000),
                        data_image=mssql.MSImage,
                        data_slice=types.BINARY(100), pickled=PickleType,
                        mypickle=MyPickleType),
                    bind=testing.db):
            with engine.connect() as conn:
                l = conn.execute(stmt).fetchall()
                eq_(list(stream1), list(l[0]['data']))
                # fixed-width BINARY zero-pads to the declared length
                paddedstream = list(stream1[0:100])
                paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
                eq_(paddedstream, list(l[0]['data_slice']))
                eq_(list(stream2), list(l[1]['data']))
                eq_(list(stream2), list(l[1]['data_image']))
                eq_(testobj1, l[0]['pickled'])
                eq_(testobj2, l[1]['pickled'])
                eq_(testobj3.moredata, l[0]['mypickle'].moredata)
                eq_(l[0]['mypickle'].stuff, 'this is the right stuff')

    def _test_binary_none(self, deprecate_large_types):
        engine = engines.testing_engine(
            options={"deprecate_large_types": deprecate_large_types})
        binary_table = self._fixture(engine)
        stream2 = self._load_stream('binary_data_two.dat')
        with engine.connect() as conn:
            conn.execute(
                binary_table.insert(),
                primary_id=3,
                misc='binary_data_two.dat', data_image=None,
                data_slice=stream2[0:99], pickled=None)
            for stmt in \
                    binary_table.select(), \
                    text(
                        'select * from binary_table',
                        typemap=dict(
                            data=mssql.MSVarBinary(8000),
                            data_image=mssql.MSImage,
                            data_slice=types.BINARY(100),
                            pickled=PickleType,
                            mypickle=MyPickleType),
                        bind=testing.db):
                row = conn.execute(stmt).first()
                eq_(
                    row['pickled'], None
                )
                eq_(
                    row['data_image'], None
                )
                # the type we used here is 100 bytes
                # so we will get 100 bytes zero-padded
                paddedstream = list(stream2[0:99])
                paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
                eq_(
                    list(row['data_slice']), paddedstream
                )

    def _load_stream(self, name, len=3000):
        """Return the first *len* bytes of a test data file.

        BUG FIX: the file handle was previously left unclosed if read()
        raised; a ``with`` block now guarantees closure.  The ``len``
        parameter shadows the builtin but is kept for signature
        compatibility with existing callers.
        """
        with open(
                os.path.join(os.path.dirname(__file__), "..", "..", name),
                'rb') as fp:
            return fp.read(len)
| |
"""ctrack REST API
"""
from datetime import date
import logging
from dateutil.parser import parse as parse_date
from django.db.models import Sum, Count
from django.db.models.functions import TruncMonth
from django.conf.urls import url, include
from django.http import Http404, HttpResponseBadRequest
from rest_framework import (decorators, generics, pagination, response, routers,
serializers, status, views, viewsets)
from django_filters import rest_framework as filters
import django_filters
from ctrack.models import (Account, Category, Transaction, PeriodDefinition,
RecurringPayment, Bill, BudgetEntry)
# Module-level logger used by the OFX/PDF import endpoints below.
logger = logging.getLogger(__name__)

# Serializers define the API representation.
class AccountSerializer(serializers.HyperlinkedModelSerializer):
    """API representation of an Account, including its balance."""
    class Meta:
        model = Account
        fields = ('url', 'id', 'name', 'balance')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """API representation of a transaction Category."""
    class Meta:
        model = Category
        fields = ('url', 'id', 'name')
class ScoredCategorySerializer(serializers.Serializer):
    """A category together with a relevance score (used by SuggestCategories)."""
    id = serializers.IntegerField()
    name = serializers.CharField(max_length=50)
    score = serializers.IntegerField()
class CategorySummarySerializer(serializers.Serializer):
    """Per-budget-entry summary: actual spend (value) vs budgeted amount."""
    id = serializers.IntegerField()
    name = serializers.CharField(max_length=50)
    value = serializers.FloatField()
    budget = serializers.FloatField()
class TransactionSerializer(serializers.ModelSerializer):
    """Transaction with a denormalised, read-only category name."""
    category_name = serializers.ReadOnlyField(source='category.name')
    class Meta:
        model = Transaction
        fields = ('url', 'id', 'when', 'amount', 'description', 'category', 'category_name', 'account')
class BillSerializer(serializers.ModelSerializer):
    """Bill with its computed paid state exposed read-only."""
    is_paid = serializers.ReadOnlyField()
    class Meta:
        model = Bill
        fields = ('url', 'id', 'description', 'due_date', 'due_amount', 'fixed_amount', 'var_amount',
                  'document', 'series', 'paying_transactions', 'is_paid')
class RecurringPaymentSerializer(serializers.ModelSerializer):
    """Recurring payment series with its bills nested read-only."""
    bills = BillSerializer(many=True, read_only=True)
    class Meta:
        model = RecurringPayment
        fields = ('url', 'id', 'name', 'is_income', 'bills', 'next_due_date')
class SplitTransSerializer(serializers.Serializer):
    """One (category, amount) part of a transaction split request."""
    category = serializers.PrimaryKeyRelatedField(queryset=Category.objects.all())
    amount = serializers.DecimalField(max_digits=10, decimal_places=2)
class LoadDataSerializer(serializers.Serializer):
    """Single uploaded file for the OFX/PDF import endpoints."""
    data_file = serializers.FileField()
class PeriodDefinitionSerializer(serializers.Serializer):
    """A concrete reporting period resolved from a PeriodDefinition."""
    label = serializers.CharField(max_length=40)
    from_date = serializers.DateField()
    to_date = serializers.DateField()
    id = serializers.IntegerField()
    offset = serializers.IntegerField()
class SeriesSerializer(serializers.Serializer):
    """One time-series point; the timestamp comes from the `dtime` key."""
    label = serializers.DateTimeField(source='dtime')
    value = serializers.DecimalField(max_digits=20, decimal_places=2)
class SummarySerializer(serializers.Serializer):
    """Per-category total produced by TransactionViewSet.summary."""
    category = serializers.IntegerField()
    category_name = serializers.CharField(max_length=40, source='category__name')
    total = serializers.DecimalField(max_digits=20, decimal_places=2)
class BudgetEntrySerializer(serializers.HyperlinkedModelSerializer):
    """API representation of a BudgetEntry and its linked categories."""
    class Meta:
        model = BudgetEntry
        fields = ('url', 'id', 'pretty_name', 'amount', 'valid_from', 'valid_to', 'categories')
# ViewSets define the view behavior.
class AccountViewSet(viewsets.ModelViewSet):
    """CRUD for accounts, plus OFX import and balance-series endpoints."""

    queryset = Account.objects.all()
    serializer_class = AccountSerializer

    @decorators.action(detail=True, methods=["post"])
    def load(self, request, pk=None):
        """Import transactions into this account from an uploaded OFX file."""
        account = self.get_object()
        serializer = LoadDataSerializer(data=request.data)
        if not serializer.is_valid():
            return response.Response(serializer.errors,
                                     status=status.HTTP_400_BAD_REQUEST)
        try:
            account.load_ofx(serializer.validated_data['data_file'])
        except (ValueError, IOError, TypeError):
            logger.exception("OFX parse error")
            return response.Response("Unable to load file. Bad format?",
                                     status=status.HTTP_400_BAD_REQUEST)
        return response.Response({'status': 'loaded'})

    @decorators.action(detail=True, methods=["get"])
    def series(self, request, pk=None):
        """Daily balance history as a list of {label, value} points."""
        balances = self.get_object().daily_balance()
        balances.index.name = 'dtime'
        records = balances.to_frame('value').reset_index().to_dict(orient='records')
        return response.Response(SeriesSerializer(records, many=True).data)
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD for categories, plus a monthly-total series endpoint."""

    queryset = Category.objects.all().order_by('name')
    serializer_class = CategorySerializer

    @decorators.action(detail=True, methods=["get"])
    def series(self, request, pk=None):
        """Sum of this category's transactions grouped by calendar month."""
        monthly_totals = (
            self.get_object().transaction_set
            .annotate(dtime=TruncMonth('when'))
            .values('dtime')
            .annotate(value=Sum('amount'))
        )
        return response.Response(
            SeriesSerializer(monthly_totals, many=True).data)
class PageNumberSettablePagination(pagination.PageNumberPagination):
    """Page-number pagination where the client may override the page size
    via the ``page_size`` query parameter (default 100)."""
    page_size_query_param = 'page_size'
    page_size = 100
class DateRangeTransactionFilter(filters.FilterSet):
    """Filter transactions by date range, account, category and whether a
    category has been assigned."""
    from_date = django_filters.DateFilter(field_name='when', lookup_expr='gte')
    to_date = django_filters.DateFilter(field_name='when', lookup_expr='lte')
    # exclude=True inverts isnull: has_category=true keeps categorised rows
    has_category = django_filters.BooleanFilter(
        field_name='category', exclude=True, lookup_expr='isnull',
    )
    class Meta:
        model = Transaction
        fields = ('from_date', 'to_date', 'account', 'category', 'has_category')
class TransactionViewSet(viewsets.ModelViewSet):
    """CRUD for (non-split) transactions, plus split and summary actions."""

    queryset = Transaction.objects.filter(is_split=False).order_by("-when", "-pk")
    serializer_class = TransactionSerializer
    pagination_class = PageNumberSettablePagination
    filter_class = DateRangeTransactionFilter

    @decorators.action(detail=True, methods=["post"])
    def split(self, request, pk=None):
        """Split this transaction into several (category, amount) parts."""
        transaction = self.get_object()
        parts = {}
        for item in request.data:
            serializer = SplitTransSerializer(data=item)
            if not serializer.is_valid():
                return HttpResponseBadRequest("Invalid arguments.")
            validated = serializer.validated_data
            parts[validated['category']] = validated['amount']
        try:
            transaction.split(parts)
        except Exception as thrown:  # surface model-layer failures as a 400
            return response.Response("Unable to set categories: {}".format(thrown),
                                     status=status.HTTP_400_BAD_REQUEST)
        return response.Response({"message": "Success"})

    @decorators.action(detail=False, methods=["get"])
    def summary(self, request):
        """Per-category totals over the currently filtered transactions."""
        queryset = self.filter_queryset(self.get_queryset().order_by())
        totals = (queryset
                  .values('category', 'category__name')
                  .annotate(total=Sum('amount'))
                  .order_by('total'))
        return response.Response(SummarySerializer(totals, many=True).data)
class SuggestCategories(generics.ListAPIView):
    """
    Suggest categories for a transaction.
    """
    serializer_class = ScoredCategorySerializer

    def get_queryset(self):
        # Unknown transaction or no suggestion available -> 404 either way.
        try:
            transaction = Transaction.objects.get(pk=self.kwargs['pk'])
            return transaction.suggest_category()
        except (Transaction.DoesNotExist, Category.DoesNotExist):
            raise Http404
class PeriodDefinitionView(views.APIView):
    """Flat list of the option specifiers of every PeriodDefinition."""

    queryset = PeriodDefinition.objects.all()

    def get(self, request, format=None):
        # BUG FIX: the handler was declared ``def get(self, formats=None)``,
        # so DRF's positionally-passed ``request`` was silently bound to
        # ``formats``.  Use the conventional (request, format) signature.
        data = sum((period.option_specifiers
                    for period in PeriodDefinition.objects.all()), [])
        return response.Response(data)
class RecurringPaymentViewSet(viewsets.ModelViewSet):
    """CRUD for recurring payments, plus PDF bill import."""

    queryset = RecurringPayment.objects.all().order_by('name')
    serializer_class = RecurringPaymentSerializer

    @decorators.action(detail=True, methods=["post"])
    def loadpdf(self, request, pk=None):
        """Attach a new bill to this payment series from an uploaded PDF."""
        payment = self.get_object()
        serializer = LoadDataSerializer(data=request.data)
        if not serializer.is_valid():
            return response.Response(serializer.errors,
                                     status=status.HTTP_400_BAD_REQUEST)
        try:
            payment.add_bill_from_file(serializer.validated_data['data_file'])
        except (ValueError, IOError, TypeError):
            logger.exception("Unable to load PDF")
            return response.Response("Unable to load file. Bad format?",
                                     status=status.HTTP_400_BAD_REQUEST)
        return response.Response({'status': 'loaded'})
class BillViewSet(viewsets.ModelViewSet):
    """CRUD for bills, filterable by due date and payment series."""
    queryset = Bill.objects.all().order_by('-due_date')
    serializer_class = BillSerializer
    filter_backends = (filters.backends.DjangoFilterBackend,)
    filter_fields = ('due_date', 'series')
class CategorySummary(generics.ListAPIView):
    """
    Summaries for all categories.

    For each budget entry valid at the end of the requested period, report
    the summed transaction value of its categories alongside the budget
    pro-rated over the period.
    """
    serializer_class = CategorySummarySerializer

    def get_queryset(self):
        from_date = parse_date(self.kwargs["from"])
        to_date = parse_date(self.kwargs["to"])
        # BUG FIX: this local was named ``filters``, shadowing the imported
        # django_filters ``filters`` module within this method.
        date_range = {
            "when__gte": from_date,
            "when__lte": to_date,
        }
        result = []
        for budget_entry in BudgetEntry.objects.for_period(to_date).order_by("-amount"):
            transactions = Transaction.objects.filter(
                category__in=budget_entry.categories.values_list("pk", flat=True),
                **date_range)
            value = 0.0
            if transactions:
                value = float(transactions.aggregate(sum=Sum("amount"))["sum"])
            budget = budget_entry.amount_over_period(from_date, to_date)
            result.append({
                "id": budget_entry.id,
                "name": budget_entry.pretty_name,
                "value": value,
                "budget": budget,
            })
        return result
class BudgetEntryViewSet(viewsets.ModelViewSet):
    """CRUD for budget entries."""
    queryset = BudgetEntry.objects.all()
    serializer_class = BudgetEntrySerializer
# Wire up the viewsets on a default router.
router = routers.DefaultRouter()
router.register(r'accounts', AccountViewSet)
router.register(r'categories', CategoryViewSet)
router.register(r'transactions', TransactionViewSet)
router.register(r'payments', RecurringPaymentViewSet)
router.register(r'bills', BillViewSet)
router.register(r'budget', BudgetEntryViewSet)

# Hand-rolled endpoints are listed before the catch-all router include so
# Django matches them first.
urls = [
    url(r'^transactions/(?P<pk>[0-9]+)/suggest$', SuggestCategories.as_view()),
    url(r'^categories/summary/(?P<from>[0-9]+)/(?P<to>[0-9]+)$', CategorySummary.as_view()),
    url(r'^periods/$', PeriodDefinitionView.as_view()),
    url(r'^', include(router.urls)),
]
| |
# Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    # BUG FIX: was a bare ``except:``, which would also have swallowed
    # SystemExit/KeyboardInterrupt.  ``Exception`` still tolerates the
    # non-ImportError failures matplotlib can raise at import time.
    have_matplotlib = False
# Ten groups of ten measurements, shared fixture data for tests later in
# this module.
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
    """Tests for stats.mvsdist, which returns frozen posterior distributions
    for the mean, variance and standard deviation of a sample."""

    def test_basic(self):
        sample = [6, 9, 12, 7, 8, 8, 13]
        mean, var, std = stats.mvsdist(sample)
        # Posterior means and 90% credible intervals; regression values.
        assert_almost_equal(mean.mean(), 9.0)
        assert_allclose(mean.interval(0.9),
                        (7.1036502226125329, 10.896349777387467), rtol=1e-14)
        assert_almost_equal(var.mean(), 10.0)
        assert_allclose(var.interval(0.9),
                        (3.1767242068607087, 24.45910381334018), rtol=1e-09)
        assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
        assert_allclose(std.interval(0.9),
                        (1.7823367265645145, 4.9456146050146312), rtol=1e-14)

    def test_empty_input(self):
        assert_raises(ValueError, stats.mvsdist, [])

    def test_bad_arg(self):
        # Fewer than two data points are rejected.
        assert_raises(ValueError, stats.mvsdist, [1])

    def test_warns(self):
        # Regression test for gh-5270: no spurious divide-by-zero warnings
        # for either the small-sample or the general code path.
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            for sample in ([1, 2, 3], [1, 2, 3, 4, 5]):
                [dist.mean() for dist in stats.mvsdist(sample)]
class TestShapiro(TestCase):
    """Tests for stats.shapiro (Shapiro-Wilk test of normality).

    shapiro returns (W, p); expected pairs below are regression values,
    with their sources noted inline.
    """

    def test_basic(self):
        # Two small fixed samples; expected (W, p) are regression values.
        x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
              4.43,0.21,4.75,0.71,1.52,3.24,
              0.93,0.42,4.97,9.53,4.55,0.47,6.66]
        w,pw = stats.shapiro(x1)
        assert_almost_equal(w,0.90047299861907959,6)
        assert_almost_equal(pw,0.042089745402336121,6)

        x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
              3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
              0.08,3.67,2.81,3.49]
        w,pw = stats.shapiro(x2)
        assert_almost_equal(w,0.9590270,6)
        assert_almost_equal(pw,0.52460,3)

        # Verified against R
        np.random.seed(12345678)
        x3 = stats.norm.rvs(loc=5, scale=3, size=100)
        w, pw = stats.shapiro(x3)
        assert_almost_equal(w, 0.9772805571556091, decimal=6)
        assert_almost_equal(pw, 0.08144091814756393, decimal=3)

        # Extracted from original paper
        x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
              0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
              3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
        W_expected = 0.83467
        p_expected = 0.000914
        w, pw = stats.shapiro(x4)
        assert_almost_equal(w, W_expected, decimal=4)
        assert_almost_equal(pw, p_expected, decimal=5)

    def test_2d(self):
        # Same values as test_basic's x1/x2, split across two rows: the
        # 2-D input must give the same result as the flattened data.
        x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
               4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
               0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
        w, pw = stats.shapiro(x1)
        assert_almost_equal(w, 0.90047299861907959, 6)
        assert_almost_equal(pw, 0.042089745402336121, 6)

        x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
               3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
               0.08, 3.67, 2.81, 3.49]]
        w, pw = stats.shapiro(x2)
        assert_almost_equal(w, 0.9590270, 6)
        assert_almost_equal(pw, 0.52460, 3)

    def test_empty_input(self):
        # Empty input -- flat or nested -- is rejected.
        assert_raises(ValueError, stats.shapiro, [])
        assert_raises(ValueError, stats.shapiro, [[], [], []])

    def test_not_enough_values(self):
        # Fewer than three values in total is rejected.
        assert_raises(ValueError, stats.shapiro, [1, 2])
        assert_raises(ValueError, stats.shapiro, [[], [2]])

    def test_bad_arg(self):
        # Length of x is less than 3.
        x = [1]
        assert_raises(ValueError, stats.shapiro, x)

    def test_nan_input(self):
        # A nan in the data makes W nan and the p-value degenerate to 1.
        x = np.arange(10.)
        x[9] = np.nan

        w, pw = stats.shapiro(x)
        assert_equal(w, np.nan)
        assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAndersonKSamp(TestCase):
    """Tests for stats.anderson_ksamp (k-sample Anderson-Darling test).

    anderson_ksamp returns (statistic, critical values, p-value).  Expected
    numbers come from Scholz & Stephens (1987) and an earlier technical
    report by the same authors; midrank=False exercises the exact-rank
    variant, midrank=True the midrank variant.
    """

    def test_example1a(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass a mixture of lists and arrays
        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        # This data makes the function emit its 'approximate p-value'
        # UserWarning; first assert the warning fires, then silence it to
        # get at the result.
        assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
                     midrank=False)

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)

        assert_almost_equal(Tk, 4.449, 3)
        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                  tm, 4)
        assert_almost_equal(p, 0.0021, 4)

    def test_example1b(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass arrays
        t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        # Same data as 1a but with the midrank variant.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)

        assert_almost_equal(Tk, 4.480, 3)
        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                  tm, 4)
        assert_almost_equal(p, 0.0020, 4)

    def test_example2a(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        # Pass lists instead of arrays
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=False)

        assert_almost_equal(Tk, 3.288, 3)
        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                  tm, 4)
        assert_almost_equal(p, 0.0041, 4)

    def test_example2b(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        # Same 14 samples as 2a, but with the midrank variant.
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=True)

        assert_almost_equal(Tk, 3.294, 3)
        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                  tm, 4)
        assert_almost_equal(p, 0.0041, 4)

    def test_not_enough_samples(self):
        # A single sample (not a sequence of samples) is rejected.
        assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))

    def test_no_distinct_observations(self):
        # All observations identical across samples is rejected.
        assert_raises(ValueError, stats.anderson_ksamp,
                      (np.ones(5), np.ones(5)))

    def test_empty_sample(self):
        # Any empty sample is rejected.
        assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))

    def test_result_attributes(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass a mixture of lists and arrays
        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)

        attributes = ('statistic', 'critical_values', 'significance_level')
        check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1,2,3,3,4]
y = [3,2,6,1,6,1,4,1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x,y)
assert_almost_equal(W,23.5,11)
assert_almost_equal(pval,0.13499256881897437,11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W,185.5,11)
assert_almost_equal(pval,0.18145819972867083,11)
def test_exact(self):
W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12])
assert_almost_equal(W,10.0,11)
assert_almost_equal(pval,0.533333333333333333,7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T,20.78587342806484,7)
assert_almost_equal(pval,0.0136358632781,7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
    """Tests for stats.binom_test (exact binomial test p-values)."""

    def test_data(self):
        # Plain success counts, and a [successes, failures] pair with a
        # non-default p; expected values are regression values.
        assert_almost_equal(stats.binom_test(100, 250),
                            0.0018833009350757682, 11)
        assert_almost_equal(stats.binom_test(201, 405),
                            0.92085205962670713, 11)
        assert_almost_equal(stats.binom_test([682, 243], p=3.0/4),
                            0.38249155957481695, 11)

    def test_bad_len_x(self):
        # Length of x must be 1 or 2.
        assert_raises(ValueError, stats.binom_test, [1, 2, 3])

    def test_bad_n(self):
        # With len(x) == 1, n must be present and at least x[0].
        assert_raises(ValueError, stats.binom_test, [100])
        assert_raises(ValueError, stats.binom_test, [100], n=50)

    def test_bad_p(self):
        # p outside [0, 1] is rejected.
        assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)

    def test_alternatives(self):
        # One-sided and two-sided alternatives; regression values.
        for alternative, expected in [('less', 0.982022657605858),
                                      ('greater', 0.02654424571169085),
                                      ('two-sided', 0.0437479701823997)]:
            res = stats.binom_test(51, 235, p=1./6,
                                   alternative=alternative)
            assert_almost_equal(res, expected)
class TestFligner(TestCase):
    """Tests for stats.fligner (Fligner-Killeen test for equal variances)."""

    def test_data(self):
        # numbers from R: fligner.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.fligner(x1,x1**2),
                           (3.2282229927203536, 0.072379187848207877), 11)

    def test_trimmed1(self):
        # Test that center='trimmed' gives the same result as center='mean'
        # when proportiontocut=0.
        Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
        Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed',
                                    proportiontocut=0.0)
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    def test_trimmed2(self):
        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
        # Use center='trimmed'
        Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
                                    proportiontocut=0.125)
        # Trim the data here, and use center='mean'
        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
        # Result should be the same.
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    # The following test looks reasonable at first, but fligner() uses the
    # function stats.rankdata(), and in one of the cases in this test,
    # there are ties, while in the other (because of normal rounding
    # errors) there are not.  This difference leads to differences in the
    # third significant digit of W.
    #
    #def test_equal_mean_median(self):
    #    x = np.linspace(-1,1,21)
    #    y = x**3
    #    W1, pval1 = stats.fligner(x, y, center='mean')
    #    W2, pval2 = stats.fligner(x, y, center='median')
    #    assert_almost_equal(W1, W2)
    #    assert_almost_equal(pval1, pval2)

    def test_bad_keyword(self):
        # Misspelled keyword (should be proportiontocut) raises TypeError.
        x = np.linspace(-1,1,21)
        assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)

    def test_bad_center_value(self):
        # 'trim' is not a valid center value.
        x = np.linspace(-1,1,21)
        assert_raises(ValueError, stats.fligner, x, x, center='trim')

    def test_bad_num_args(self):
        # Too few args raises ValueError.
        assert_raises(ValueError, stats.fligner, [1])

    def test_empty_arg(self):
        # An empty sample among the inputs makes the result (nan, nan).
        x = np.arange(5)
        assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
    """Tests for stats.mood (Mood's two-sample test for scale parameters).

    Expected (z, p) values come from R's mood.test; the axis tests check
    that vectorized results match per-slice 1-D computations.
    """

    def test_mood(self):
        # numbers from R: mood.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.mood(x1, x1**2),
                                  (-1.3830857299399906, 0.16663858066771478),
                                  11)

    def test_mood_order_of_args(self):
        # z should change sign when the order of arguments changes, pvalue
        # should not change
        np.random.seed(1234)
        x1 = np.random.randn(10, 1)
        x2 = np.random.randn(15, 1)
        z1, p1 = stats.mood(x1, x2)
        z2, p2 = stats.mood(x2, x1)
        assert_array_almost_equal([z1, p1], [-z2, p2])

    def test_mood_with_axis_none(self):
        # Test with axis = None, compare with results from R.  The arrays
        # are reshaped to 2-D, but with axis=None that shape is presumably
        # ignored and the samples treated as flat -- the expected values
        # below are for the flat data.
        x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
              1.59528080213779, 0.329507771815361, -0.820468384118015,
              0.487429052428485, 0.738324705129217, 0.575781351653492,
              -0.305388387156356, 1.51178116845085, 0.389843236411431,
              -0.621240580541804, -2.2146998871775, 1.12493091814311,
              -0.0449336090152309, -0.0161902630989461, 0.943836210685299,
              0.821221195098089, 0.593901321217509]

        x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
              -1.13037567424629, -0.0802517565509893, 0.132420284381094,
              0.707954729271733, -0.23969802417184, 1.98447393665293,
              -0.138787012119665, 0.417650750792556, 0.981752777463662,
              -0.392695355503813, -1.03966897694891, 1.78222896030858,
              -2.31106908460517, 0.878604580921265, 0.035806718015226,
              1.01282869212708, 0.432265154539617, 2.09081920524915,
              -1.19992581964387, 1.58963820029007, 1.95465164222325,
              0.00493777682814261, -2.45170638784613, 0.477237302613617,
              -0.596558168631403, 0.792203270299649, 0.289636710177348]

        x1 = np.array(x1)
        x2 = np.array(x2)
        x1.shape = (10, 2)
        x2.shape = (15, 2)
        assert_array_almost_equal(stats.mood(x1, x2, axis=None),
                                  [-1.31716607555, 0.18778296257])

    def test_mood_2d(self):
        # Test if the results of mood test in 2-D case are consistent with the
        # R result for the same inputs.  Numbers from R mood.test().
        ny = 5
        np.random.seed(1234)
        x1 = np.random.randn(10, ny)
        x2 = np.random.randn(15, ny)
        z_vectest, pval_vectest = stats.mood(x1, x2)
        for j in range(ny):
            assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
                                      stats.mood(x1[:, j], x2[:, j]))

        # inverse order of dimensions
        x1 = x1.transpose()
        x2 = x2.transpose()
        z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
        for i in range(ny):
            # check axis handling is self consistent
            assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
                                      stats.mood(x1[i, :], x2[i, :]))

    def test_mood_3d(self):
        shape = (10, 5, 6)
        np.random.seed(1234)
        x1 = np.random.randn(*shape)
        x2 = np.random.randn(*shape)
        for axis in range(3):
            z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
            # Tests that result for 3-D arrays is equal to that for the
            # same calculation on a set of 1-D arrays taken from the
            # 3-D array
            axes_idx = ([1, 2], [0, 2], [0, 1])  # the two axes != axis
            for i in range(shape[axes_idx[axis][0]]):
                for j in range(shape[axes_idx[axis][1]]):
                    if axis == 0:
                        slice1 = x1[:, i, j]
                        slice2 = x2[:, i, j]
                    elif axis == 1:
                        slice1 = x1[i, :, j]
                        slice2 = x2[i, :, j]
                    else:
                        slice1 = x1[i, j, :]
                        slice2 = x2[i, j, :]

                    assert_array_almost_equal([z_vectest[i, j],
                                               pval_vectest[i, j]],
                                              stats.mood(slice1, slice2))

    def test_mood_bad_arg(self):
        # Raise ValueError when the sum of the lengths of the args is
        # less than 3
        assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
    """Tests for stats.probplot (probability / quantile-quantile plot data).

    probplot returns (osm, osr) -- theoretical quantiles and the ordered
    sample -- and, with fit=True, additionally a 3-element fit result
    (presumably slope, intercept, r of a least-squares fit; confirm in the
    scipy docs).
    """

    def test_basic(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        osm, osr = stats.probplot(x, fit=False)
        # Regression values for the theoretical quantiles of 20 points.
        osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
                        -0.73908135, -0.5857176, -0.44506467, -0.31273668,
                        -0.18568928, -0.06158146, 0.06158146, 0.18568928,
                        0.31273668, 0.44506467, 0.5857176, 0.73908135,
                        0.91222575, 1.11829229, 1.38768012, 1.8241636]
        # The ordered responses are just the sorted sample.
        assert_allclose(osr, np.sort(x))
        assert_allclose(osm, osm_expected)

        res, res_fit = stats.probplot(x, fit=True)
        res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
        assert_allclose(res_fit, res_fit_expected)

    def test_sparams_keyword(self):
        np.random.seed(123456)
        x = stats.norm.rvs(size=100)
        # Check that None, () and 0 (loc=0, for normal distribution) all work
        # and give the same results
        osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
        osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
        osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osm1, osm3)
        assert_allclose(osr1, osr2)
        assert_allclose(osr1, osr3)
        # Check giving (loc, scale) params for normal distribution
        # NOTE(review): the comment above appears stale -- sparams=() does
        # not pass (loc, scale); this should perhaps be sparams=(0, 1).
        # The result is unused, so the line only checks the call succeeds.
        osm, osr = stats.probplot(x, sparams=(), fit=False)

    def test_dist_keyword(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        # The distribution can be given by name or as a distribution object;
        # both must give the same result.
        osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
        osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

        # Unknown name -> ValueError; object without a ppf -> AttributeError.
        assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
        assert_raises(AttributeError, stats.probplot, x, dist=[])

        class custom_dist(object):
            """Some class that looks just enough like a distribution."""
            def ppf(self, q):
                return stats.norm.ppf(q, loc=2)

        # A duck-typed object with only `ppf` must behave like the real
        # distribution with the same parameters.
        osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
        osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        np.random.seed(7654321)
        fig = plt.figure()
        fig.add_subplot(111)
        x = stats.t.rvs(3, size=100)
        res1, fitres1 = stats.probplot(x, plot=plt)
        plt.close()
        res2, fitres2 = stats.probplot(x, plot=None)
        res3 = stats.probplot(x, fit=False, plot=plt)
        plt.close()
        res4 = stats.probplot(x, fit=False, plot=None)
        # Check that results are consistent between combinations of `fit` and
        # `plot` keywords.
        assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
        assert_allclose(res1, res2)
        assert_allclose(res1, res3)
        assert_allclose(res1, res4)
        assert_allclose(fitres1, fitres2)

        # Check that a Matplotlib Axes object is accepted
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.probplot(x, fit=False, plot=ax)
        plt.close()

    def test_probplot_bad_args(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")

    def test_empty(self):
        # Empty input yields empty arrays (and a degenerate nan fit).
        assert_equal(stats.probplot([], fit=False),
                     (np.array([]), np.array([])))
        assert_equal(stats.probplot([], fit=True),
                     ((np.array([]), np.array([])),
                      (np.nan, np.nan, 0.0)))

    def test_array_of_size_one(self):
        # A single point cannot be fit; slope/intercept are nan.
        with np.errstate(invalid='ignore'):
            assert_equal(stats.probplot([1], fit=True),
                         ((np.array([0.]), np.array([1])),
                          (np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
    """wilcoxon must reject samples of unequal length and an unknown
    zero_method."""
    assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
    assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
class TestKstat(TestCase):
    """Tests for stats.kstat (k-statistics, the unbiased cumulant
    estimators of orders 1-4)."""

    def test_moments_normal_distribution(self):
        np.random.seed(32149)
        data = np.random.randn(12345)
        # Regression values for the first four k-statistics.
        moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
        expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
        assert_allclose(moments, expected, rtol=1e-4)

        # The first three k-statistics agree (approximately, for this
        # sample size) with the central moments of the same orders.
        central = [stats.moment(data, moment=k) for k in (1, 2, 3)]
        assert_allclose(central, expected[:-1], atol=0.02, rtol=1e-2)

    def test_empty_input(self):
        assert_raises(ValueError, stats.kstat, [])

    def test_nan_input(self):
        # A nan in the data propagates to the result.
        data = np.arange(10.)
        data[6] = np.nan

        assert_equal(stats.kstat(data), np.nan)

    def test_kstat_bad_arg(self):
        # Raise ValueError if n > 4 or n < 1.
        data = np.arange(10)
        for n in [0, 4.001]:
            assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
    """Tests for stats.kstatvar, the variance of the k-statistic.

    Mirrors the edge-case tests of TestKstat, but exercises ``kstatvar``.
    """

    def test_empty_input(self):
        # Empty data is rejected outright.
        assert_raises(ValueError, stats.kstatvar, [])

    def test_nan_input(self):
        # A nan in the data propagates to the result.
        # Bug fix: this previously called stats.kstat instead of
        # stats.kstatvar (copy-paste from TestKstat), so kstatvar's nan
        # handling was never actually tested.
        data = np.arange(10.)
        data[6] = np.nan

        assert_equal(stats.kstatvar(data), np.nan)

    def test_bad_arg(self):
        # Raise ValueError if n is not 1 or 2.
        data = [1]
        n = 10
        assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
    """Tests for stats.ppcc_plot (probability plot correlation coefficient
    as a function of a distribution's shape parameter)."""

    def setUp(self):
        # Fixed loggamma sample reused by every test in this class.
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        N = 5
        svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
        # Regression values for the five-point shape-parameter grid.
        ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
                        0.93519298]
        assert_allclose(svals, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    def test_dist(self):
        # Test that we can specify distributions both by name and as objects.
        svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
        svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
                                        dist=stats.tukeylambda)
        assert_allclose(svals1, svals2, rtol=1e-20)
        assert_allclose(ppcc1, ppcc2, rtol=1e-20)
        # Test that 'tukeylambda' is the default dist
        svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
        assert_allclose(svals1, svals3, rtol=1e-20)
        assert_allclose(ppcc1, ppcc3, rtol=1e-20)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=plt)
        plt.close()

        # Check that a Matplotlib Axes object is accepted
        # NOTE(review): the first add_subplot call below is redundant --
        # its result is discarded and the next line creates the axes
        # actually used.
        fig.add_subplot(111)
        ax = fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `b` has to be larger than `a`
        assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)

        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
                      dist="plate_of_shrimp")

    def test_empty(self):
        # For consistency with probplot return for one empty array,
        # ppcc contains all zeros and svals is the same as for normal array
        # input.
        svals, ppcc = stats.ppcc_plot([], 0, 1)
        assert_allclose(svals, np.linspace(0, 1, num=80))
        assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
    """Tests for stats.ppcc_max (the shape parameter maximizing the
    probability plot correlation coefficient)."""

    def test_ppcc_max_bad_arg(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.ppcc_max, [1],
                      dist="plate_of_shrimp")

    def test_ppcc_max_basic(self):
        np.random.seed(1234567)
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
        # On Python 2.6 the result is accurate to 5 decimals; on >= 2.7 it
        # is accurate up to 16 decimals.
        assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145,
                            decimal=5)

    def test_dist(self):
        np.random.seed(1234567)
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
        # The distribution may be given by name or as an object, and
        # 'tukeylambda' is the default; all three must agree.
        results = [stats.ppcc_max(x, dist='tukeylambda'),
                   stats.ppcc_max(x, dist=stats.tukeylambda),
                   stats.ppcc_max(x)]
        for res in results:
            assert_almost_equal(res, -0.71215366521264145, decimal=5)

    def test_brack(self):
        np.random.seed(1234567)
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
        # A three-element bracket is invalid.
        assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))

        # Both a bracket away from the optimum and one containing it
        # converge to the same maximizer (accuracy is 5 decimals on
        # Python 2.6, 16 decimals on >= 2.7).
        assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
                            -0.71215366521264145, decimal=5)
        assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
                            -0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
    """Tests for stats.boxcox_llf (Box-Cox log-likelihood function)."""

    def test_basic(self):
        np.random.seed(54321)
        x = stats.norm.rvs(size=10000, loc=10)
        # For lmbda=1 the transform is just a shift, so the log-likelihood
        # has this closed form in terms of the sample std.
        expected = -x.size / 2. * np.log(np.sum(x.std() ** 2))
        assert_allclose(stats.boxcox_llf(1, x), expected)

    def test_array_like(self):
        np.random.seed(54321)
        x = stats.norm.rvs(size=100, loc=10)
        # A plain list must give the same result as an ndarray.
        assert_allclose(stats.boxcox_llf(1, x),
                        stats.boxcox_llf(1, list(x)), rtol=1e-12)

    def test_2d_input(self):
        # Note: boxcox_llf() was already working with 2-D input (sort of), so
        # keep it like that.  boxcox() doesn't work with 2-D input though,
        # due to brent() returning a scalar.
        np.random.seed(54321)
        x = stats.norm.rvs(size=100, loc=10)
        llf_1d = stats.boxcox_llf(1, x)
        llf_2d = stats.boxcox_llf(1, np.vstack([x, x]).T)
        # Each column gives the same value as the 1-D computation.
        assert_allclose([llf_1d, llf_1d], llf_2d, rtol=1e-12)

    def test_empty(self):
        assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
    """Tests for stats.boxcox (Box-Cox power transform)."""

    def test_fixed_lmbda(self):
        # With an explicit lmbda the transform has simple closed forms:
        # lmbda=1 -> x - 1, lmbda=-1 -> 1 - 1/x, lmbda=0 -> log(x).
        np.random.seed(12345)
        x = stats.loggamma.rvs(5, size=50) + 5
        xt = stats.boxcox(x, lmbda=1)
        assert_allclose(xt, x - 1)
        xt = stats.boxcox(x, lmbda=-1)
        assert_allclose(xt, 1 - 1/x)

        xt = stats.boxcox(x, lmbda=0)
        assert_allclose(xt, np.log(x))

        # Also test that array_like input works
        xt = stats.boxcox(list(x), lmbda=0)
        assert_allclose(xt, np.log(x))

    def test_lmbda_None(self):
        # NOTE(review): np.random.seed is called twice here; the second
        # call (1245) overrides the first, which is redundant.
        np.random.seed(1234567)
        # Start from normal rv's, do inverse transform to check that
        # optimization function gets close to the right answer.
        np.random.seed(1245)
        lmbda = 2.5
        x = stats.norm.rvs(loc=10, size=50000)
        x_inv = (x * lmbda + 1)**(-lmbda)
        xt, maxlog = stats.boxcox(x_inv)

        assert_almost_equal(maxlog, -1 / lmbda, decimal=2)

    def test_alpha(self):
        np.random.seed(1234)
        x = stats.loggamma.rvs(5, size=50) + 5

        # Some regular values for alpha, on a small sample size
        _, _, interval = stats.boxcox(x, alpha=0.75)
        assert_allclose(interval, [4.004485780226041, 5.138756355035744])
        _, _, interval = stats.boxcox(x, alpha=0.05)
        assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

        # Try some extreme values, see we don't hit the N=500 limit
        x = stats.loggamma.rvs(7, size=500) + 15
        _, _, interval = stats.boxcox(x, alpha=0.001)
        assert_allclose(interval, [0.3988867, 11.40553131])
        _, _, interval = stats.boxcox(x, alpha=0.999)
        assert_allclose(interval, [5.83316246, 5.83735292])

    def test_boxcox_bad_arg(self):
        # Raise ValueError if any data value is negative.
        x = np.array([-1])
        assert_raises(ValueError, stats.boxcox, x)

    def test_empty(self):
        # Empty input transforms to an empty array.
        assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
    """Tests for stats.boxcox_normplot (correlation of the Box-Cox
    transformed data with normality, over a grid of lmbda values)."""

    def setUp(self):
        # Fixed loggamma sample reused by every test in this class.
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        N = 5
        lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
        # Regression values for the five-point lmbda grid.
        ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
                         0.95843297]
        assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=plt)
        plt.close()

        # Check that a Matplotlib Axes object is accepted
        fig.add_subplot(111)
        ax = fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `lb` has to be larger than `la`
        assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
        # `x` can not contain negative values
        assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)

    def test_empty(self):
        # Empty input yields an empty result.
        assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355,5,2,359,10,350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20,21,22,18,19,20.5,19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355,5,2,359,10,350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
    """Compare wilcoxon's zero_method variants and the continuity
    correction against reference values (computed with R's wilcox.test)."""
    freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
    nums = range(-4, 5)
    x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
    y = np.zeros(x.size)

    for zero_method, t_expected, p_expected in [
            ("pratt", 423, 0.00197547303533107),
            ("zsplit", 441, 0.0032145343172473055),
            ("wilcox", 327, 0.00641346115861)]:
        T, p = stats.wilcoxon(x, y, zero_method)
        assert_allclose(T, t_expected)
        assert_allclose(p, p_expected)

    # Test the 'correction' option, using values computed in R with:
    # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    for correct, p_expected in [(False, 0.6948866), (True, 0.7240817)]:
        T, p = stats.wilcoxon(x, y, correction=correct)
        assert_equal(T, 34)
        assert_allclose(p, p_expected, rtol=1e-6)
def test_wilcoxon_result_attributes():
    """The wilcoxon result exposes 'statistic' and 'pvalue' attributes."""
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    result = stats.wilcoxon(x, y, correction=False)
    check_named_results(result, ('statistic', 'pvalue'))
def test_wilcoxon_tie():
    """Regression test for gh-2391 (all differences are zero/tied).

    Corresponding R code is:
    > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
    > result$p.value
    [1] 0.001565402
    > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
    > result$p.value
    [1] 0.001904195
    """
    sample = [0.1] * 10

    stat, p = stats.wilcoxon(sample)
    assert_equal(stat, 0)
    assert_allclose(p, 0.001565402, rtol=1e-6)

    stat, p = stats.wilcoxon(sample, correction=True)
    assert_equal(stat, 0)
    assert_allclose(p, 0.001904195, rtol=1e-6)
class TestMedianTest(TestCase):
    """Behavioral checks for stats.median_test."""

    def test_bad_n_samples(self):
        # Fewer than two samples is invalid.
        assert_raises(ValueError, stats.median_test, [1, 2, 3])

    def test_empty_sample(self):
        # Every sample must contain at least one value.
        assert_raises(ValueError, stats.median_test, [], [1, 2, 3])

    def test_empty_when_ties_ignored(self):
        # The grand median is 1 and every value of the first sample equals
        # it, so ties="ignore" leaves that sample effectively empty, which
        # must raise ValueError.
        assert_raises(ValueError, stats.median_test,
                      [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")

    def test_empty_contingency_row(self):
        # With the default ties="below" every value counts as below the
        # grand median (1), producing an all-zero contingency row -> error.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
        # Same problem with ties="above": everything counts as above.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
                      ties="above")

    def test_bad_ties(self):
        assert_raises(ValueError, stats.median_test,
                      [1, 2, 3], [4, 5], ties="foo")

    def test_bad_keyword(self):
        assert_raises(TypeError, stats.median_test,
                      [1, 2, 3], [4, 5], foo="foo")

    def test_simple(self):
        sample_a = [1, 2, 3]
        sample_b = [1, 2, 3]
        stat, p, med, tbl = stats.median_test(sample_a, sample_b)

        # The median is floating point, but this equality test should be safe.
        assert_equal(med, 2.0)
        assert_array_equal(tbl, [[1, 1], [2, 2]])
        # Expected counts equal the observed contingency table, so the
        # statistic is 0 and the p-value is 1.
        assert_equal(stat, 0)
        assert_equal(p, 1)

    def test_ties_options(self):
        # Test the contingency table calculation for each 'ties' option.
        a = [1, 2, 3, 4]
        b = [5, 6]
        c = [7, 8, 9]
        # The grand median is 5; the default 'ties' option is "below".
        for kwargs, expected_tbl in [
                ({}, [[0, 1, 3], [4, 1, 0]]),
                ({"ties": "ignore"}, [[0, 1, 3], [4, 0, 0]]),
                ({"ties": "above"}, [[0, 2, 3], [4, 0, 0]])]:
            stat, p, m, tbl = stats.median_test(a, b, c, **kwargs)
            assert_equal(m, 5)
            assert_equal(tbl, expected_tbl)

    def test_basic(self):
        # median_test delegates to chi2_contingency for the statistic and
        # p-value; verify the pass-through of lambda_ and correction.
        a = [1, 2, 3, 4, 5]
        b = [2, 4, 6, 8]
        for kwargs in [{}, {"lambda_": 0}, {"correction": False}]:
            stat, p, m, tbl = stats.median_test(a, b, **kwargs)
            assert_equal(m, 4)
            assert_equal(tbl, [[1, 2], [4, 2]])
            exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, **kwargs)
            assert_allclose(stat, exp_stat)
            assert_allclose(p, exp_p)
if __name__ == "__main__":
    # Allow running this test module directly; delegates test discovery and
    # execution to numpy.testing's run_module_suite().
    run_module_suite()
| |
#!C:/Python34/python
__author__ = 'Max Dignan'
#
#
# This file is open sourced under the MIT License
# Original Author: Max Dignan
#
# This block must be kept in any, and all, future iterations
# as defined by the MIT License
#
#
#
import hashlib
import uuid
import os
import pickle
# will allow user to connect with specific data table
# the username and password will be cross checked
# with corresponding table
def connect(table, username, password):
    """Open the table file named `table` in the current directory and log in.

    Returns a permitted Table instance on success, or None when the table
    file cannot be read or the credentials do not match.
    """
    url = os.getcwd() + "/" + table
    try:
        # FIX: close the file handle and catch only load-related errors.
        # The original bare `except` printed a message, continued with an
        # empty handle, and then crashed on `handle[0]` (IndexError).
        with open(url, "rb") as fp:
            handle = pickle.load(fp)
    except (OSError, pickle.UnpicklingError, EOFError):
        print('INVALID ENTRY')
        return None
    users = handle[0]
    entered_pass = password
    for stored_user, stored_hash in users:
        if username == stored_user:
            # Re-hash the supplied password with the stored salt; on a match,
            # use the stored hash as the credential handed to Table.
            if hash_password(password, stored_hash) == stored_hash:
                password = stored_hash
            break
    table = Table(table, username, password)
    # A valid login requires that the password was replaced by its hash above.
    if table.permit() and password != entered_pass:
        return table
    print('INCORRECT INFORMATION')
    return None
# makes new table, will give original permissions to user: root
# password: root
# this can be deleted later on
def make_new_table(tablename):
    """Create and persist a new table whose only user is root/root."""
    salt = uuid.uuid4().hex
    default_user = 'root'
    digest = hashlib.sha512(salt.encode() + default_user.encode()).hexdigest()
    new_table = Table(tablename, 'root', digest + "<<<>>>" + salt)
    new_table.commit()
    print("Table '" + tablename + "' created with username 'root' and "
          "password 'root'")
def hash_password(plainText, encoded):
    """Hash `plainText` using the salt embedded in `encoded`.

    `encoded` has the layout '<hexdigest><<<>>><salt>'; the return value
    uses the same layout, so hashing the correct password reproduces the
    stored string exactly.
    """
    salt = encoded.split('<<<>>>')[1]
    digest = hashlib.sha512(salt.encode() + plainText.encode()).hexdigest()
    return digest + "<<<>>>" + salt
class Table:
    """A tiny pickle-backed table of rows, where each row is a list of
    [category, value] pairs.

    Access to every operation is gated by permit(), which checks the
    credentials given at construction against the user list persisted in
    the table file (on-disk layout: [users, data]).
    """

    def __init__(self, tablename, currentUser, currentPassword):
        # Credentials checked by every subsequent permit() call.
        self.currentUser = currentUser
        self.currentPassword = currentPassword
        self.tableName = tablename
        self.users = [[currentUser, currentPassword]]
        self.data = [[]]
        # permit() lazily replaces users/data with the on-disk copy once.
        self.first_load = True

    # checks table permissions
    def permit(self):
        """Return True when the stored credentials match the on-disk table.

        On the first successful check, the in-memory users/data are replaced
        with the persisted copies.
        """
        url = os.getcwd() + "/" + self.tableName
        # FIX: use a context manager instead of leaking the file handle.
        with open(url, "rb") as fp:
            handle = pickle.load(fp)
        users = handle[0]
        for userHandle in users:
            if self.currentUser == userHandle[0]:
                if userHandle[1] == self.currentPassword:
                    if self.first_load:
                        self.data = handle[1]
                        self.users = users
                        self.first_load = False
                    return True
        return False

    def length(self):
        """Number of rows currently held in memory."""
        return len(self.data)

    def dump(self, csv_file):
        """Write every row to `csv_file` as comma-separated values."""
        if self.permit():
            import csv
            with open(csv_file, 'w', newline='') as fp:
                csv.writer(fp, delimiter=',').writerows(self.data)

    # commits adjusted data into file
    def commit(self):
        """Persist [users, data] to the table file in the current directory."""
        url = os.getcwd() + "/" + self.tableName
        # FIX: context manager guarantees the file is flushed and closed.
        with open(url, "wb") as fp:
            pickle.dump([self.users, self.data], fp)

    # gives all data in table
    def give_data(self):
        """Return all rows, or None if not permitted."""
        if self.permit():
            return self.data

    # delete all of table's data
    def delete_table(self):
        """Drop every row (users are kept; not committed automatically)."""
        if self.permit():
            self.data = []

    # adds a new user to table
    def add_user(self, newUser, newPassword):
        """Add a user with a freshly salted SHA-512 password hash, then commit."""
        if self.permit():
            salt = uuid.uuid4().hex
            hash = hashlib.sha512(salt.encode() + newPassword.encode()).hexdigest() + "<<<>>>" + salt
            self.users.append([newUser, hash])
            print(r"username: '" + newUser + "' and password: '" + newPassword + "' added to table")
            self.commit()
            print('automatically saved to table on disk')

    # deletes a user from having access to database
    def delete_user(self, username):
        """Remove `username` from the access list and commit.

        Raises ValueError (as before) when the user does not exist.
        """
        if self.permit():
            allUsernames = [user[0] for user in self.users]
            del self.users[allUsernames.index(username)]
            self.commit()
            print('automatically deleted and saved to table on disk')

    # tests whether category exists in a row
    def check_category_present(self, row, categoryName):
        """True if some [category, value] pair in row `row` has this category."""
        if self.permit():
            for pair in self.data[row]:
                if pair[0] == categoryName:
                    return True
            return False

    # adds new row to table
    def add_row(self):
        """Append a new, empty row."""
        if self.permit():
            self.data.append([])

    # removes row from table
    # do note: subsequent rows will be moved back one space
    def delete_row(self, rowNumber):
        if self.permit():
            try:
                del self.data[rowNumber]
            # FIX: catch only the expected errors instead of a bare except.
            except (IndexError, TypeError):
                print(r"Can't delete row, because row doesn't exist")

    # gives whole row as a list
    def access_row(self, rowNumber):
        if self.permit():
            return self.data[rowNumber]

    # edits whole row
    # pass list in of other lists of two values
    # zeroeth in each inner list is category with first as value
    def edit_row(self, rowNumber, newRow):
        if self.permit():
            # isinstance also accepts list subclasses, which `type(...) ==`
            # needlessly rejected.
            if isinstance(newRow, list):
                self.data[rowNumber] = newRow
            else:
                print('new row must be a list, with lists within it for values')

    # changes contents of both rows given to the contents of the other
    def swap_rows(self, firstRowNumber, secondRowNumber):
        if self.permit():
            if firstRowNumber >= len(self.data):
                print('invalid first row, beyond scope of row')
            elif secondRowNumber >= len(self.data):
                print('invalid second row, beyond scope of row')
            else:
                self.data[firstRowNumber], self.data[secondRowNumber] = \
                    self.data[secondRowNumber], self.data[firstRowNumber]

    # edit value to row's category
    def edit_row_value(self, rowNumber, categoryName, value):
        """Set the value stored under `categoryName` in row `rowNumber`.

        FIX: the original used pair.index(categoryName) (a 0/1 position
        inside the [category, value] pair) as the pair index within the
        row, so any category that was not the first pair updated the
        wrong pair.
        """
        if self.permit():
            for pair in self.data[rowNumber]:
                if pair[0] == categoryName:
                    pair[1] = value
                    return

    # remove category and value from specific row
    def delete_category_and_value(self, rowNumber, category):
        """Remove every [category, value] pair matching `category` from the row.

        FIX: the original unconditionally executed a post-loop print/delete,
        which crashed (str + int concatenation, out-of-range index) on every
        successful delete.
        """
        if self.permit():
            if self.check_category_present(rowNumber, category):
                row = self.data[rowNumber]
                keyPairIndex = 0
                while keyPairIndex < len(row):
                    if row[keyPairIndex][0] == category:
                        del row[keyPairIndex]
                    else:
                        keyPairIndex += 1
                if not row:
                    print("last item in row deleted, remember to delete row "
                          + str(rowNumber))

    # adds category to all rows
    def add_category_to_all(self, categoryName):
        """Give every row the category, using '000null000' as a placeholder."""
        if self.permit():
            for rowIndex in range(len(self.data)):
                if not self.check_category_present(rowIndex, categoryName):
                    self.add_category_and_value(rowIndex, categoryName,
                                                '000null000')

    # adds category and value to row in table
    def add_category_and_value(self, rowNumber, category, value):
        """Append [category, value] to the row; False if already present."""
        if self.permit():
            if rowNumber == len(self.data):
                self.add_row()
            if self.check_category_present(rowNumber, category):
                print('category and value already present')
                return False
            self.data[rowNumber].append([category, value])
            return True

    # returns value for category and row specified
    def get_rows_value(self, rowNumber, categoryName):
        """Return the value stored under `categoryName`, or False if absent.

        FIX: like edit_row_value, the original indexed the row with the
        position of the category *inside* its pair, returning the wrong
        pair's value whenever the match was not the first pair.
        """
        if self.permit():
            for pair in self.data[rowNumber]:
                if pair[0] == categoryName:
                    return pair[1]
            return False

    # lists all values of each row for a given category name
    # will have 'No Value Set' listed for a row without
    # a value for given category
    def list_values_by_category(self, categoryName):
        if self.permit():
            theList = []
            for rowIndex in range(len(self.data)):
                value = self.get_rows_value(rowIndex, categoryName)
                # NOTE: falsy stored values (0, '', False) also show as
                # 'No Value Set' — preserved from the original behavior.
                if not value:
                    theList.append('No Value Set')
                else:
                    theList.append(value)
            return theList

    # lists all categories in table
    def list_all_categories(self):
        """Return every distinct category name, in first-seen order."""
        if self.permit():
            theList = []
            for row in self.data:
                for pair in row:
                    if pair[0] not in theList:
                        theList.append(pair[0])
            return theList
| |
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
#
# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
# * Updated to no longer generate special -dbg package, instead use the
# single system -dbg
# * Update version with ".1" to indicate this change
import os
import sys
import time
# Python version whose manifest is generated; VERSION[:3] ("2.7") forms the
# ${libdir}/pythonX.Y/ target prefix.
VERSION = "2.7.2"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
# Generator version stamp written into the output header.
__version__ = "20110222.2"
class MakefileMaker:
    """Emits BitBake package metadata (PROVIDES/PACKAGES/DESCRIPTION_*/
    RDEPENDS_*/FILES_*) for the packages registered via addPackage().

    NOTE: this file targets Python 2 (it uses iteritems(), and the driver
    below uses file() and sys.exc_clear()).
    """

    def __init__( self, outfile ):
        """initialize"""
        self.packages = {}
        # Default location for relative filenames; VERSION[:3] -> "2.7".
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )

    #
    # helper functions
    #

    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )

    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix

    def doProlog( self ):
        self.out( """ """ )
        self.out( "" )

    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile"""
        if type( filenames ) == type( "" ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            # Filenames starting with "$" are absolute BitBake variables and
            # must not get the target prefix.
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames

    def doBody( self ):
        """generate body of Makefile"""
        # FIX: removed an unused `global VERSION` declaration and a `dirset`
        # dict that was built for every package but never read.

        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'

        self.out( provideLine )
        self.out( "" )

        #
        # generate package line
        #
        packageLine = 'PACKAGES="${PN}-dbg '
        for name in sorted(self.packages):
            if name.startswith("${PN}-distutils"):
                # -staticdev is emitted right next to -distutils; the
                # standalone "${PN}-distutils-staticdev" entry is skipped here.
                if name == "${PN}-distutils":
                    packageLine += "%s-staticdev %s " % (name, name)
            elif name != '${PN}-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'

        self.out( packageLine )
        self.out( "" )

        #
        # generate package variables
        #
        for name, data in sorted(self.packages.iteritems()):
            desc, deps, files = data

            #
            # write out the description, revision and dependencies
            #
            self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )

            line = 'FILES_%s="' % name
            for target in files:
                line += "%s " % target
            line += '"'

            self.out( line )
            self.out( "" )

        self.out( 'DESCRIPTION_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'

        for name, data in sorted(self.packages.iteritems()):
            if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
                line += "%s " % name

        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )

    def doEpilog( self ):
        self.out( """""" )
        self.out( "" )

    def make( self ):
        """Emit the complete manifest: prolog, body, epilog."""
        self.doProlog()
        self.doBody()
        self.doEpilog()
if __name__ == "__main__":
    # Output goes to the file named on the command line (recreated), or to
    # stdout when no argument is given.  (Python 2: file(), sys.exc_clear().)
    if len( sys.argv ) > 1:
        try:
            os.unlink(sys.argv[1])
        except Exception:
            sys.exc_clear()
        outfile = file( sys.argv[1], "w" )
    else:
        outfile = sys.stdout

    m = MakefileMaker( outfile )

    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: revision, name, description, dependencies, filenames
    #
    m.addPackage( "${PN}-core", "Python Interpreter and core modules (needed!)", "${PN}-lang ${PN}-re",
                  "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
                  "genericpath.* getopt.* linecache.* new.* " +
                  "os.* posixpath.* struct.* " +
                  "warnings.* site.* stat.* " +
                  "UserDict.* UserList.* UserString.* " +
                  "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
                  "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
                  "_weakrefset.* sysconfig.* config/Makefile " +
                  "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
                  "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")

    m.addPackage( "${PN}-dev", "Python Development Package", "${PN}-core",
                  "${includedir} " +
                  "${libdir}/lib*${SOLIBSDEV} " +
                  "${libdir}/*.la " +
                  "${libdir}/*.a " +
                  "${libdir}/*.o " +
                  "${libdir}/pkgconfig " +
                  "${base_libdir}/*.a " +
                  "${base_libdir}/*.o " +
                  "${datadir}/aclocal " +
                  "${datadir}/pkgconfig " )

    m.addPackage( "${PN}-2to3", "Python Automated Python 2 to 3 code translation", "${PN}-core",
                  "${bindir}/2to3 lib2to3" ) # package

    m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
                  "${bindir}/idle idlelib" ) # package

    m.addPackage( "${PN}-pydoc", "Python Interactive Help Support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
                  "${bindir}/pydoc pydoc.* pydoc_data" )

    m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
                  "${bindir}/smtpd.* smtpd.*" )

    m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
                  "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )

    m.addPackage( "${PN}-bsddb", "Python Berkeley Database Bindings", "${PN}-core",
                  "bsddb lib-dynload/_bsddb.so" ) # package

    m.addPackage( "${PN}-codecs", "Python Codecs, Encodings & i18n Support", "${PN}-core ${PN}-lang",
                  "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )

    m.addPackage( "${PN}-compile", "Python Bytecode Compilation Support", "${PN}-core",
                  "py_compile.* compileall.*" )

    m.addPackage( "${PN}-compiler", "Python Compiler Support", "${PN}-core",
                  "compiler" ) # package

    m.addPackage( "${PN}-compression", "Python High Level Compression Support", "${PN}-core ${PN}-zlib",
                  "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )

    m.addPackage( "${PN}-crypt", "Python Basic Cryptographic and Hashing Support", "${PN}-core",
                  "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )

    m.addPackage( "${PN}-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
                  "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )

    m.addPackage( "${PN}-curses", "Python Curses Support", "${PN}-core",
                  "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module

    m.addPackage( "${PN}-ctypes", "Python C Types Support", "${PN}-core",
                  "ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module

    m.addPackage( "${PN}-datetime", "Python Calendar and Time support", "${PN}-core ${PN}-codecs",
                  "_strptime.* calendar.* lib-dynload/datetime.so" )

    m.addPackage( "${PN}-db", "Python File-Based Database Support", "${PN}-core",
                  "anydbm.* dumbdbm.* whichdb.* " )

    m.addPackage( "${PN}-debugger", "Python Debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
                  "bdb.* pdb.*" )

    m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects.", "${PN}-lang ${PN}-re",
                  "difflib.*" )

    m.addPackage( "${PN}-distutils-staticdev", "Python Distribution Utilities (Static Libraries)", "${PN}-distutils",
                  "config/lib*.a" ) # package

    m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
                  "config distutils" ) # package

    m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings.", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
                  "doctest.*" )

    # FIXME consider adding to some higher level package
    m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
                  "lib-dynload/_elementtree.so" )

    m.addPackage( "${PN}-email", "Python Email Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
                  "imaplib.* email" ) # package

    m.addPackage( "${PN}-fcntl", "Python's fcntl Interface", "${PN}-core",
                  "lib-dynload/fcntl.so" )

    m.addPackage( "${PN}-hotshot", "Python Hotshot Profiler", "${PN}-core",
                  "hotshot lib-dynload/_hotshot.so" )

    m.addPackage( "${PN}-html", "Python HTML Processing", "${PN}-core",
                  "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )

    m.addPackage( "${PN}-gdbm", "Python GNU Database Support", "${PN}-core",
                  "lib-dynload/gdbm.so" )

    m.addPackage( "${PN}-image", "Python Graphical Image Handling", "${PN}-core",
                  "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )

    m.addPackage( "${PN}-io", "Python Low-Level I/O", "${PN}-core ${PN}-math ${PN}-textutils",
                  "lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
                  "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )

    m.addPackage( "${PN}-json", "Python JSON Support", "${PN}-core ${PN}-math ${PN}-re",
                  "json lib-dynload/_json.so" ) # package

    m.addPackage( "${PN}-lang", "Python Low-Level Language Support", "${PN}-core",
                  "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
                  "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
                  "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
                  "tokenize.* traceback.* weakref.*" )

    m.addPackage( "${PN}-logging", "Python Logging Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
                  "logging" ) # package

    m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
                  "mailbox.*" )

    m.addPackage( "${PN}-math", "Python Math Support", "${PN}-core ${PN}-crypt",
                  "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )

    m.addPackage( "${PN}-mime", "Python MIME Handling APIs", "${PN}-core ${PN}-io",
                  "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )

    m.addPackage( "${PN}-mmap", "Python Memory-Mapped-File Support", "${PN}-core ${PN}-io",
                  "lib-dynload/mmap.so " )

    m.addPackage( "${PN}-multiprocessing", "Python Multiprocessing Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading",
                  "lib-dynload/_multiprocessing.so multiprocessing" ) # package

    m.addPackage( "${PN}-netclient", "Python Internet Protocol Clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
                  "*Cookie*.* " +
                  "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )

    m.addPackage( "${PN}-netserver", "Python Internet Protocol Servers", "${PN}-core ${PN}-netclient",
                  "cgi.* *HTTPServer.* SocketServer.*" )

    m.addPackage( "${PN}-numbers", "Python Number APIs", "${PN}-core ${PN}-lang ${PN}-re",
                  "decimal.* numbers.*" )

    m.addPackage( "${PN}-pickle", "Python Persistence Support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
                  "pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )

    m.addPackage( "${PN}-pkgutil", "Python Package Extension Utility Support", "${PN}-core",
                  "pkgutil.*")

    m.addPackage( "${PN}-pprint", "Python Pretty-Print Support", "${PN}-core ${PN}-io",
                  "pprint.*" )

    m.addPackage( "${PN}-profile", "Python Basic Profiling Support", "${PN}-core ${PN}-textutils",
                  "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )

    m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
                  "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin

    m.addPackage( "${PN}-readline", "Python Readline Support", "${PN}-core",
                  "lib-dynload/readline.so rlcompleter.*" )

    m.addPackage( "${PN}-resource", "Python Resource Control Interface", "${PN}-core",
                  "lib-dynload/resource.so" )

    m.addPackage( "${PN}-shell", "Python Shell-Like Functionality", "${PN}-core ${PN}-re",
                  "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )

    m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
                  "robotparser.*")

    m.addPackage( "${PN}-subprocess", "Python Subprocess Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
                  "subprocess.*" )

    m.addPackage( "${PN}-sqlite3", "Python Sqlite3 Database Support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
                  "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )

    m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 Database Support Tests", "${PN}-core ${PN}-sqlite3",
                  "sqlite3/test" )

    m.addPackage( "${PN}-stringold", "Python String APIs [deprecated]", "${PN}-core ${PN}-re",
                  "lib-dynload/strop.so string.* stringold.*" )

    m.addPackage( "${PN}-syslog", "Python Syslog Interface", "${PN}-core",
                  "lib-dynload/syslog.so" )

    m.addPackage( "${PN}-terminal", "Python Terminal Controlling Support", "${PN}-core ${PN}-io",
                  "pty.* tty.*" )

    m.addPackage( "${PN}-tests", "Python Tests", "${PN}-core",
                  "test" ) # package

    m.addPackage( "${PN}-threading", "Python Threading & Synchronization Support", "${PN}-core ${PN}-lang",
                  "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )

    m.addPackage( "${PN}-tkinter", "Python Tcl/Tk Bindings", "${PN}-core",
                  "lib-dynload/_tkinter.so lib-tk" ) # package

    m.addPackage( "${PN}-unittest", "Python Unit Testing Framework", "${PN}-core ${PN}-stringold ${PN}-lang",
                  "unittest/" )

    m.addPackage( "${PN}-unixadmin", "Python Unix Administration Support", "${PN}-core",
                  "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )

    m.addPackage( "${PN}-xml", "Python basic XML support.", "${PN}-core ${PN}-elementtree ${PN}-re",
                  "lib-dynload/pyexpat.so xml xmllib.*" ) # package

    m.addPackage( "${PN}-xmlrpc", "Python XMLRPC Support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
                  "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )

    m.addPackage( "${PN}-zlib", "Python zlib Support.", "${PN}-core",
                  "lib-dynload/zlib.so" )

    # FIX: removed a second, identical registration of "${PN}-mailbox" that
    # silently overwrote the entry added above.

    m.make()
| |
#from __future__ import print_function
#I learned and develop in Python 3.x, using whatever is in my distribution's packages (3.2 currently).
#I've found that, with the print function, the code generally works the same under 2.7.
import re, sys, string, glob, argparse, os
from os.path import *
from collections import namedtuple
# Lookahead patterns: match the whitespace just before a control-flow keyword
# that is (eventually) followed by an opening '{'; group 1 captures the
# "keyword ... {" text itself.
if_regex = r"\s(?=(if\W.*?{))"
for_regex = r"\s(?=(for\W.*?{))"
do_regex = r"\s(?=(do\W.*?{))"
switch_regex = r"\s(?=(switch\W.*?{))"
else_regex = r"\s(?=(else\W.*?{))"
while_regex = r"\s(?=(while\W.*?{))"
# Compiled once at import time.  'if' and 'for' additionally use DOTALL so
# that a condition spanning several lines still matches.
if_re = re.compile(if_regex, re.VERBOSE | re.DOTALL)
for_re = re.compile(for_regex, re.VERBOSE | re.DOTALL)
do_re = re.compile(do_regex, re.VERBOSE)
switch_re = re.compile(switch_regex, re.VERBOSE)
else_re = re.compile(else_regex, re.VERBOSE)
while_re = re.compile(while_regex, re.VERBOSE)
# (start, end) character offsets of one comment region in the scanned source.
comment = namedtuple('comment', ['start', 'end'])
def insert(str1, str2, pos):
    """Return str1 with str2 spliced in at index pos."""
    head, tail = str1[:pos], str1[pos:]
    return head + str2 + tail
def enum(*sequential, **named):
    """Build a simple Enum type: positional names get values 0..n-1, and
    keyword arguments supply explicit values.

    FIX: `enum` was defined twice; the second, keyword-only definition
    shadowed this more general one.  The merged version accepts both call
    styles, so existing keyword-style callers behave exactly as before.
    """
    values = dict(zip(sequential, range(len(sequential))), **named)
    return type('Enum', (), values)
#returns the positions of the first ( and matching )
#starting at i, skipping comments of course
def find_open_close_paren(s, i, end):
    """Return (open_pos, close_pos) for the first '(' in s[i:end] and its
    balancing ')', skipping /* */ and // comments.

    open_pos is -1 if no '(' is seen; close_pos is -1 when a comment is
    unterminated, and `end` when the scan runs out before the parentheses
    balance.
    """
    depth = 0
    open_paren = -1
    while i < end:
        ch = s[i]
        if ch == '(':
            if open_paren < 0:
                open_paren = i
            depth += 1
        elif ch == ')':
            depth -= 1
            if not depth:
                break
        elif s[i:i+2] == '/*':
            closing = s.find('*/', i + 2)
            if closing < 0:
                # FIX: an unterminated block comment previously reset the
                # scan position to 0 (find() == -1, +1), which could rescan
                # the same '/*' forever; bail out instead.
                return open_paren, -1
            i = closing + 1  # last char of '*/'; the increment below skips it
        elif s[i:i+2] == '//':
            i = s.find('\n', i + 2)  # land on the newline; increment skips it
            if i <= 0:
                return open_paren, -1
        i += 1
    return open_paren, i
#find and return position of first uncommented character c in s starting at i
def find_first_of(s, i, c):
    """Index of the first occurrence of character c at or after i, ignoring
    occurrences inside /* */ or // comments.  Assumes c is present.
    """
    while s[i] != c:
        pair = s[i:i+2]
        if pair == '/*':
            i = s.find('*/', i + 2) + 1  # land on final '/' of the closer
        elif pair == '//':
            i = s.find('\n', i + 2)      # land on the newline itself
        i += 1
    return i
#find and return position of first uncommented non-whitespace character in s starting at i
def find_first(s, i, end):
 """Return the index in s[i:end] of the first character that is neither
 whitespace nor inside a /* */ or // comment; returns a value >= end if
 none exists before end.
 """
 while i < end:
  pair = s[i:i+2]
  if pair == '/*':
   i = s.find('*/', i+2) + 1  # first char after '*/', minus 1 for the i += 1
  elif pair == '//':
   i = s.find('\n', i+2)  # land on the newline; i += 1 steps past it
  elif not s[i].isspace():
   return i
  i += 1
 return i
def in_comment(i, comment_list):
 """Return (idx, nxt) for position i against comment_list.

 idx is the index of the comment/literal span containing i, or -1 if i is
 in none of them; nxt is the index of the first span starting after i.
 So when i is inside span x the result is (x, x+1), and an empty list
 yields (-1, -1).
 """
 if not comment_list:
  return -1, -1
 passed = 0
 for idx, span in enumerate(comment_list):
  if i > span.start:
   passed += 1
   if i < span.end:
    return idx, passed
 return -1, passed
def fix_if(match, file_string, comment_list):
 """Rewrite one regex match of an `if (...)` whose brace is on its own line
 into OTBS `if (...) {` form.

 Returns (new_file_string, resume_pos, comment_slice_start): the possibly
 edited text, the position to resume searching from, and the index to cut
 comment_list at (None means the text was edited, forcing a rescan).
 Reads the module-global `be_cautious`: when set, whitespace inside the
 parens is preserved.
 """
 m_start = match.start(1)
 m_end = match.end(1)
 start_len = 2  # len('if')
 # Bail out if the match starts or ends inside a comment/string literal.
 start_in_comment, next_comment_start = in_comment(m_start, comment_list)
 if start_in_comment >= 0:
  return file_string, comment_list[start_in_comment].end, next_comment_start #return position at end of comment
 end_in_comment, next_comment_end = in_comment(m_end-1, comment_list)
 if end_in_comment >= 0: #return position after comment This won't
  return file_string, comment_list[end_in_comment].end, next_comment_end #fix something like for() /* { */\n/t { oh well
 s = match.group(1)
 open_paren, close_paren = find_open_close_paren(s, start_len, len(s))
 if open_paren == -1 or close_paren == -1:
  # BUG FIX: the old code indexed comment_list[next_comment_start]
  # unconditionally, which raised IndexError when there was no later
  # comment (next_comment_start == -1 for an empty list, or equal to
  # len(comment_list) when i is past every comment).
  if 0 <= next_comment_start < len(comment_list):
   return file_string, comment_list[next_comment_start].end, next_comment_end
  return file_string, m_start + start_len, next_comment_start
 after_paren = close_paren + 1
 i = find_first(s, after_paren, len(s))
 if i >= len(s): #must not be valid (not in comment or string literal but could be #ifdef'd . . .
  return file_string, m_start + start_len, next_comment_start # can just check here instead of after paren too
 #i is pos of first uncommented char after
 if s[i] != '{': #if it's not a brace, it's a statement or new block s
  return file_string, m_start + start_len, next_comment_start #don't touch if it doesn't have braces (brace at end of match is for something else)
 nl_after_paren = s.find('\n', after_paren)
 # find amount to add to after_paren to get rid of any trailing whitespace
 # if there is any non-whitespace character after ) before {, keep everything
 if not s[after_paren:nl_after_paren].isspace():
  nl_after_paren = 0
 else:
  nl_after_paren = len(s[after_paren:nl_after_paren]) - len(s[after_paren:nl_after_paren].lstrip(string.whitespace.replace('\n','')))
 if not be_cautious:
  s = 'if '+ s[start_len:open_paren].strip() +'(' + s[open_paren+1:close_paren].strip() + ') {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 else:
  s = 'if '+ s[start_len:open_paren].strip() + s[open_paren:close_paren+1] + ' {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 return file_string[:m_start] + s + file_string[m_end:], m_end, None
#Python short-circuits boolean expressions.
#This seems to work, but it no longer preserves comments;
#that will be fixed later. It is probably far from optimal,
#since I don't yet know the most Pythonic way to do it.
def fix_for(match, file_string, comment_list):
 """Rewrite one `for (...)` match whose brace is on its own line into OTBS
 `for (...) {` form.

 Returns (new_file_string, resume_pos, comment_slice_start); a None third
 element signals that the text was edited and comments must be rescanned.
 Reads the module-global `be_cautious` to decide whether to normalize
 whitespace inside the parens.
 """
 m_start = match.start(1)
 m_end = match.end(1)
 start_len = 3
 start_in_comment, next_comment_start = in_comment(m_start, comment_list)
 if start_in_comment >= 0:
  return file_string, comment_list[start_in_comment].end, next_comment_start #return position at end of comment
 end_in_comment, next_comment_end = in_comment(m_end-1, comment_list)
 if end_in_comment >= 0: #return position after comment This won't
  return file_string, comment_list[end_in_comment].end, next_comment_end #fix something like for() /* { */\n/t { oh well
 s = match.group(1)
 open_paren, close_paren = find_open_close_paren(s, start_len, len(s))
 if open_paren == -1 or close_paren == -1:
  # NOTE(review): comment_list[next_comment_start] raises IndexError when
  # next_comment_start is -1 (empty comment_list) or == len(comment_list);
  # guard as in fix_if before relying on this path.
  return file_string, comment_list[next_comment_start].end, next_comment_end #I think this is right
 after_paren = close_paren + 1
 #returning len(s) can still happen if the match starts in a string literal
 #or something other than a comment but we can just check once after find_first
 #after_paren is first char after )
 i = find_first(s, after_paren, len(s))
 if i >= len(s):
  return file_string, m_start + start_len, next_comment_start
 #i is pos of first uncommented char after
 if s[i] != '{': #if it's not a brace, it's a statement or new block s
  return file_string, m_start + start_len, next_comment_start #don't touch if it doesn't have braces (brace at end of match is for something else)
 nl_after_paren = s.find('\n', after_paren)
 # find amount to add to after_paren to get rid of any trailing whitespace
 # if there is any non-whitespace character after ) before {, keep everything
 if not s[after_paren:nl_after_paren].isspace():
  nl_after_paren = 0
 else:
  nl_after_paren = len(s[after_paren:nl_after_paren]) - len(s[after_paren:nl_after_paren].lstrip(string.whitespace.replace('\n','')))
 #brace is the last character and first uncommented
 if not be_cautious:
  s = 'for '+ s[start_len:open_paren].strip() + '(' + s[open_paren+1:close_paren].strip() + ') {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 else:
  s = 'for '+ s[start_len:open_paren].strip() + s[open_paren:close_paren+1] + ' {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 return file_string[:m_start] + s + file_string[m_end:], m_end, None
#works
def fix_do(match, file_string, comment_list):
 """Rewrite one `do` match whose brace is on its own line into OTBS
 `do {` form; returns (new_file_string, resume_pos, comment_slice_start),
 with None as the third element when the text was edited.
 """
 span_start, span_end = match.start(1), match.end(1)
 keyword_len = 2  # len('do')
 # Skip matches that begin or end inside a comment/string literal.
 start_idx, after_start = in_comment(span_start, comment_list)
 if start_idx >= 0:
  return file_string, comment_list[start_idx].end, after_start
 end_idx, after_end = in_comment(span_end - 1, comment_list)
 if end_idx >= 0:
  return file_string, comment_list[end_idx].end, after_end
 body = match.group(1)
 brace = find_first(body, keyword_len, len(body))
 # Leave the text alone when no uncommented '{' directly follows `do`.
 if brace >= len(body) or body[brace] != '{':
  return file_string, span_start + keyword_len, after_start
 fixed = 'do {' + body[keyword_len:-1].lstrip()  # drop the trailing brace
 return file_string[:span_start] + fixed + file_string[span_end:], span_end, None
def fix_switch(match, file_string, comment_list):
 """Rewrite one `switch (...)` match whose brace is on its own line into
 OTBS `switch (...) {` form.

 Returns (new_file_string, resume_pos, comment_slice_start); None as the
 third element means the text was edited.  Reads the module-global
 `be_cautious`.
 """
 m_start = match.start(1)
 m_end = match.end(1)
 start_len = 6
 start_in_comment, next_comment_start = in_comment(m_start, comment_list)
 if start_in_comment >= 0:
  return file_string, comment_list[start_in_comment].end, next_comment_start #return position at end of comment
 end_in_comment, next_comment_end = in_comment(m_end-1, comment_list)
 if end_in_comment >= 0: #return position after comment This won't
  return file_string, comment_list[end_in_comment].end, next_comment_end #fix something like for() /* { */\n/t { oh well
 s = match.group(1)
 open_paren, close_paren = find_open_close_paren(s, start_len, len(s))
 if open_paren == -1 or close_paren == -1:
  # NOTE(review): same latent IndexError as fix_for when
  # next_comment_start is -1 or == len(comment_list).
  return file_string, comment_list[next_comment_start].end, next_comment_end #I think this is right
 after_paren = close_paren + 1
 i = find_first(s, after_paren, len(s))
 if i >= len(s):
  return file_string, m_start + start_len, next_comment_start
 #i is pos of first uncommented char after
 if s[i] != '{': #if it's not a brace, it's a statement or new block s
  return file_string, m_start + start_len, next_comment_start #don't touch if it doesn't have braces (brace at end of match is for something else)
 nl_after_paren = s.find('\n', after_paren)
 # find amount to add to after_paren to get rid of any trailing whitespace
 # if there is any non-whitespace character after ) before {, keep everything
 if not s[after_paren:nl_after_paren].isspace():
  nl_after_paren = 0
 else:
  nl_after_paren = len(s[after_paren:nl_after_paren]) - len(s[after_paren:nl_after_paren].lstrip(string.whitespace.replace('\n','')))
 #brace is the last character and first uncommented
 if not be_cautious:
  s = 'switch ' + s[start_len:open_paren].strip() + '(' + s[open_paren+1:close_paren].strip() + ') {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 else:
  s = 'switch '+ s[start_len:open_paren].strip() + s[open_paren:close_paren+1] + ' {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 return file_string[:m_start] + s + file_string[m_end:], m_end, None
#works
def fix_else(match, file_string, comment_list):
 """Rewrite one `else` match whose brace is on its own line into OTBS
 `else {` form; returns (new_file_string, resume_pos, comment_slice_start),
 with None as the third element when the text was edited.

 `else if` gets no special handling here: if the first uncommented
 character after `else` is not '{', the match is left untouched.
 """
 span_start, span_end = match.start(1), match.end(1)
 keyword_len = 4  # len('else')
 # Skip matches that begin or end inside a comment/string literal.
 start_idx, after_start = in_comment(span_start, comment_list)
 if start_idx >= 0:
  return file_string, comment_list[start_idx].end, after_start
 end_idx, after_end = in_comment(span_end - 1, comment_list)
 if end_idx >= 0:
  return file_string, comment_list[end_idx].end, after_end
 body = match.group(1)
 brace = find_first(body, keyword_len, len(body))
 # Leave the text alone when no uncommented '{' directly follows `else`.
 if brace >= len(body) or body[brace] != '{':
  return file_string, span_start + keyword_len, after_start
 fixed = 'else {' + body[keyword_len:-1].lstrip()  # drop the trailing brace
 return file_string[:span_start] + fixed + file_string[span_end:], span_end, None
#need to keep testing
def fix_while(match, file_string, comment_list):
 """Rewrite one `while (...)` match whose brace is on its own line into
 OTBS `while (...) {` form.

 Returns (new_file_string, resume_pos, comment_slice_start); None as the
 third element means the text was edited.  Reads the module-global
 `be_cautious`.
 """
 m_start = match.start(1)
 m_end = match.end(1)
 start_len = 5
 start_in_comment, next_comment_start = in_comment(m_start, comment_list)
 if start_in_comment >= 0:
  return file_string, comment_list[start_in_comment].end, next_comment_start #return position at end of comment
 end_in_comment, next_comment_end = in_comment(m_end-1, comment_list)
 if end_in_comment >= 0: #return position after comment This won't
  return file_string, comment_list[end_in_comment].end, next_comment_end #fix something like for() /* { */\n/t { oh well
 s = match.group(1)
 open_paren, close_paren = find_open_close_paren(s, start_len, len(s))
 if open_paren == -1 or close_paren == -1:
  # NOTE(review): same latent IndexError as fix_for when
  # next_comment_start is -1 or == len(comment_list).
  return file_string, comment_list[next_comment_start].end, next_comment_end #I think this is right
 after_paren = close_paren + 1
 i = find_first(s, after_paren, len(s))
 if i >= len(s):
  return file_string, m_start + start_len, next_comment_start
 #i is pos of first uncommented char after
 if s[i] != '{': #if it's not a brace, it's a statement or new block s
  return file_string, m_start + start_len, next_comment_start #don't touch if it doesn't have braces (brace at end of match is for something else)
 nl_after_paren = s.find('\n', after_paren)
 # trim trailing whitespace between ) and the newline; keep everything if
 # any non-whitespace character sits between ) and {
 if not s[after_paren:nl_after_paren].isspace():
  nl_after_paren = 0
 else:
  nl_after_paren = len(s[after_paren:nl_after_paren]) - len(s[after_paren:nl_after_paren].lstrip(string.whitespace.replace('\n','')))
 #brace is the last character and first uncommented
 if not be_cautious:
  s = 'while ' + s[start_len:open_paren].strip() + '(' + s[open_paren+1:close_paren].strip() + ') {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 else:
  s = 'while '+ s[start_len:open_paren].strip() + s[open_paren:close_paren+1] + ' {' + s[after_paren+nl_after_paren:-1] #cut off last character, the brace
 return file_string[:m_start] + s + file_string[m_end:], m_end, None
#returns a list of tuples of comment and string literal start and end, ie s[start:end] is the comment
def find_non_code(s, start=0):
 """Scan s from `start` and return a list of `comment` namedtuples marking
 every // comment, /* */ comment, and "..." string literal, in order.

 Each entry spans the whole token including its delimiters (and the
 trailing newline for // comments).  Escaped quotes inside literals are
 handled by counting preceding backslashes.
 """
 comment_list = [] #list of tuples (start, end)
 i = start
 while True:
  cpp_comment = s.find('//', i)
  c_comment = s.find('/*', i)
  #want to skip escaped quotations
  tmp = i
  while True:
   # NOTE(review): when str_literal is 0 or -1, s[str_literal-1] wraps to
   # the end of the string — confirm this cannot misfire in practice.
   str_literal = s.find('"', tmp) #have to handle escaped double quote and the single character '"'
   if s[str_literal-1] != '\\' and s[str_literal-1] != "'" or str_literal == -1:
    break
   tmp += 1
  # Normalize "not found" to len(s) so the < comparisons below pick the
  # earliest of the three token kinds.
  c_comment = c_comment if c_comment != -1 else len(s);
  cpp_comment = cpp_comment if cpp_comment != -1 else len(s);
  str_literal = str_literal if str_literal != -1 else len(s);
  if c_comment == len(s) and c_comment == cpp_comment and c_comment == str_literal:
   break
  end = 0
  if c_comment < cpp_comment and c_comment < str_literal:
   end = s.find('*/', c_comment) + 2
   comment_list.append(comment(c_comment, end))
   i = end
  elif cpp_comment < str_literal:
   end = s.find('\n', cpp_comment) + 1
   comment_list.append(comment(cpp_comment, end))
   i = end
  else: #str_liteal is first/least
   tmp = str_literal+1
   while True: #ignore escaped "'s
    end = s.find('"', tmp)
    if s[end-1] != '\\' : # don't have to check for -1 here (except maybe at end of file or code that already doesn't compile
     break
    else:
     n = 1
     while s[end-n] == '\\':
      n += 1
     if n % 2 == 1: # handle something like "blah blah \\" break if n is odd because n will be 1 more than num \'s
      break # I should make a function for all of this
    tmp += 1
   end += 1
   comment_list.append(comment(str_literal, end))
   i = end
  # Safety valve against runaway scans on pathological input.
  if len(comment_list) > 1000000:
   print("There must be an error, > 1000000 comments and string literals, exiting")
   sys.exit()
 return comment_list
def recurse_dir(root, filetypes, exclude): # for a root dir
 """Walk `root` recursively and return files whose names end in one of
 `filetypes`, skipping any directory or file that is samefile() with an
 entry of `exclude`.

 Note: children of an excluded directory are still walked (os.walk keeps
 descending); only the directory's own direct matches are skipped.
 """
 def _excluded(path):
  return any(samefile(path, e) for e in exclude)
 collected = []
 for dirpath, _subdirs, _names in os.walk(root):
  if _excluded(dirpath):
   continue
  for suffix in filetypes:
   collected.extend(f for f in glob.glob(dirpath + '/*' + suffix)
                    if not _excluded(f))
 return collected
def fix_construct(regex, fix_func, c_file_string):
 """Repeatedly apply fix_func to every match of `regex` in c_file_string
 and return the fixed text.

 fix_func returns (new_text, resume_pos, comment_slice_start); a truthy
 comment_slice_start means the text was NOT edited, so the previous
 comment scan can be reused (sliced); otherwise comments are rescanned
 from the resume position.
 """
 pos, start_comment = 0, None
 comment_list = []
 match = regex.search(c_file_string)
 while match:
  if start_comment:
   #last match wasn't edited: keep only comments after the search position
   comment_list = comment_list[start_comment:]
  else:
   comment_list = find_non_code(c_file_string, pos)
  c_file_string, pos, start_comment = fix_func(match, c_file_string, comment_list)
  match = regex.search(c_file_string, pos)
 return c_file_string
# Parallel lists: regexes[i] is handled by fix_functions[i] (zipped in main).
regexes = [if_re, for_re, do_re, switch_re, else_re, while_re]
fix_functions = [fix_if, fix_for, fix_do, fix_switch, fix_else, fix_while]
# Help text for the -s / -c command-line options.
suffix_help ="""specify a suffix string to append to input files for output,
ie -s _fixed writes the results of fixing file1.cpp to file1.cpp_fixed"""
cautious_help ="""don't do some things (like stripping whitespace inside parens ie without -c, if ( a == b ) { becomes if (a == b) {"""
def main():
 """Command-line entry point: parse arguments, build the list of input
 files (stdin, explicit files, or globbed/recursed directories), run every
 brace-fixing pass over each file, and write the result per -o/-s, or
 print it.

 BUG FIXES vs the original:
 * a bare `except: return` on the file read silently aborted ALL
   remaining files; now an OSError skips just that file with a message;
 * file handles were never closed (open(...).read()/write()); now
   context managers are used;
 * `normpath(f)` crashed when reading from stdin (f is a file object);
 * -o/-s with stdin input would have crashed open(); it now falls back
   to printing.
 """
 parser = argparse.ArgumentParser(description="Convert C/C++ files to The One True Brace Style")
 parser.add_argument("-i", "--input", nargs="+", default=[sys.stdin], help="the input file(s) and directories (default = stdin)")
 parser.add_argument("-f", "--filetypes", nargs="+", default=[".c", ".cpp"], help="the filetypes to fix in directories (default = ['.c', '.cpp]")
 parser.add_argument("-r", "--recursive", action="store_true", help="search through given directories recursively")
 parser.add_argument("-c", "--cautious", action="store_true", help=cautious_help)
 parser.add_argument("-e", "--exclude", nargs="+", default=[], help="files or directories to exclude ie use -r but want to exclude certain subdir or files)")
 group = parser.add_mutually_exclusive_group()
 group.add_argument("-o", "--overwrite", action="store_true", help="overwrite fixed files (Make a backup or use source control!!)")
 group.add_argument("-s", "--suffix", help=suffix_help)
 args = parser.parse_args()
 print(args)
 # The fix_* functions read this module-global flag.
 global be_cautious
 be_cautious = args.cautious
 print(args.cautious, be_cautious)
 file_list = []
 if args.input[0] == sys.stdin:
  file_list.append(sys.stdin)
 else:
  for i in args.input:
   if isdir(i):
    if args.recursive:
     file_list += recurse_dir(i, args.filetypes, args.exclude)
    else:
     for t in args.filetypes:
      for f in glob.glob(i+'/*'+t):
       if any(samefile(f, e) for e in args.exclude):
        continue
       file_list.append(f)
   else:
    file_list.append(i) # let's just assume they don't specify the exact same dir or file in -i and -e
 print("fixing ",len(file_list), "files")
 for file_num, f in enumerate(file_list):
  label = "<stdin>" if f == sys.stdin else normpath(f)
  print("fixing",label,str(file_num+1),"of",str(len(file_list)))
  if f == sys.stdin:
   c_file_string = f.read()
  else:
   try:
    with open(f, "r") as src:
     c_file_string = src.read()
   except OSError as err:
    # Skip unreadable files instead of silently aborting the whole run.
    print("skipping", f, ":", err)
    continue
  for regex, fix_func in zip(regexes, fix_functions):
   c_file_string = fix_construct(regex, fix_func, c_file_string)
  if args.overwrite and f != sys.stdin:
   with open(f, 'w') as dst:
    dst.write(c_file_string)
  elif args.suffix and f != sys.stdin:
   with open(f+args.suffix, 'w') as dst:
    dst.write(c_file_string)
  else:
   print(c_file_string)
if __name__ == "__main__":
 main()
| |
from random import uniform
import pytest
from diofant import (E1, Chi, Ci, E, Ei, EulerGamma, Float, I, Integer, Li,
Limit, O, Rational, Shi, Si, Symbol, conjugate, cos, cosh,
diff, erf, erf2, erf2inv, erfc, erfcinv, erfi, erfinv,
exp, exp_polar, expand, expand_func, expint, fresnelc,
fresnels, gamma, hyper, im, integrate, li, limit, log,
meijerg, nan, oo, pi, polar_lift, re, root, sign, sin,
sinh, sqrt, uppergamma)
from diofant.abc import x, y, z
from diofant.core.function import ArgumentIndexError
from diofant.functions.special.error_functions import _eis, _erfs
from diofant.utilities.randtest import (random_complex_number,
verify_derivative_numerically,
verify_numerically)
# Test module: nothing to export.
__all__ = ()
# Shared symbols: w ranges over the extended reals, n over the integers.
w = Symbol('w', extended_real=True)
n = Symbol('n', integer=True)
def test_erf():
 """Special values, symmetry, rewrites, limits, real/imag parts and
 derivative bookkeeping of erf."""
 assert erf(nan) == nan
 assert erf(oo) == 1
 assert erf(-oo) == -1
 assert erf(0) == 0
 assert erf(I*oo) == oo*I
 assert erf(-I*oo) == -oo*I
 assert erf(-2) == -erf(2)
 assert erf(-x*y) == -erf(x*y)
 assert erf(-x - y) == -erf(x + y)
 assert erf(erfinv(x)) == x
 assert erf(erfcinv(x)) == 1 - x
 assert erf(erf2inv(0, x)) == x
 assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
 assert erf(I).is_extended_real is False
 assert erf(w).is_extended_real is True
 assert erf(z).is_extended_real is None
 assert conjugate(erf(z)) == erf(conjugate(z))
 assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
 assert erf(1/x).as_leading_term(x) == erf(1/x)
 assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
 assert erf(z).rewrite('erfc') == 1 - erfc(z)
 assert erf(z).rewrite('erfi') == -I*erfi(I*z)
 assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
                                               I*fresnels(z*(1 - I)/sqrt(pi)))
 assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
                                               I*fresnels(z*(1 - I)/sqrt(pi)))
 assert erf(z).rewrite('hyper') == 2*z*hyper([Rational(1, 2)], [Rational(3, 2)], -z**2)/sqrt(pi)
 assert erf(z).rewrite('meijerg') == z*meijerg([Rational(1, 2)], [], [0], [Rational(-1, 2)], z**2)/sqrt(pi)
 assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(Rational(1, 2), z**2)/sqrt(pi)
 assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
     2/sqrt(pi)
 assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
 assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
 assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
 l = Limit((1 - erf(y/x))*exp(y**2/x**2), x, 0)
 assert l.doit() == l  # cover _erfs._eval_aseries
 assert erf(x).as_real_imag() == \
     ((erf(re(x) - I*re(x)*abs(im(x))/abs(re(x)))/2 +
       erf(re(x) + I*re(x)*abs(im(x))/abs(re(x)))/2,
       I*(erf(re(x) - I*re(x)*abs(im(x))/abs(re(x))) -
          erf(re(x) + I*re(x)*abs(im(x))/abs(re(x)))) *
       re(x)*abs(im(x))/(2*im(x)*abs(re(x)))))
 assert erf(x).as_real_imag() == erf(x).as_real_imag(deep=False)
 assert erf(w).as_real_imag() == (erf(w), 0)
 assert erf(w).as_real_imag() == erf(w).as_real_imag(deep=False)
 assert erf(I).as_real_imag() == (0, erfi(1))
 pytest.raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
 assert erf(x).taylor_term(3, x, *(2*x/sqrt(pi), 0)) == -2*x**3/3/sqrt(pi)
def test_erf_series():
 """Maclaurin series of erf to O(x**7)."""
 assert erf(x).series(x, 0, 7) == 2*x/sqrt(pi) - \
     2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erf_evalf():
 """Numeric evaluation of erf against a reference value."""
 assert abs( erf(Float(2.0)) - 0.995322265 ) < 1E-8  # XXX
def test__erfs():
 """Derivative, series and tractable/intractable rewrites of the internal
 helper _erfs."""
 assert _erfs(z).diff(z) == -2/sqrt(pi) + 2*z*_erfs(z)
 pytest.raises(ArgumentIndexError, lambda: _erfs(x).fdiff(2))
 assert _erfs(1/z).series(z) == \
     z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
 assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
     == erf(z).diff(z)
 assert _erfs(z).rewrite('intractable') == (-erf(z) + 1)*exp(z**2)
def test_erfc():
 """Special values, rewrites, real/imag parts and derivative of erfc."""
 assert erfc(nan) == nan
 assert erfc(oo) == 0
 assert erfc(-oo) == 2
 assert erfc(0) == 1
 assert erfc(I*oo) == -oo*I
 assert erfc(-I*oo) == oo*I
 assert erfc(-x) == Integer(2) - erfc(x)
 assert erfc(erfcinv(x)) == x
 assert erfc(erfinv(x)) == 1 - x
 assert erfc(I).is_extended_real is False
 assert erfc(w).is_extended_real is True
 assert erfc(z).is_extended_real is None
 assert conjugate(erfc(z)) == erfc(conjugate(z))
 assert erfc(x).as_leading_term(x) == 1
 assert erfc(1/x).as_leading_term(x) == erfc(1/x)
 assert erfc(z).rewrite('erf') == 1 - erf(z)
 assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
 assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
                                                    I*fresnels(z*(1 - I)/sqrt(pi)))
 assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
                                                    I*fresnels(z*(1 - I)/sqrt(pi)))
 assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([Rational(1, 2)], [Rational(3, 2)], -z**2)/sqrt(pi)
 assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([Rational(1, 2)], [], [0], [Rational(-1, 2)], z**2)/sqrt(pi)
 assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
 assert erfc(z).rewrite('expint') == 1 - sqrt(z**2)/z + z*expint(Rational(1, 2), z**2)/sqrt(pi)
 assert erfc(x).as_real_imag() == \
     ((erfc(re(x) - I*re(x)*abs(im(x))/abs(re(x)))/2 +
       erfc(re(x) + I*re(x)*abs(im(x))/abs(re(x)))/2,
       I*(erfc(re(x) - I*re(x)*abs(im(x))/abs(re(x))) -
          erfc(re(x) + I*re(x)*abs(im(x))/abs(re(x)))) *
       re(x)*abs(im(x))/(2*im(x)*abs(re(x)))))
 assert erfc(x).as_real_imag(deep=False) == erfc(x).as_real_imag()
 assert erfc(w).as_real_imag() == (erfc(w), 0)
 assert erfc(w).as_real_imag(deep=False) == erfc(w).as_real_imag()
 assert erfc(I).as_real_imag() == (1, -erfi(1))
 pytest.raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
 assert erfc(x).taylor_term(3, x, *(-2*x/sqrt(pi), 0)) == 2*x**3/3/sqrt(pi)
 assert erfc(x).limit(x, oo) == 0
 assert erfc(x).diff(x) == -2*exp(-x**2)/sqrt(pi)
def test_erfc_series():
 """Maclaurin series of erfc to O(x**7)."""
 assert erfc(x).series(x, 0, 7) == 1 - 2*x/sqrt(pi) + \
     2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
def test_erfc_evalf():
 """Numeric evaluation of erfc against a reference value."""
 assert abs( erfc(Float(2.0)) - 0.00467773 ) < 1E-8  # XXX
def test_erfi():
 """Special values, symmetry, rewrites and real/imag parts of erfi."""
 assert erfi(nan) == nan
 assert erfi(+oo) == +oo
 assert erfi(-oo) == -oo
 assert erfi(0) == 0
 assert erfi(I*oo) == I
 assert erfi(-I*oo) == -I
 assert erfi(-x) == -erfi(x)
 assert erfi(I*erfinv(x)) == I*x
 assert erfi(I*erfcinv(x)) == I*(1 - x)
 assert erfi(I*erf2inv(0, x)) == I*x
 assert erfi(I).is_extended_real is False
 assert erfi(w).is_extended_real is True
 assert erfi(z).is_extended_real is None
 assert conjugate(erfi(z)) == erfi(conjugate(z))
 assert erfi(z).rewrite('erf') == -I*erf(I*z)
 assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
 assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
                                                I*fresnels(z*(1 + I)/sqrt(pi)))
 assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
                                                I*fresnels(z*(1 + I)/sqrt(pi)))
 assert erfi(z).rewrite('hyper') == 2*z*hyper([Rational(1, 2)], [Rational(3, 2)], z**2)/sqrt(pi)
 assert erfi(z).rewrite('meijerg') == z*meijerg([Rational(1, 2)], [], [0], [Rational(-1, 2)], -z**2)/sqrt(pi)
 assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(Rational(1, 2),
                                                                    -z**2)/sqrt(pi) - 1))
 assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(Rational(1, 2), -z**2)/sqrt(pi)
 assert erfi(x).as_real_imag() == \
     ((erfi(re(x) - I*re(x)*abs(im(x))/abs(re(x)))/2 +
       erfi(re(x) + I*re(x)*abs(im(x))/abs(re(x)))/2,
       I*(erfi(re(x) - I*re(x)*abs(im(x))/abs(re(x))) -
          erfi(re(x) + I*re(x)*abs(im(x))/abs(re(x)))) *
       re(x)*abs(im(x))/(2*im(x)*abs(re(x)))))
 assert erfi(x).as_real_imag(deep=False) == erfi(x).as_real_imag()
 assert erfi(w).as_real_imag() == (erfi(w), 0)
 assert erfi(w).as_real_imag(deep=False) == erfi(w).as_real_imag()
 assert erfi(I).as_real_imag() == (0, erf(1))
 pytest.raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
 assert erfi(x).taylor_term(3, x, *(2*x/sqrt(pi), 0)) == 2*x**3/3/sqrt(pi)
 assert erfi(x).limit(x, oo) == oo
def test_erfi_series():
 """Maclaurin series of erfi to O(x**7)."""
 assert erfi(x).series(x, 0, 7) == 2*x/sqrt(pi) + \
     2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erfi_evalf():
 """Numeric evaluation of erfi against a reference value."""
 assert abs( erfi(Float(2.0)) - 18.5648024145756 ) < 1E-13  # XXX
def test_erf2():
 """Special values, symmetries, rewrites and partial derivatives of the
 two-argument erf2(x, y) = erf(y) - erf(x)."""
 assert erf2(0, 0) == 0
 assert erf2(x, x) == 0
 assert erf2(nan, 0) == nan
 assert erf2(-oo, y) == erf(y) + 1
 assert erf2( oo, y) == erf(y) - 1
 assert erf2( x, oo) == 1 - erf(x)
 assert erf2( x, -oo) == -1 - erf(x)
 assert erf2(x, erf2inv(x, y)) == y
 assert erf2(-x, -y) == -erf2(x, y)
 assert erf2(-x, y) == erf(y) + erf(x)
 assert erf2( x, -y) == -erf(y) - erf(x)
 assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
 assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
 assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
 assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
 assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
 assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
 assert erf2(I, w).is_extended_real is False
 assert erf2(2*w, w).is_extended_real is True
 assert erf2(z, w).is_extended_real is None
 assert erf2(w, z).is_extended_real is None
 assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
 assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
 assert erf2(x, y).rewrite('erfc') == erfc(x) - erfc(y)
 assert erf2(x, y).rewrite('erfi') == I*(erfi(I*x) - erfi(I*y))
 # NOTE(review): this erfi line duplicates test_erfi's fdiff check — it may
 # have been meant to exercise erf2; confirm intent.
 pytest.raises(ArgumentIndexError, lambda: erfi(x).fdiff(3))
 pytest.raises(ArgumentIndexError, lambda: erf2(x, y).fdiff(3))
 assert erf2(x, y).diff(x) == -2*exp(-x**2)/sqrt(pi)
 assert erf2(x, y).diff(y) == +2*exp(-y**2)/sqrt(pi)
def test_erfinv():
 """Special values, inverse relation, derivative and rewrite of erfinv."""
 assert erfinv(0) == 0
 assert erfinv(-1) == -oo
 assert erfinv(+1) == +oo
 assert erfinv(nan) == nan
 assert erfinv(erf(w)) == w
 assert erfinv(erf(-w)) == -w
 assert erfinv(x).diff() == sqrt(pi)*exp(erfinv(x)**2)/2
 assert erfinv(z).rewrite('erfcinv') == erfcinv(1-z)
 pytest.raises(ArgumentIndexError, lambda: erfinv(x).fdiff(2))
def test_erfinv_evalf():
 """Numeric evaluation of erfinv against a reference value."""
 assert abs( erfinv(Float(0.2)) - 0.179143454621292 ) < 1E-13
def test_erfcinv():
 """Special values, derivative and rewrite of erfcinv."""
 assert erfcinv(1) == 0
 assert erfcinv(0) == oo
 assert erfcinv(nan) == nan
 assert erfcinv(x).diff() == -sqrt(pi)*exp(erfcinv(x)**2)/2
 assert erfcinv(z).rewrite('erfinv') == erfinv(1-z)
 pytest.raises(ArgumentIndexError, lambda: erfcinv(x).fdiff(2))
def test_erf2inv():
 """Special values, reductions to erfinv/erfcinv, and partial derivatives
 of erf2inv."""
 assert erf2inv(0, 0) == 0
 assert erf2inv(0, 1) == oo
 assert erf2inv(1, 0) == 1
 assert erf2inv(0, y) == erfinv(y)
 assert erf2inv(oo, y) == erfcinv(-y)
 assert erf2inv(x, 0) == x
 assert erf2inv(x, oo) == erfinv(x)
 assert erf2inv(x, y).diff(x) == exp(-x**2 + erf2inv(x, y)**2)
 assert erf2inv(x, y).diff(y) == sqrt(pi)*exp(erf2inv(x, y)**2)/2
 pytest.raises(ArgumentIndexError, lambda: erf2inv(x, y).fdiff(3))
# NOTE we multiply by exp_polar(I*pi) and need this to be on the principal
# branch, hence take x in the lower half plane (d=0).
def mytn(expr1, expr2, expr3, x, d=0):
 """Check that expr2 equals expr3 symbolically AND that expr1 agrees with
 expr2 numerically in x (free symbols other than x get random complex
 values)."""
 replacements = {a: random_complex_number()
                 for a in expr1.free_symbols if a != x}
 if expr2 != expr3:
  return False
 return verify_numerically(expr1.subs(replacements),
                           expr2.subs(replacements), x, d=d)
def mytd(expr1, expr2, x):
 """Check that expr1.diff(x) equals expr2 symbolically AND that the
 derivative verifies numerically (free symbols other than x get random
 complex values)."""
 replacements = {a: random_complex_number()
                 for a in expr1.free_symbols if a != x}
 if expr1.diff(x) != expr2:
  return False
 return verify_derivative_numerically(expr1.subs(replacements), x)
def tn_branch(func, s=None):
 """Numerically check func's branch-cut jump across the negative real axis:
 the polar-lifted values at +/- pi must agree with the limits from just
 above/below the cut.  With s given, func is called as func(s, x)."""
 def evaluate(arg):
  return func(arg) if s is None else func(s, arg)
 c = uniform(1, 5)
 jump_polar = evaluate(c*exp_polar(I*pi)) - evaluate(c*exp_polar(-I*pi))
 eps = 1e-15
 jump_limit = evaluate(-c + eps*I) - evaluate(-c - eps*I)
 return abs(jump_polar - jump_limit).evalf(strict=False) < 1e-10
def test_ei():
 """Special values, branch behaviour, rewrites and series of Ei."""
 pos = Symbol('p', positive=True)
 neg = Symbol('n', negative=True)
 assert Ei(0) == -oo
 assert Ei(+oo) == oo
 assert Ei(-oo) == 0
 assert Ei(-pos) == Ei(polar_lift(-1)*pos) - I*pi
 assert Ei(neg) == Ei(polar_lift(neg)) - I*pi
 assert tn_branch(Ei)
 assert mytd(Ei(x), exp(x)/x, x)
 assert mytn(Ei(x), Ei(x).rewrite(uppergamma),
             -uppergamma(0, x*polar_lift(-1)) - I*pi, x)
 assert mytn(Ei(x), Ei(x).rewrite(expint),
             -expint(1, x*polar_lift(-1)) - I*pi, x)
 assert Ei(x).rewrite(expint).rewrite(Ei) == Ei(x)
 assert Ei(x*exp_polar(2*I*pi)) == Ei(x) + 2*I*pi
 assert Ei(x*exp_polar(-2*I*pi)) == Ei(x) - 2*I*pi
 assert mytn(Ei(x), Ei(x).rewrite(Shi), Chi(x) + Shi(x), x)
 assert mytn(Ei(x*polar_lift(I)), Ei(x*polar_lift(I)).rewrite(Si),
             Ci(x) + I*Si(x) + I*pi/2, x)
 assert Ei(log(x)).rewrite(li) == li(x)
 assert Ei(2*log(x)).rewrite(li) == li(x**2)
 assert Ei(x).series(x) == (EulerGamma + log(x) + x + x**2/4 +
                            x**3/18 + x**4/96 + x**5/600 + O(x**6))
 assert Ei(1 + x).series(x) == (Ei(1) + E*x + E*x**3/6 - E*x**4/12 +
                                3*E*x**5/40 + O(x**6))
 pytest.raises(ArgumentIndexError, lambda: Ei(x).fdiff(2))
def test_expint():
 """Rewrites, derivatives, branch behaviour and series of the generalized
 exponential integral expint (and E1)."""
 assert mytn(expint(x, y), expint(x, y).rewrite(uppergamma),
             y**(x - 1)*uppergamma(1 - x, y), x)
 assert mytd(
     expint(x, y), -y**(x - 1)*meijerg([], [1, 1], [0, 0, 1 - x], [], y), x)
 assert mytd(expint(x, y), -expint(x - 1, y), y)
 assert mytn(expint(1, x), expint(1, x).rewrite(Ei),
             -Ei(x*polar_lift(-1)) + I*pi, x)
 assert expint(-4, x) == exp(-x)/x + 4*exp(-x)/x**2 + 12*exp(-x)/x**3 \
     + 24*exp(-x)/x**4 + 24*exp(-x)/x**5
 assert expint(-Rational(3, 2), x) == \
     exp(-x)/x + 3*exp(-x)/(2*x**2) - 3*sqrt(pi)*erf(sqrt(x))/(4*x**Rational(5, 2)) \
     + 3*sqrt(pi)/(4*x**Rational(5, 2))
 assert tn_branch(expint, 1)
 assert tn_branch(expint, 2)
 assert tn_branch(expint, 3)
 assert tn_branch(expint, 1.7)
 assert tn_branch(expint, pi)
 assert expint(y, x*exp_polar(2*I*pi)) == \
     x**(y - 1)*(exp(2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
 assert expint(y, x*exp_polar(-2*I*pi)) == \
     x**(y - 1)*(exp(-2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
 assert expint(2, x*exp_polar(2*I*pi)) == 2*I*pi*x + expint(2, x)
 assert expint(2, x*exp_polar(-2*I*pi)) == -2*I*pi*x + expint(2, x)
 assert (expint(n, x*exp_polar(2*I*pi)) ==
         expint(n, x*exp_polar(2*I*pi), evaluate=False))
 assert expint(1, x).rewrite(Ei).rewrite(expint) == expint(1, x)
 assert (expint(2, x, evaluate=False).rewrite(Shi) ==
         expint(2, x, evaluate=False))
 assert mytn(E1(x), E1(x).rewrite(Shi), Shi(x) - Chi(x), x)
 assert mytn(E1(polar_lift(I)*x), E1(polar_lift(I)*x).rewrite(Si),
             -Ci(x) + I*Si(x) - I*pi/2, x)
 assert mytn(expint(2, x), expint(2, x).rewrite(Ei).rewrite(expint),
             -x*E1(x) + exp(-x), x)
 assert mytn(expint(3, x), expint(3, x).rewrite(Ei).rewrite(expint),
             x**2*E1(x)/2 + (1 - x)*exp(-x)/2, x)
 assert expint(Rational(3, 2), z).nseries(z, n=10) == \
     2 + 2*z - z**2/3 + z**3/15 - z**4/84 + z**5/540 - \
     2*sqrt(pi)*sqrt(z) + O(z**6)
 assert E1(z).series(z) == -EulerGamma - log(z) + z - \
     z**2/4 + z**3/18 - z**4/96 + z**5/600 + O(z**6)
 assert expint(4, z).series(z) == Rational(1, 3) - z/2 + z**2/2 + \
     z**3*(log(z)/6 - Rational(11, 36) + EulerGamma/6) - z**4/24 + \
     z**5/240 + O(z**6)
 assert (expint(x, x).series(x, x0=1, n=2) ==
         expint(1, 1) + (x - 1)*(-meijerg(((), (1, 1)),
                                          ((0, 0, 0), ()), 1) - 1/E) +
         O((x - 1)**2, (x, 1)))
 pytest.raises(ArgumentIndexError, lambda: expint(x, y).fdiff(3))
def test__eis():
    """Tests for the internal helper ``_eis`` ('tractable' form of Ei/li)."""
    assert _eis(z).diff(z) == -_eis(z) + 1/z
    pytest.raises(ArgumentIndexError, lambda: _eis(x).fdiff(2))

    assert _eis(1/z).series(z) == \
        z + z**2 + 2*z**3 + 6*z**4 + 24*z**5 + O(z**6)

    # 'tractable' rewrites express Ei/li through _eis, and back again.
    assert Ei(z).rewrite('tractable') == exp(z)*_eis(z)
    assert li(z).rewrite('tractable') == z*_eis(log(z))
    assert _eis(z).rewrite('intractable') == exp(-z)*Ei(z)

    # Round-trip: differentiating the tractable form recovers the original.
    assert expand(li(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == li(z).diff(z)
    assert expand(Ei(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == Ei(z).diff(z)

    assert _eis(z).series(z, n=2) == EulerGamma + log(z) + z*(-log(z) -
        EulerGamma + 1) + z**2*(log(z)/2 - Rational(3, 4) + EulerGamma/2) + O(z**2)

    l = Limit(Ei(y/x)/exp(y/x), x, 0)
    assert l.doit() == l  # cover _eis._eval_aseries
def tn_arg(func):
    """Numerically check *func* on the four principal polar arguments.

    For each polar direction ``arg`` the value ``func(arg*x)`` at a random
    sample point must agree (to 1e-10) with ``func`` evaluated just off the
    corresponding branch cut (``e1*v + e2*1e-15``).
    """
    def _close(arg, e1, e2):
        # Compare the polar-lifted evaluation against the expected branch
        # value at a randomly chosen sample point in (1, 5).
        sample = uniform(1, 5)
        lhs = func(arg*x).subs({x: sample}).evalf(strict=False)
        rhs = func(e1*sample + e2*1e-15).evalf(strict=False)
        return abs(lhs - rhs).evalf(strict=False) < 1e-10

    cases = [(exp_polar(I*pi/2), I, 1),
             (exp_polar(-I*pi/2), -I, 1),
             (exp_polar(I*pi), -1, I),
             (exp_polar(-I*pi), -1, -I)]
    # all() short-circuits exactly like the original chained 'and'.
    return all(_close(*case) for case in cases)
def test_li():
    """Tests for the logarithmic integral li(z) and its rewrites."""
    z = Symbol('z')
    zr = Symbol('z', extended_real=True)
    zp = Symbol('z', positive=True)
    zn = Symbol('z', negative=True)

    # special values
    assert li(0) == 0
    assert li(1) == -oo
    assert li(oo) == oo

    assert isinstance(li(z), li)

    assert diff(li(z), z) == 1/log(z)
    pytest.raises(ArgumentIndexError, lambda: li(z).fdiff(2))

    # conjugation commutes with li away from the branch cut
    assert conjugate(li(z)) == li(conjugate(z))
    assert conjugate(li(-zr)) == li(-zr)
    assert conjugate(li(-zp)) == conjugate(li(-zp))
    assert conjugate(li(zn)) == conjugate(li(zn))

    # rewrites in terms of related special functions
    assert li(z).rewrite(Li) == Li(z) + li(2)
    assert li(z).rewrite(Ei) == Ei(log(z))
    assert li(z).rewrite(uppergamma) == (-log(1/log(z))/2 - log(-log(z)) +
                                         log(log(z))/2 - expint(1, -log(z)))
    assert li(z).rewrite(Si) == (-log(I*log(z)) - log(1/log(z))/2 +
                                 log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
    assert li(z).rewrite(Ci) == (-log(I*log(z)) - log(1/log(z))/2 +
                                 log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
    assert li(z).rewrite(Shi) == (-log(1/log(z))/2 + log(log(z))/2 +
                                  Chi(log(z)) - Shi(log(z)))
    assert li(z).rewrite(Chi) == (-log(1/log(z))/2 + log(log(z))/2 +
                                  Chi(log(z)) - Shi(log(z)))
    assert li(z).rewrite(hyper) == (log(z)*hyper((1, 1), (2, 2), log(z)) -
                                    log(1/log(z))/2 + log(log(z))/2 + EulerGamma)
    assert li(z).rewrite(meijerg) == (-log(1/log(z))/2 - log(-log(z)) + log(log(z))/2 -
                                      meijerg(((), (1,)), ((0, 0), ()), -log(z)))
def test_Li():
    """Tests for the offset logarithmic integral Li(z) = li(z) - li(2)."""
    assert Li(2) == 0
    assert Li(oo) == oo

    assert isinstance(Li(z), Li)

    assert diff(Li(z), z) == 1/log(z)
    pytest.raises(ArgumentIndexError, lambda: Li(z).fdiff(2))

    assert Li(z).rewrite(li) == li(z) - li(2)

    # numeric evaluation at 30 digits
    assert Li(4).evalf(30) == Float('1.92242131492155809316615998937961', dps=30)
def test_si():
    """Tests for the sine integrals Si and Shi."""
    # relations under imaginary argument and negation
    assert Si(I*x) == I*Shi(x)
    assert Shi(I*x) == I*Si(x)
    assert Si(-I*x) == -I*Shi(x)
    assert Shi(-I*x) == -I*Si(x)

    assert Si(-x) == -Si(x)
    assert Shi(-x) == -Shi(x)

    # Si and Shi are unbranched: a full polar turn is absorbed
    assert Si(exp_polar(2*pi*I)*x) == Si(x)
    assert Si(exp_polar(-2*pi*I)*x) == Si(x)
    assert Shi(exp_polar(2*pi*I)*x) == Shi(x)
    assert Shi(exp_polar(-2*pi*I)*x) == Shi(x)

    # limits at infinity
    assert Si(oo) == pi/2
    assert Si(-oo) == -pi/2
    assert Shi(oo) == oo
    assert Shi(-oo) == -oo

    assert mytd(Si(x), sin(x)/x, x)
    assert mytd(Shi(x), sinh(x)/x, x)

    # rewrites in terms of Ei and expint
    assert mytn(Si(x), Si(x).rewrite(Ei),
                -I*(-Ei(x*exp_polar(-I*pi/2))/2
                    + Ei(x*exp_polar(I*pi/2))/2 - I*pi) + pi/2, x)
    assert mytn(Si(x), Si(x).rewrite(expint),
                -I*(-expint(1, x*exp_polar(-I*pi/2))/2 +
                    expint(1, x*exp_polar(I*pi/2))/2) + pi/2, x)
    assert mytn(Shi(x), Shi(x).rewrite(Ei),
                Ei(x)/2 - Ei(x*exp_polar(I*pi))/2 + I*pi/2, x)
    assert mytn(Shi(x), Shi(x).rewrite(expint),
                expint(1, x)/2 - expint(1, x*exp_polar(I*pi))/2 - I*pi/2, x)

    assert tn_arg(Si)
    assert tn_arg(Shi)

    # series expansions
    assert Si(x).nseries(x, n=8) == \
        x - x**3/18 + x**5/600 - x**7/35280 + O(x**9)
    assert Shi(x).nseries(x, n=8) == \
        x + x**3/18 + x**5/600 + x**7/35280 + O(x**9)
    assert Si(sin(x)).nseries(x, n=5) == x - 2*x**3/9 + 17*x**5/450 + O(x**7)
    assert Si(x).series(x, 1, n=3) == \
        Si(1) + (x - 1)*sin(1) + (x - 1)**2*(-sin(1)/2 + cos(1)/2) + O((x - 1)**3, (x, 1))

    pytest.raises(ArgumentIndexError, lambda: Si(z).fdiff(2))
def test_ci():
    """Tests for the cosine integrals Ci and Chi."""
    m1 = exp_polar(I*pi)
    m1_ = exp_polar(-I*pi)
    pI = exp_polar(I*pi/2)
    mI = exp_polar(-I*pi/2)

    # branch behaviour: Ci/Chi pick up +/- I*pi shifts around the cut,
    # and quarter turns exchange Ci with Chi
    assert Ci(m1*x) == Ci(x) + I*pi
    assert Ci(m1_*x) == Ci(x) - I*pi
    assert Ci(pI*x) == Chi(x) + I*pi/2
    assert Ci(mI*x) == Chi(x) - I*pi/2
    assert Chi(m1*x) == Chi(x) + I*pi
    assert Chi(m1_*x) == Chi(x) - I*pi
    assert Chi(pI*x) == Ci(x) + I*pi/2
    assert Chi(mI*x) == Ci(x) - I*pi/2
    assert Ci(exp_polar(2*I*pi)*x) == Ci(x) + 2*I*pi
    assert Chi(exp_polar(-2*I*pi)*x) == Chi(x) - 2*I*pi
    assert Chi(exp_polar(2*I*pi)*x) == Chi(x) + 2*I*pi
    assert Ci(exp_polar(-2*I*pi)*x) == Ci(x) - 2*I*pi

    # limits at infinity
    assert Ci(oo) == 0
    assert Ci(-oo) == I*pi
    assert Chi(oo) == oo
    assert Chi(-oo) == oo

    assert mytd(Ci(x), cos(x)/x, x)
    assert mytd(Chi(x), cosh(x)/x, x)

    # rewrites in terms of Ei
    assert mytn(Ci(x), Ci(x).rewrite(Ei),
                Ei(x*exp_polar(-I*pi/2))/2 + Ei(x*exp_polar(I*pi/2))/2, x)
    assert mytn(Chi(x), Chi(x).rewrite(Ei),
                Ei(x)/2 + Ei(x*exp_polar(I*pi))/2 - I*pi/2, x)

    assert tn_arg(Ci)
    assert tn_arg(Chi)

    # series expansions around the origin
    assert Ci(x).nseries(x, n=4) == \
        EulerGamma + log(x) - x**2/4 + x**4/96 + O(x**6)
    assert Chi(x).nseries(x, n=4) == \
        EulerGamma + log(x) + x**2/4 + x**4/96 + O(x**6)
    assert limit(log(x) - Ci(2*x), x, 0) == -log(2) - EulerGamma
def test_fresnel():
    """Tests for the Fresnel integrals fresnels and fresnelc."""
    # special values and symmetries of fresnels
    assert fresnels(0) == 0
    assert fresnels(+oo) == Rational(+1, 2)
    assert fresnels(-oo) == Rational(-1, 2)

    assert fresnels(z) == fresnels(z)
    assert fresnels(-z) == -fresnels(z)
    assert fresnels(I*z) == -I*fresnels(z)
    assert fresnels(-I*z) == I*fresnels(z)

    assert conjugate(fresnels(z)) == fresnels(conjugate(z))

    assert fresnels(z).diff(z) == sin(pi*z**2/2)

    # rewrites of fresnels
    assert fresnels(z).rewrite(erf) == (1 + I)/4 * (
        erf((1 + I)/2*sqrt(pi)*z) - I*erf((1 - I)/2*sqrt(pi)*z))

    assert fresnels(z).rewrite(hyper) == \
        pi*z**3/6 * hyper([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)], -pi**2*z**4/16)

    assert fresnels(z).series(z, n=15) == \
        pi*z**3/6 - pi**3*z**7/336 + pi**5*z**11/42240 + O(z**15)

    assert fresnels(y/z).limit(z, 0) == fresnels(oo*sign(y))

    # taylor_term, with and without the "previous terms" hint
    assert fresnels(x).taylor_term(-1, z) == 0
    assert fresnels(x).taylor_term(1, z, *(pi*z**3/6,)) == -pi**3*z**7/336
    assert fresnels(x).taylor_term(1, z) == -pi**3*z**7/336

    assert fresnels(w).is_extended_real is True
    assert fresnels(z).is_extended_real is None

    # real/imaginary decomposition
    assert fresnels(z).as_real_imag() == \
        ((fresnels(re(z) - I*re(z)*abs(im(z))/abs(re(z)))/2 +
          fresnels(re(z) + I*re(z)*abs(im(z))/abs(re(z)))/2,
          I*(fresnels(re(z) - I*re(z)*abs(im(z))/abs(re(z))) -
             fresnels(re(z) + I*re(z)*abs(im(z))/abs(re(z)))) *
          re(z)*abs(im(z))/(2*im(z)*abs(re(z)))))
    assert fresnels(z).as_real_imag(deep=False) == fresnels(z).as_real_imag()
    assert fresnels(w).as_real_imag() == (fresnels(w), 0)
    assert fresnels(w).as_real_imag(deep=False) == fresnels(w).as_real_imag()
    assert (fresnels(I, evaluate=False).as_real_imag() ==
            (0, -erf(sqrt(pi)/2 + I*sqrt(pi)/2)/4 +
             I*(-erf(sqrt(pi)/2 + I*sqrt(pi)/2) + erf(sqrt(pi)/2 -
                I*sqrt(pi)/2))/4 - erf(sqrt(pi)/2 - I*sqrt(pi)/2)/4))
    assert fresnels(2 + 3*I).as_real_imag() == (
        fresnels(2 + 3*I)/2 + fresnels(2 - 3*I)/2,
        I*(fresnels(2 - 3*I) - fresnels(2 + 3*I))/2
    )

    assert expand_func(integrate(fresnels(z), z)) == \
        z*fresnels(z) + cos(pi*z**2/2)/pi

    assert fresnels(z).rewrite(meijerg) == sqrt(2)*pi*z**Rational(9, 4) * \
        meijerg(((), (1,)), ((Rational(3, 4),),
                (Rational(1, 4), 0)), -pi**2*z**4/16)/(2*(-z)**Rational(3, 4)*(z**2)**Rational(3, 4))

    # special values and symmetries of fresnelc
    assert fresnelc(0) == 0
    assert fresnelc(+oo) == Rational(+1, 2)
    assert fresnelc(-oo) == Rational(-1, 2)

    assert fresnelc(z) == fresnelc(z)
    assert fresnelc(-z) == -fresnelc(z)
    assert fresnelc(I*z) == I*fresnelc(z)
    assert fresnelc(-I*z) == -I*fresnelc(z)

    assert conjugate(fresnelc(z)) == fresnelc(conjugate(z))

    assert fresnelc(z).diff(z) == cos(pi*z**2/2)
    pytest.raises(ArgumentIndexError, lambda: fresnels(z).fdiff(2))
    pytest.raises(ArgumentIndexError, lambda: fresnelc(z).fdiff(2))

    # rewrites of fresnelc
    assert fresnelc(z).rewrite(erf) == (1 - I)/4 * (
        erf((1 + I)/2*sqrt(pi)*z) + I*erf((1 - I)/2*sqrt(pi)*z))

    assert fresnelc(z).rewrite(hyper) == \
        z * hyper([Rational(1, 4)], [Rational(1, 2), Rational(5, 4)], -pi**2*z**4/16)

    assert fresnelc(x).taylor_term(-1, z) == 0
    assert fresnelc(x).taylor_term(1, z, *(z,)) == -pi**2*z**5/40
    assert fresnelc(x).taylor_term(1, z) == -pi**2*z**5/40

    assert fresnelc(z).series(z, n=15) == \
        z - pi**2*z**5/40 + pi**4*z**9/3456 - pi**6*z**13/599040 + O(z**15)
    assert fresnelc(y/z).limit(z, 0) == fresnelc(oo*sign(y))

    # issue sympy/sympy#6510 — asymptotic series at infinity
    assert fresnels(z).series(z, oo) == \
        (-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + \
        (3/(pi**3*z**5) - 1/(pi*z) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + Rational(1, 2)
    assert fresnelc(z).series(z, oo) == \
        (-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + \
        (-3/(pi**3*z**5) + 1/(pi*z) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + Rational(1, 2)
    assert fresnels(1/z).series(z) == \
        (-z**3/pi**2 + O(z**6))*sin(pi/(2*z**2)) + (-z/pi + 3*z**5/pi**3 +
                                                    O(z**6))*cos(pi/(2*z**2)) + Rational(1, 2)
    assert fresnelc(1/z).series(z) == \
        (-z**3/pi**2 + O(z**6))*cos(pi/(2*z**2)) + (z/pi - 3*z**5/pi**3 +
                                                    O(z**6))*sin(pi/(2*z**2)) + Rational(1, 2)

    assert fresnelc(w).is_extended_real is True

    assert fresnelc(z).as_real_imag() == \
        ((fresnelc(re(z) - I*re(z)*abs(im(z))/abs(re(z)))/2 +
          fresnelc(re(z) + I*re(z)*abs(im(z))/abs(re(z)))/2,
          I*(fresnelc(re(z) - I*re(z)*abs(im(z))/abs(re(z))) -
             fresnelc(re(z) + I*re(z)*abs(im(z))/abs(re(z)))) *
          re(z)*abs(im(z))/(2*im(z)*abs(re(z)))))

    assert fresnelc(2 + 3*I).as_real_imag() == (
        fresnelc(2 - 3*I)/2 + fresnelc(2 + 3*I)/2,
        I*(fresnelc(2 - 3*I) - fresnelc(2 + 3*I))/2
    )

    assert expand_func(integrate(fresnelc(z), z)) == \
        z*fresnelc(z) - sin(pi*z**2/2)/pi

    assert fresnelc(z).rewrite(meijerg) == sqrt(2)*pi*z**Rational(3, 4) * \
        meijerg(((), (1,)), ((Rational(1, 4),),
                (Rational(3, 4), 0)), -pi**2*z**4/16)/(2*root(-z, 4)*root(z**2, 4))

    # numeric consistency of the various representations
    verify_numerically(re(fresnels(z)), fresnels(z).as_real_imag()[0], z)
    verify_numerically(im(fresnels(z)), fresnels(z).as_real_imag()[1], z)
    verify_numerically(fresnels(z), fresnels(z).rewrite(hyper), z)
    verify_numerically(fresnels(z), fresnels(z).rewrite(meijerg), z)
    verify_numerically(re(fresnelc(z)), fresnelc(z).as_real_imag()[0], z)
    verify_numerically(im(fresnelc(z)), fresnelc(z).as_real_imag()[1], z)
    verify_numerically(fresnelc(z), fresnelc(z).rewrite(hyper), z)
    verify_numerically(fresnelc(z), fresnelc(z).rewrite(meijerg), z)
| |
from __future__ import absolute_import
import os
import os.path
import json
import time
import pkgutil
import warnings
from datetime import datetime
import cherrypy
from cherrypy.wsgiserver.wsgiserver2 import CherryPyWSGIServer
from cherrypy.process.servers import ServerAdapter
# from cherrypy import _cperror, _cplogging
from ensconce import exc
from ensconce.config import config, init_app
from ensconce.autolog import log
from ensconce.model import meta
from ensconce.cya import auditlog
from ensconce.webapp import util, tree, tasks
from ensconce.auth import get_configured_providers
def error_handler(status, message, traceback, version):
    """Render an error page, as JSON or HTML depending on the Accept header."""
    wants_json = cherrypy.request.headers.get('Accept') == 'application/json'
    if wants_json:
        return json.dumps({'error': {'code': status, 'message': message}})
    context = {'status': status,
               'traceback': traceback,
               'message': message,
               'version': version}
    return util.render('error.html', context)
# Kinda a kludge to capture that we really only want to run configure() once
# (This is relevant for testing.)
# Module-level flag; configure() sets it to True on success.
configured = False
def configure():
    """
    Configures the cherrypy server (sets up the tree, cherrypy config, etc.).

    Sets the module-level ``configured`` flag when finished; side effects
    include mounting the application tree, registering background tasks and
    subscribing a ServerAdapter on the cherrypy engine.
    """
    global configured

    # Setup the session storage directory if it does not exist
    if config.get('sessions.on') and config.get('sessions.storage_type') == 'file':
        path = config['sessions.storage_path']
        if not os.path.exists(path):
            try:
                os.makedirs(path)  # By default these will be 0777
            except OSError:
                # Narrowed from a bare except: makedirs failures raise OSError;
                # we must not swallow SystemExit/KeyboardInterrupt here.
                warnings.warn("Unable to create the session directory: {0}".format(path))

    # NOTE: the original dict literal repeated "checker.on" and
    # "engine.autoreload_on"; duplicates (which silently let the later key
    # win) have been removed, keeping the effective values.
    cherrypy.config.update({
        "server.socket_host": config['server.socket_host'],
        "server.socket_port": config['server.socket_port'],
        "checker.on": False,
        "log.screen": False,
        "engine.autoreload_on": config.as_bool("debug"),
        "tools.sessions.on": config.as_bool('sessions.on'),
        "tools.sessions.persistent": config.as_bool('sessions.persistent'),
        "tools.sessions.path": config['sessions.path'],
        "tools.sessions.timeout": config['sessions.timeout'],
        "tools.sessions.storage_type": config['sessions.storage_type'],
        "tools.sessions.storage_path": config['sessions.storage_path'],
        "tools.sessions.secure": config['sessions.secure'],
        "request.show_tracebacks": config.as_bool("debug"),
        "tools.caching.on": False,
        "tools.expires.on": True,
        "tools.expires.secs": 0,
        "tools.expires.force": True,
        "tools.log_headers.on": False,
        "tools.encode.on": True,
        "tools.encode.encoding": "utf8",
        "error_page.default": error_handler
    })

    if config['server.behind_proxy']:
        cherrypy.config.update({"tools.proxy.on": True})

    if config['server.ssl_certificate']:
        # Make this conditional so we can host behind apache?
        cherrypy.config.update({
            "server.ssl_certificate": config['server.ssl_certificate'],
            "server.ssl_private_key": config['server.ssl_private_key'],
            "server.ssl_certificate_chain": config['server.ssl_certificate_chain'],
        })

    def rollback_dbsession():
        # Roll back the SQLAlchemy session when a request errors out.
        log.info("Rolling back SA transaction.")
        session = meta.Session()
        session.rollback()

    def commit_dbsession():
        # Commit the SQLAlchemy session at the end of a successful request.
        log.info("Committing SA transaction.")
        session = meta.Session()
        session.commit()

    cherrypy.tools.dbsession_rollback = cherrypy.Tool('before_error_response', rollback_dbsession)
    cherrypy.tools.dbsession_commit = cherrypy.Tool('on_end_resource', commit_dbsession)

    # This is a "flow-control" exception.
    class _LoginFailed(Exception):
        pass

    # TODO: Refactor to combine with the ensconce.webapp.tree methods
    def checkpassword(realm, username, password):
        """HTTP basic-auth callback: try each configured provider in order."""
        auth_providers = get_configured_providers()
        try:
            for auth_provider in auth_providers:
                try:
                    auth_provider.authenticate(username, password)
                except exc.InsufficientPrivileges:
                    # Fail fast in this case; we don't want to continue on to try other authenticators.
                    raise _LoginFailed()
                except exc.AuthError:
                    # Swallow other auth errors so it goes onto next authenticator in the list.
                    pass
                except Exception:
                    # Narrowed from a bare except.
                    # Other exceptions needs to get logged at least.
                    log.exception("Unexpected error authenticating user using {0!r}".format(auth_provider))
                else:
                    log.info("Authentication succeeded for username {0} using provider {1}".format(username, auth_provider))
                    break
            else:
                log.debug("Authenticators exhausted; login failed.")
                raise _LoginFailed()
        except _LoginFailed:
            auditlog.log(auditlog.CODE_AUTH_FAILED, comment=username)
            return False
        else:
            # Resolve the user using the *current value* for auth_provider (as that is the one that passed the auth.
            user = auth_provider.resolve_user(username)
            log.debug("Setting up cherrypy session with username={0}, user_id={1}".format(username, user.id))
            cherrypy.session['username'] = username  # @UndefinedVariable
            cherrypy.session['user_id'] = user.id  # @UndefinedVariable
            auditlog.log(auditlog.CODE_AUTH_LOGIN)
            return True

    app_conf = {
        "/static": {
            "tools.staticdir.on": True,
            "tools.staticdir.dir": config['static_dir'],
            "tools.staticdir.index": "index.html"
        },
        "/jsonrpc": {
            'tools.auth_basic.on': True,
            'tools.auth_basic.realm': 'api',
            'tools.auth_basic.checkpassword': checkpassword,
        }
    }

    # Add a plugin that will run the Crypto.Random.atfork() method, since this must
    # be called after forking (and we run this as a daemon in production)
    util.RNGInitializer(cherrypy.engine).subscribe()

    # Wire up our daemon tasks
    background_tasks = []
    if config.get('sessions.on'):
        background_tasks.append(tasks.DaemonTask(tasks.remove_old_session_files, interval=60))
    if config.get('backups.on'):
        backup_interval = config['backups.interval_minutes'] * 60
        background_tasks.append(tasks.DaemonTask(tasks.backup_database, interval=backup_interval, wait_first=True))
        background_tasks.append(tasks.DaemonTask(tasks.remove_old_backups, interval=3600, wait_first=True))  # This checks a day-granularity interval internally.

    # The unsubscribe call that used to live here was disabled because it
    # surfaced nasty bugs in testing; this loop now only *reports* listeners
    # that are already wired up.  (The old log message claimed it was
    # unsubscribing, which it never did.)
    for channel in cherrypy.engine.listeners:
        for callback in cherrypy.engine.listeners[channel]:
            log.debug("Existing listener (left subscribed) {0}:{1!r}".format(channel, callback))

    for task in background_tasks:
        cherrypy.engine.subscribe("start", task.start, priority=99)
        cherrypy.engine.subscribe("stop", task.stop)

    # Setup the basic/top-level webapp API
    root = tree.Root()

    # Iterate over all the modules in the ensconce.webapp.tree package and add
    # their 'Root' classes to the tree
    pkgpath = os.path.dirname(tree.__file__)
    for modname in [name for (_, name, _) in pkgutil.iter_modules([pkgpath])]:
        module = __import__("ensconce.webapp.tree." + modname, fromlist=["Root"])
        module_root = module.Root()
        setattr(root, modname, module_root)

    # I think this is here because we want to explicitly specify the ServerAdapter below
    # rather than use a default one.
    cherrypy.server.unsubscribe()

    app = cherrypy.tree.mount(root, "/", app_conf)
    app.log.error_log.level = cherrypy.log.error_log.level  # @UndefinedVariable
    app.log.access_log.level = cherrypy.log.access_log.level  # @UndefinedVariable

    addr = (config["server.socket_host"], config["server.socket_port"])
    server = CherryPyWSGIServer(addr, app, numthreads=50, timeout=2)  # TODO: make numthreads and keepalive timeout configurable

    # TODO: This is also mentioned in the cherrypy config above .... ? One of these is probably redundant.
    server.ssl_certificate = config["server.ssl_certificate"]
    server.ssl_private_key = config["server.ssl_private_key"]
    if config["server.ssl_certificate_chain"]:
        server.ssl_certificate_chain = config["server.ssl_certificate_chain"]

    adapter = ServerAdapter(cherrypy.engine, server, server.bind_addr)
    adapter.subscribe()

    configured = True
def serve_forever():
    """
    Run the [already-configured] cherrypy server forever.

    Blocks until the engine exits; a KeyboardInterrupt is logged and results
    in a graceful return rather than a traceback.
    """
    cherrypy.engine.start()
    try:
        cherrypy.engine.block()
    except KeyboardInterrupt:
        log.info("shutting down due to KeyboardInterrupt")
def main():
    """
    Main entrypoint script, will initialize the application, configure server and serve (blocking).
    """
    init_app()       # application/config/database initialization
    configure()      # cherrypy tree, tools, background tasks, server adapter
    serve_forever()  # blocks until shutdown
# Allow running this module directly as the server entrypoint.
if __name__ == u"__main__":
    main()
| |
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
# ## critical --- make a copy of the environment
# (web2py executes this controller with request/response/session/T/etc.
# injected into the global namespace)
global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    # NOTE(review): bare except — presumably guards against DNS failures in
    # gethostbyname(); consider narrowing to socket.error.
    hosts = (http_host, )

# Refuse access over an insecure, non-local channel (except for 'manage').
if request.env.http_x_forwarded_for or request.is_https:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
        (request.function != 'manage'):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))

if request.function == 'manage':
    # Access-control management UI: requires the configured manager role.
    if not 'auth' in globals() or not request.args:
        redirect(URL(request.controller, 'index'))
    manager_action = auth.settings.manager_actions.get(request.args(0), None)
    if manager_action is None and request.args(0) == 'auth':
        manager_action = dict(role=auth.settings.auth_manager_role,
                              heading=T('Manage Access Control'),
                              tables=[auth.table_user(),
                                      auth.table_group(),
                                      auth.table_permission()])
    manager_role = manager_action.get('role', None) if manager_action else None
    auth.requires_membership(manager_role)(lambda: None)()
    menu = False
elif (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
    # Not authenticated: bounce to the admin login, preserving the target URL.
    redirect(URL('admin', 'default', 'index',
                 vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
    response.subtitle = T('Database Administration (appadmin)')
    menu = True

ignore_rw = True
response.view = 'appadmin.html'
if menu:
    response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                     args=[request.application])], [T('db'), False,
                     URL('index')], [T('state'), False,
                     URL('state')], [T('cache'), False,
                     URL('ccache')]]

# ##########################################################
# ## auxiliary functions
# ###########################################################

# NOTE(review): dead code — the 'if False' guard disables ticket storage setup.
if False and request.tickets_db:
    from gluon.restricted import TicketStorage
    ts = TicketStorage()
    ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
    """Return a dict mapping global name -> database connection object.

    Scans the controller's captured global environment for DAL instances.
    The *request* parameter is unused; it is kept for the web2py controller
    calling convention.
    """
    dbs = {}
    for (key, value) in global_env.items():
        cond = False
        try:
            cond = isinstance(value, GQLDB)
        except NameError:
            # Narrowed from a bare except: GQLDB (the Google App Engine
            # adapter) may simply not be defined in this environment, in
            # which case we fall back to the generic SQLDB check.
            cond = isinstance(value, SQLDB)
        if cond:
            dbs[key] = value
    return dbs
# Snapshot of the available database connections, computed once per request.
databases = get_databases(None)
def eval_in_global_env(text):
    # Evaluate *text* as a Python expression in the controller's globals.
    # NOTE(review): exec of request-supplied text is by design in appadmin
    # (an admin-only tool guarded by the security preamble above), but it is
    # dangerous if this controller is ever exposed more widely.
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Resolve the database named by the first URL arg, or redirect away."""
    name = request.args[0] if request.args else None
    if name in databases:
        return eval_in_global_env(name)
    # Unknown/missing database name: flash and bounce back to the index.
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_table(request):
    """Return ``(db, tablename)`` from the URL args, or redirect away."""
    db = get_database(request)
    if len(request.args) > 1:
        tablename = request.args[1]
        if tablename in db.tables:
            return (db, tablename)
    # Missing or unknown table name: flash and bounce back to the index.
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_query(request):
    """Evaluate the 'query' request variable; return None if it fails."""
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        # Malformed user query strings are expected; signalled via None.
        return None
def query_by_table_type(tablename, db, request=request):
    """Build a default 'select everything' query string for *tablename*."""
    table = db[tablename]
    if hasattr(table, '_primarykey'):
        # Keyed table: filter on its first primary-key field, using a
        # condition appropriate to that field's type.
        firstkey = table[table._primarykey[0]]
        cond = '!=""' if firstkey.type in ['string', 'text'] else '>0'
        return '%s.%s.%s%s' % (
            request.args[0], request.args[1], firstkey.name, cond)
    # Ordinary table: every row has id > 0.
    return '%s.%s.id>0' % tuple(request.args[:2])
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List all known database connections."""
    return {'databases': databases}
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Show an insert form for the requested table and process submissions."""
    (db, tablename) = get_table(request)
    form = SQLFORM(db[tablename], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[tablename])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file belonging to the requested database.

    Delegates entirely to web2py's ``response.download``.
    """
    # (dropped an unused local 'import os'; nothing here touched os)
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Export the rows matching the current query as a CSV attachment.

    Returns None (empty response) when the query does not parse.
    """
    # (dropped a redundant local 'import gluon.contenttype'; the module is
    # already imported at the top of this controller)
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    # Name the download after the db and table parts of the query string.
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
        % tuple(request.vars.query.split('.')[:2])
    return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
    """Load CSV data from *file* into *table* (thin DAL wrapper)."""
    table.import_from_csv_file(file)
def select():
    """Browse records matching a user-supplied query string.

    Also processes bulk update/delete submissions and an optional CSV
    import form.  Returns the context dict rendered by appadmin.html.
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    try:
        is_imap = db._uri.startswith("imap://")
    except (KeyError, AttributeError, TypeError):
        is_imap = False
    # Shorthand queries of the form "table.field=value" get expanded into a
    # full "db.table.field==value" expression below.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        # Keyed tables may have non-numeric key values.
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                                                   match.group('table'), match.group('field'),
                                                   match.group('value'))
    else:
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    step = 100
    fields = []
    if is_imap:
        step = 3  # IMAP backends are slow; fetch only a few rows per page
    stop = start + step
    table = None
    rows = []
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        # Clicking the same column twice toggles ascending/descending ('~').
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(
                    error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                      value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value=T('submit')))),
                _action=URL(r=request, args=request.args))
    tb = None
    if form.accepts(request.vars, formname=None):
        # Extract the target table name from the submitted query.
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query, ignore_common_filters=True).count()
            if form.vars.update_check and form.vars.update_fields:
                # Bulk update with user-supplied field=value pairs.
                db(query, ignore_common_filters=True).update(
                    **eval_in_global_env('dict(%s)' % form.vars.update_fields))
                response.flash = T('%s %%{row} updated', nrows)
            elif form.vars.delete_check:
                db(query, ignore_common_filters=True).delete()
                response.flash = T('%s %%{row} deleted', nrows)
            nrows = db(query, ignore_common_filters=True).count()
            if is_imap:
                # Restrict IMAP selects to a few cheap header fields.
                fields = [db[table][name] for name in
                          ("id", "uid", "created", "to",
                           "sender", "subject")]
            if orderby:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop),
                    orderby=eval_in_global_env(orderby))
            else:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop))
        except Exception, e:
            # Surface query errors to the user with the full traceback.
            import traceback
            tb = traceback.format_exc()
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    # begin handle upload csv
    csv_table = table or request.vars.table
    if csv_table:
        formcsv = FORM(str(T('or import from csv file')) + " ",
                       INPUT(_type='file', _name='csvfile'),
                       INPUT(_type='hidden', _value=csv_table, _name='table'),
                       INPUT(_type='submit', _value=T('import')))
    else:
        formcsv = None
    if formcsv and formcsv.process().accepted:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    # end handle upload csv
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        step=step,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        formcsv=formcsv,
        tb=tb
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit (or delete) a single record, looked up by id or primary key."""
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    # Disable common filters so otherwise-hidden records can still be edited.
    db[table]._common_filter = None
    if keyed:
        # Keyed table: find the first primary-key field present in the vars.
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[
                0]]).select().first()
    else:
        record = db(db[table].id == request.args(
            2)).select().first()

    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))

    if keyed:
        # Primary-key fields must not be edited once the record exists.
        for k in db[table]._primarykey:
            db[table][k].writable = False

    form = SQLFORM(
        db[table], record, deletable=True, delete_label=T('Check to delete'),
        ignore_rw=ignore_rw and not keyed,
        linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                                                      f='download', args=request.args[:1]))

    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Expose request/session/response state (the view does the rendering)."""
    return {}
def ccache():
    """Inspect and optionally clear the RAM and disk caches."""
    cache.ram.initialize()
    cache.disk.initialize()

    form = FORM(
        P(TAG.BUTTON(
            T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON(
            T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON(
            T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
    )

    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True

        if clear_ram:
            cache.ram.clear()
            session.flash += T("Ram Cleared")
        if clear_disk:
            cache.disk.clear()
            session.flash += T("Disk Cleared")

        redirect(URL(r=request))

    # guppy is optional; without it no per-object size statistics are shown.
    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False

    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker

    # Accumulators for the statistics tables rendered by the view.
    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    disk['keys'] = []
    total['keys'] = []

    def GetInHMS(seconds):
        # Convert a duration in seconds to an (hours, minutes, seconds) tuple.
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)

        return (hours, minutes, seconds)

    for key, value in cache.ram.storage.iteritems():
        if isinstance(value, dict):
            # This entry holds the cache's own hit/miss counters.
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            # Regular cached entry: value is (timestamp, payload).
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            ram['entries'] += 1
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
            ram['keys'].append((key, GetInHMS(time.time() - value[0])))

    folder = os.path.join(request.folder,'cache')
    if not os.path.exists(folder):
        os.mkdir(folder)

    # Lock the shelve file while reading so concurrent requests don't corrupt it.
    locker = open(os.path.join(folder, 'cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(
        os.path.join(folder, 'cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))
    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()

    # Combine RAM + disk statistics.
    total['entries'] = ram['entries'] + disk['entries']
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    total['keys'] = ram['keys'] + disk['keys']

    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0

    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']

    # Convert the 'oldest' timestamps into display-friendly (h, m, s) ages.
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])

    def key_table(keys):
        # Render a list of (key, (h, m, s)) pairs as an HTML table.
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))

    ram['keys'] = key_table(ram['keys'])
    disk['keys'] = key_table(disk['keys'])
    total['keys'] = key_table(total['keys'])

    return dict(form=form, total=total,
                ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
    """Render a table's fields as a graphviz HTML-like node label string."""
    from gluon.html import TR, TD, TABLE, TAG

    def FONT(*args, **kwargs):
        return TAG.font(*args, **kwargs)

    def types(field):
        # Short annotation for a field: its length, 'pk', 'fk', or blank.
        f_type = field.type
        if not isinstance(f_type,str):
            return ' '
        elif f_type == 'string':
            return field.length
        elif f_type == 'id':
            return B('pk')
        elif f_type.startswith('reference') or \
                f_type.startswith('list:reference'):
            return B('fk')
        else:
            return ' '

    # This is horrible HTML but the only kind graphviz understands
    rows = []
    cellpadding = 4
    color = "#000000"
    bgcolor = "#FFFFFF"
    face = "Helvetica"
    face_bold = "Helvetica Bold"
    border = 0
    # Header row: the table name, inverted colors.
    rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
                      _colspan=3, _cellpadding=cellpadding,
                      _align="center", _bgcolor=color)))
    # One row per field: name, type, and pk/fk/length annotation.
    for row in db[table]:
        rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(row.type, _color=color, _face=face),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(types(row), _color=color, _face=face),
                          _align="center", _cellpadding=cellpadding,
                          _border=border)))
    return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
                                          _cellborder=0, _cellspacing=0)
                            ).xml()
def bg_graph_model():
    """Render the database model as a graphviz graph.

    Builds one plaintext node per table (labelled via ``table_template``),
    clusters tables by their ``_meta_graphmodel`` group, and adds an edge
    for every ``reference`` / ``list:reference`` field.  Without request
    args the action returns PNG bytes; with an arg it returns that format
    (or the raw dot source for ``dot``) as an attachment.
    """
    graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
    subgraphs = dict()
    for tablename in db.tables:
        if hasattr(db[tablename], '_meta_graphmodel'):
            meta_graphmodel = db[tablename]._meta_graphmodel
        else:
            meta_graphmodel = dict(group='Undefined', color='#ECECEC')
        group = meta_graphmodel['group'].replace(' ', '')
        # BUG FIX: dict.has_key()/iterkeys() are Python 2 only; use `in`
        # and plain dict iteration.  Also collapse the duplicated append
        # that appeared identically in both branches of the original.
        if group not in subgraphs:
            subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
        subgraphs[group]['tables'].append(tablename)
        graph.add_node(tablename, name=tablename, shape='plaintext',
                       label=table_template(tablename))
    for n, key in enumerate(subgraphs):
        graph.subgraph(nbunch=subgraphs[key]['tables'],
                       name='cluster%d' % n,
                       style='filled',
                       color=subgraphs[key]['meta']['color'],
                       label=subgraphs[key]['meta']['group'])
    # One edge per (list:)reference field, pointing at the referenced table.
    for tablename in db.tables:
        for field in db[tablename]:
            f_type = field.type
            if isinstance(f_type, str) and (
                    f_type.startswith('reference') or
                    f_type.startswith('list:reference')):
                referenced_table = f_type.split()[1].split('.')[0]
                n1 = graph.get_node(tablename)
                n2 = graph.get_node(referenced_table)
                graph.add_edge(n1, n2, color="#4C4C4C", label='')
    graph.layout()
    if not request.args:
        response.headers['Content-Type'] = 'image/png'
        return graph.draw(format='png', prog='dot')
    else:
        response.headers['Content-Disposition'] = 'attachment;filename=graph.%s' % request.args(0)
        if request.args(0) == 'dot':
            return graph.string()
        else:
            return graph.draw(format=request.args(0), prog='dot')
def graph_model():
    """Web2py action for the model-graph page.

    Exposes the controller's ``databases`` collection and the pygraphviz
    module to the view; the actual image is produced by bg_graph_model().
    """
    return dict(databases=databases, pgv=pgv)
def manage():
    """Web2py action backing the generic table-management page.

    Reads the configured ``manager_action`` dict (tables, db, headings,
    smartgrid options), applies friendlier labels to the auth tables when
    managing 'auth', and returns either the page scaffold (non-.load
    requests) or a SQLFORM.smartgrid for the selected table (.load
    component requests).
    """
    tables = manager_action['tables']
    # Table names may be given as strings; resolve them against the db.
    if isinstance(tables[0], str):
        db = manager_action.get('db', auth.db)
        db = globals()[db] if isinstance(db, str) else db
        tables = [db[table] for table in tables]
    # Human-friendly plural labels for the auth tables.
    if request.args(0) == 'auth':
        auth.table_user()._plural = T('Users')
        auth.table_group()._plural = T('Roles')
        auth.table_membership()._plural = T('Memberships')
        auth.table_permission()._plural = T('Permissions')
    # Full page request: return the scaffold; grids load as components.
    if request.extension != 'load':
        return dict(heading=manager_action.get('heading',
                    T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
                    tablenames=[table._tablename for table in tables],
                    labels=[table._plural.title() for table in tables])
    table = tables[request.args(1, cast=int)]
    formname = '%s_grid' % table._tablename
    linked_tables = orderby = None
    if request.args(0) == 'auth':
        # Hide raw ids and relabel the auth link fields for readability.
        auth.table_group()._id.readable = \
            auth.table_membership()._id.readable = \
            auth.table_permission()._id.readable = False
        auth.table_membership().user_id.label = T('User')
        auth.table_membership().group_id.label = T('Role')
        auth.table_permission().group_id.label = T('Role')
        auth.table_permission().name.label = T('Permission')
        if table == auth.table_user():
            linked_tables=[auth.settings.table_membership_name]
        elif table == auth.table_group():
            orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
        elif table == auth.table_permission():
            orderby = 'group_id'
    kwargs = dict(user_signature=True, maxtextlength=1000,
                  orderby=orderby, linked_tables=linked_tables)
    # Per-table smartgrid overrides, layered over the DEFAULT section.
    smartgrid_args = manager_action.get('smartgrid_args', {})
    kwargs.update(**smartgrid_args.get('DEFAULT', {}))
    kwargs.update(**smartgrid_args.get(table._tablename, {}))
    grid = SQLFORM.smartgrid(table, args=request.args[:2], formname=formname, **kwargs)
    return grid
| |
#Distributed under the MIT licesnse.
#Copyright (c) 2013 Cospan Design (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import json
import platform
import glob
import re
# Root of the project tree: the parent of the directory holding this file.
PROJECT_BASE = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir))
# Default names for the project configuration file and build output dir.
DEFAULT_CONFIG_FILE = "config.json"
DEFAULT_BUILD_DIR = "build"
# Supported Xilinx tool flows (compared lowercase in find_xilinx_path).
TOOL_TYPES=("ise",
            "planahead",
            "vivado")
# Default install locations of the Xilinx tools per platform; the Windows
# value is a directory *name* searched for on every drive.
LINUX_XILINX_DEFAULT_BASE = "/opt/Xilinx"
WINDOWS_XILINX_DEFAULT_BASE = "Xilinx"
class ConfigurationError(Exception):
    """
    Errors associated with configuration:
        getting the configuration file for the project
        getting the default xilinx toolchain
        locating the Xilinx install or license directories
    """
    pass
def get_project_base():
    """
    Returns the project base directory (the module-level PROJECT_BASE
    constant, resolved once at import time).

    Args:
        Nothing

    Returns:
        Path (String) to base directory

    Raises:
        Nothing
    """
    return PROJECT_BASE
def get_window_drives():
    """
    Returns a list of drives for a windows box

    Args:
        Nothing

    Return:
        Returns a list of single-character drive letters (e.g. ['C', 'D'])

    Raises:
        ConfigurationError: when not running on Windows
    """
    if os.name != "nt":
        raise ConfigurationError("Not a windows box")
    import string
    from ctypes import windll
    drives = []
    # GetLogicalDrives returns a bitmask: bit 0 = 'A', bit 1 = 'B', ...
    bitmask = windll.kernel32.GetLogicalDrives()
    # string.uppercase is Python 2 only; ascii_uppercase exists in both.
    for letter in string.ascii_uppercase:
        if bitmask & 1:
            # The associated bit for this letter is set.
            drives.append(letter)
        # BUG FIX: original said `bitmaks >>= 1` (NameError typo), and only
        # shifted inside the if, so the scan could never advance correctly.
        bitmask >>= 1
    return drives
def find_license_dir(path = ""):
    """
    Based on the operating system attempt to find the license in the default
    locations

    Args:
        path (string): a path to the license, or a path to start searching
            for the license

    Returns:
        (string) A path to where the license files are

    Raises:
        ConfigurationError when a license cannot be found
    """
    # An explicit, existing path always wins.
    if len (path) > 0:
        if os.path.exists(path):
            return path
    if os.name == "posix":
        # First attempt to find the file in the default location (~/.Xilinx)
        home = os.environ["HOME"]
        xilinx_dir = os.path.join(home, ".Xilinx")
        if os.path.exists(xilinx_dir):
            results = glob.glob(os.path.join(xilinx_dir, "*.lic"))
            # BUG FIX: original tested len() of the glob *pattern* (always
            # true) and returned the pattern string instead of the license
            # directory the docstring promises.
            if len(results) > 0:
                return xilinx_dir
        # BUG FIX: 'ConfiugrationError' typo raised NameError instead.
        raise ConfigurationError("Error unable to find Xilinx License File")
    elif os.name == "nt":
        print("Windows box... TODO :(")
        raise ConfigurationError("Error unable to find Xilinx License File on Windows box")
def find_xilinx_path(path = "", build_tool = "ISE", version_number = ""):
    """
    Finds the path of the xilinx build tool specified by the user

    Args:
        path (string): a path to the base directory of xilinx
            (leave empty to use the default location)
        build_tool (string): tool to use; valid build tools are listed in
            TOOL_TYPES (leave empty for "ISE")
        version_number (string): specify a version number to use
            for one of the tool chain: EG
                build_tool = ISE     version_number = 13.2
                build_tool = Vivado  version_number = 2013.1
            (leave empty for the latest version)

    Returns:
        A path to the build tool, None if not found

    Raises:
        ConfigurationError
    """
    # Searches for the xilinx tool in the default locations on Linux and
    # Windows.
    if build_tool.lower() not in TOOL_TYPES:
        raise ConfigurationError("Build tool: (%s) not recognized \
                the following build tools are valid: %s" %
                (build_tool, str(TOOL_TYPES)))
    xilinx_base = ""
    if os.name == "posix":
        # Linux: caller-supplied base or the default /opt/Xilinx.
        if len(path) > 0:
            xilinx_base = path
        else:
            xilinx_base = LINUX_XILINX_DEFAULT_BASE
        if not os.path.exists(xilinx_base):
            return None
    elif os.name == "nt":
        # BUG FIX: was `path is not None or len(path) > 0`, which is always
        # true for the default "" so the drive scan below was unreachable.
        if len(path) > 0:
            xilinx_base = path
        else:
            # Windows: scan every available drive for the Xilinx directory.
            drives = get_window_drives()
            for drive in drives:
                # Check each base directory
                try:
                    dirnames = os.listdir("%s:" % drive)
                    # BUG FIX: corrected misspelled constant names
                    # (WINDOWS_XLINX_... / ..._DEFUALT_...) that raised
                    # NameError at runtime.
                    if WINDOWS_XILINX_DEFAULT_BASE in dirnames:
                        xilinx_base = os.path.join("%s:" % drive,
                                                   WINDOWS_XILINX_DEFAULT_BASE)
                        # BUG FIX: the existence test was inverted; skip the
                        # candidate only when it does NOT exist.
                        if not os.path.exists(xilinx_base):
                            continue
                        # Found the first occurrence of Xilinx, drop out.
                        break
                except WindowsError as err:
                    # This drive is not usable.
                    pass
        # BUG FIX: corrected misspelled variable name (xiilinx_base).
        if len(xilinx_base) == 0:
            return None
    # Found the Xilinx base; now resolve the tool/version sub-directory.
    dirnames = os.listdir(xilinx_base)
    if build_tool.lower() == "ise" or build_tool.lower() == "planahead":
        # ISE and PlanAhead live in <base>/<version>/ISE_DS.
        if len(version_number) > 0:
            if version_number not in dirnames:
                raise ConfigurationError(
                    "Version number: %s not found in %s" %
                    (version_number, xilinx_base))
            return os.path.join(xilinx_base, version_number, "ISE_DS")
        # No version requested: pick the numerically largest version dir.
        f = -1.0
        max_float_dir = ""
        for fdir in os.listdir(xilinx_base):
            try:
                if f < float(fdir):
                    f = float(fdir)
                    max_float_dir = fdir
            except ValueError as err:
                # Not a valid numeric directory.
                pass
        return os.path.join(xilinx_base, max_float_dir, "ISE_DS")
    else:
        # Vivado lives in <base>/Vivado/<version>.
        if "Vivado" not in dirnames:
            raise ConfigurationError(
                "Vivado is not in the xilinx directory")
        xilinx_base = os.path.join(xilinx_base, "Vivado")
        if len(os.listdir(xilinx_base)) == 0:
            raise ConfigurationError(
                "Vivado directory is empty!")
        if len(version_number) > 0:
            # BUG FIX: previously returned the bare Vivado dir even when the
            # requested version was absent; raise like the ISE branch does.
            if version_number not in os.listdir(xilinx_base):
                raise ConfigurationError(
                    "Version number: %s not found in %s" %
                    (version_number, xilinx_base))
            return os.path.join(xilinx_base, version_number)
        # No version requested: pick the numerically largest version dir.
        float_max = float(os.listdir(xilinx_base)[0])
        for f in os.listdir(xilinx_base):
            if float(f) > float_max:
                float_max = float(f)
        xilinx_base = os.path.join(xilinx_base, str(float_max))
        return xilinx_base
| |
import pytest
networkx = pytest.importorskip("networkx")
from pyscipopt import Model, Conshdlr, SCIP_RESULT, SCIP_PARAMEMPHASIS, SCIP_PARAMSETTING
try:
    from types import SimpleNamespace
# BUG FIX: was a bare `except:`, which would also swallow KeyboardInterrupt
# and unrelated errors; only an ImportError means we need the fallback.
except ImportError:
    class SimpleNamespace:
        """Minimal stand-in for types.SimpleNamespace (Python < 3.3)."""
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def __repr__(self):
            keys = sorted(self.__dict__)
            items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
            return "{}({})".format(type(self).__name__, ", ".join(items))

        def __eq__(self, other):
            return self.__dict__ == other.__dict__
# Initial Sudoku values in row-major order (9x9); 0 marks an empty square.
init = [5, 3, 0, 0, 7, 0, 0, 0, 0,
        6, 0, 0, 1, 9, 5, 0, 0, 0,
        0, 9, 8, 0, 0, 0, 0, 6, 0,
        8, 0, 0, 0, 6, 0, 0, 0, 3,
        4, 0, 0, 8, 0, 3, 0, 0, 1,
        7, 0, 0, 0, 2, 0, 0, 0, 6,
        0, 6, 0, 0, 0, 0, 2, 8, 0,
        0, 0, 0, 4, 1, 9, 0, 0, 5,
        0, 0, 0, 0, 8, 0, 0, 7, 9]
def plot_graph(G):
    """Draw the bipartite graph G with one part at x=1 and the other at x=2,
    labelling every node with its own name (skips if matplotlib is absent)."""
    plt = pytest.importorskip("matplotlib.pyplot")
    left_nodes, right_nodes = networkx.bipartite.sets(G)
    # Column layout: left part at x=1, right part at x=2, stacked by index.
    pos = {node: (1, row) for row, node in enumerate(left_nodes)}
    pos.update((node, (2, row)) for row, node in enumerate(right_nodes))
    networkx.draw(G, pos=pos, with_labels=False)
    node_labels = {node: node for node in G.nodes()}
    networkx.draw_networkx_labels(G, pos, node_labels)
    plt.show()
# all different constraint handler
class ALLDIFFconshdlr(Conshdlr):
    """All-different constraint handler, propagated with Regin's matching
    algorithm on the variables' value graph.

    Each constraint carries its data in ``cons.data``: ``vars`` (the SCIP
    variables) and ``domains`` (dict mapping var.ptr() -> set of remaining
    integer values); see create_sudoku() for the expected layout.
    """
    # value graph: bipartite graph between variables and the union of their domains
    # an edge connects a variable and a value iff the value is in the variable's domain
    def build_value_graph(self, vars, domains):
        """Build the bipartite value graph for *vars*.

        :param vars: list of SCIP variables.
        :param domains: dict mapping var.ptr() -> set of candidate values.
        :return: (G, vals) — the networkx Graph and the union of all values.
        """
        #print(domains)
        vals = set([])
        for var in vars:
            #print("domain of var ", var.name, "is ", domains[var])
            vals.update(domains[var.ptr()]) # vals = vals union domains[var]
        G = networkx.Graph()
        G.add_nodes_from((var.name for var in vars), bipartite = 0) # add vars names as nodes
        G.add_nodes_from(vals, bipartite = 1) # add union of values as nodes
        for var in vars:
            for value in domains[var.ptr()]:
                G.add_edge(var.name, value)
        return G, vals
    # propagates single constraint: uses Regin's Algorithm as described in
    # https://www.ps.uni-saarland.de/courses/seminar-ws04/papers/anastasatos.pdf
    # The idea is that every solution of an all different constraint corresponds to a maximal matching in
    # a bipartite graph (see value graph). Furthermore, if an arc of this arc is in no maximal matching, then
    # one can remove it. Removing and arc corresponds to remove a value in the domain of the variable.
    # So what the algorithm does is to determine which arcs can be in a maximal matching. Graph theory help
    # us build fast algorithm so that we don't have to compute all possible maximal matchings ;)
    # That being said, the implementation is pretty naive and brute-force, so there is a lot of room for improvement
    def propagate_cons(self, cons):
        """Propagate one all-different constraint.

        Returns SCIP_RESULT.CUTOFF when no full matching exists (infeasible),
        REDUCEDDOM when at least one value was removed from a domain, and
        DIDNOTFIND when nothing could be deduced.  May also tighten variable
        bounds via chgVarLb/chgVarUb.
        """
        #print("propagating cons %s with id %d"%(cons.name, id(cons)))
        vars = cons.data.vars
        domains = cons.data.domains
        # TODO: would be nice to have a flag to know whether we should propagate the constraint.
        # We would need an event handler to let us know whenever a variable of our constraint changed its domain
        # Currently we can't write event handlers in python.
        G, vals = self.build_value_graph(vars, domains)
        try:
            M = networkx.bipartite.maximum_matching(G) # returns dict between nodes in matching
        except:
            # Newer networkx requires the bipartition to be given explicitly.
            top_nodes = {n for n, d in G.nodes(data=True) if d['bipartite'] == 0}
            bottom_nodes = set(G) - top_nodes
            M = networkx.bipartite.maximum_matching(G, top_nodes) # returns dict between nodes in matching
        # M contains both directions, hence len(M)/2 matched pairs.
        if( len(M)/2 < len(vars) ):
            #print("it is infeasible: max matching of card ", len(M), " M: ", M)
            #print("Its value graph:\nV = ", G.nodes(), "\nE = ", G.edges())
            plot_graph(G)
            return SCIP_RESULT.CUTOFF
        # build auxiliary directed graph: direct var -> val if [var, val] is in matching, otherwise var <- val
        # note that all vars are matched
        D = networkx.DiGraph()
        D.add_nodes_from(G) ## this seems to work
        for var in vars:
            D.add_edge(var.name, M[var.name])
            for val in domains[var.ptr()]:
                if val != M[var.name]:
                    D.add_edge(val, var.name)
        # find arcs that *do not* need to be removed and *remove* them from G. All remaining edges of G
        # should be use to remove values from the domain of variables
        # get all free vertices
        V = set(G.nodes())
        V_matched = set(M)
        V_free = V.difference(V_matched)
        #print("matched nodes ", V_matched, "\nfree nodes ", V_free)
        # TODO quit() << this produces an assertion
        # no variable should be free!
        for var in vars:
            assert var.name not in V_free
        # perform breadth first search starting from free vertices and mark all visited edges as useful
        for v in V_free:
            visited_edges = networkx.bfs_edges(D, v)
            G.remove_edges_from(visited_edges)
        # compute strongly connected components of D and mark edges on the cc as useful
        for g in networkx.strongly_connected_components(D):
            for e in D.subgraph(g).edges():
                if G.has_edge(*e):
                    G.remove_edge(*e)
        # cannot remove edges in matching!
        for var in vars:
            e = (var.name, M[var.name])
            if G.has_edge(*e):
                G.remove_edge(*e)
        # check that there is something to remove
        if G.size() == 0:
            return SCIP_RESULT.DIDNOTFIND
        #print("Edges to remove!", G.edges())
        # remove values
        for var in vars:
            for val in domains[var.ptr()].copy():
                if G.has_edge(var.name, val):
                    domains[var.ptr()].remove(val) # this asserts if value is not there and we shouldn't delete two times the same value
        # "fix" variable when possible
        for var in vars:
            #print("domain of var ", var.name, "is ", domains[var])
            minval = min(domains[var.ptr()])
            maxval = max(domains[var.ptr()])
            if var.getLbLocal() < minval:
                self.model.chgVarLb(var, minval)
            if var.getUbLocal() > maxval:
                self.model.chgVarUb(var, maxval)
            #print("bounds of ", var, "are (%d,%d)"%(minval,maxval))
        return SCIP_RESULT.REDUCEDDOM
    # propagator callback
    def consprop(self, constraints, nusefulconss, nmarkedconss, proptiming): # I have no idea what to return, documentation?
        """SCIP propagation callback: propagate each constraint, stopping
        early on CUTOFF; report the strongest result found."""
        result = SCIP_RESULT.DIDNOTFIND
        for cons in constraints:
            prop_result = self.propagate_cons(cons)
            if prop_result == SCIP_RESULT.CUTOFF:
                result = prop_result
                break
            if prop_result == SCIP_RESULT.REDUCEDDOM:
                result = prop_result
        return {"result": result}
    def is_cons_feasible(self, cons, solution = None):
        """Return True iff the constraint's variables take pairwise distinct
        (rounded) values in *solution* (None = current LP/pseudo solution)."""
        #print("checking feasibility of constraint %s id: %d"%(cons.name, id(cons)))
        sol_values = set()
        for var in cons.data.vars:
            sol_values.add(round(self.model.getSolVal(solution, var)))
        #print("sol_values = ", sol_values)
        return len(sol_values) == len(cons.data.vars)
    # checks whether solution is feasible, ie, if they are all different
    # since the checkpriority is < 0, we are only called if the integrality
    # constraint handler didn't find infeasibility, so solution is integral
    def conscheck(self, constraints, solution, check_integrality, check_lp_rows, print_reason, completely):
        """SCIP feasibility-check callback for a candidate solution."""
        for cons in constraints:
            if not self.is_cons_feasible(cons, solution):
                return {"result": SCIP_RESULT.INFEASIBLE}
        return {"result": SCIP_RESULT.FEASIBLE}
    # enforces LP solution
    def consenfolp(self, constraints, n_useful_conss, sol_infeasible):
        """SCIP enforcement callback for the current LP solution."""
        for cons in constraints:
            if not self.is_cons_feasible(cons):
                # TODO: suggest some value to branch on
                return {"result": SCIP_RESULT.INFEASIBLE}
        return {"result": SCIP_RESULT.FEASIBLE}
    def conslock(self, constraint, locktype, nlockspos, nlocksneg):
        """Lock every variable in both directions: rounding either way can
        violate an all-different constraint."""
        for var in constraint.data.vars:
            self.model.addVarLocks(var, nlockspos + nlocksneg , nlockspos + nlocksneg)
    def constrans(self, constraint):
        """Constraint transformation callback; nothing extra to transform."""
        #print("CONSTRANS BEING CAAAAAAAAAAAAAAAAAAAALLLLLLED")
        return {}
# builds sudoku model; adds variables and all diff constraints
def create_sudoku():
    """Build the Sudoku SCIP model with all-different constraints.

    :return: (scip, x) where ``x`` maps (row, col) -> integer variable.
        Cells fixed in ``init`` get lb == ub == the given digit.

    NOTE: one ``domains`` dict (keyed by var.ptr()) is shared by every
    row/column/square constraint, so a domain reduction performed while
    propagating one constraint is immediately visible to all others.
    """
    scip = Model("Sudoku")
    x = {} # values of squares
    for row in range(9):
        for col in range(9):
            # some variables are fix
            if init[row*9 + col] != 0:
                x[row,col] = scip.addVar(vtype = "I", lb = init[row*9 + col], ub = init[row*9 + col], name = "x(%s,%s)" % (row,col))
            else:
                x[row,col] = scip.addVar(vtype = "I", lb = 1, ub = 9, name = "x(%s,%s)" % (row,col))
            var = x[row,col]
            #print("built var ", var.name, " with bounds: (%d,%d)"%(var.getLbLocal(), var.getUbLocal()))
    conshdlr = ALLDIFFconshdlr()
    # hoping to get called when all vars have integer values
    scip.includeConshdlr(conshdlr, "ALLDIFF", "All different constraint", propfreq = 1, enfopriority = -10, chckpriority = -10)
    # row constraints; also we specify the domain of all variables here
    # TODO/QUESTION: in principle domain is of course associated to the var and not the constraint. it should be "var.data"
    # But ideally that information would be handle by SCIP itself... the reason we can't is because domain holes is not implemented, right?
    domains = {}
    for row in range(9):
        vars = []
        for col in range(9):
            var = x[row,col]
            vars.append(var)
            # Initial domain: every integer between the local bounds.
            vals = set(range(int(round(var.getLbLocal())), int(round(var.getUbLocal())) + 1))
            domains[var.ptr()] = vals
        # this is kind of ugly, isn't it?
        cons = scip.createCons(conshdlr, "row_%d" % row)
        #print("in test: received a constraint with id ", id(cons)) ### DELETE
        cons.data = SimpleNamespace() # so that data behaves like an instance of a class (ie, cons.data.whatever is allowed)
        cons.data.vars = vars
        cons.data.domains = domains
        scip.addPyCons(cons)
    # col constraints
    for col in range(9):
        vars = []
        for row in range(9):
            var = x[row,col]
            vars.append(var)
        cons = scip.createCons(conshdlr, "col_%d"%col)
        cons.data = SimpleNamespace()
        cons.data.vars = vars
        cons.data.domains = domains
        scip.addPyCons(cons)
    # square constraints
    for idx1 in range(3):
        for idx2 in range(3):
            vars = []
            for row in range(3):
                for col in range(3):
                    var = x[3*idx1 + row, 3*idx2 + col]
                    vars.append(var)
            cons = scip.createCons(conshdlr, "square_%d-%d"%(idx1, idx2))
            cons.data = SimpleNamespace()
            cons.data.vars = vars
            cons.data.domains = domains
            scip.addPyCons(cons)
    #scip.setObjective()
    return scip, x
def test_main():
    """Build and solve the sudoku model, then print the solved grid (or a
    message when the instance is infeasible)."""
    model, squares = create_sudoku()
    model.setBoolParam("misc/allowstrongdualreds", False)
    model.setEmphasis(SCIP_PARAMEMPHASIS.CPSOLVER)
    model.setPresolve(SCIP_PARAMSETTING.OFF)
    model.optimize()
    if model.getStatus() != 'optimal':
        print('Sudoku is not feasible!')
        return
    print('\nSudoku solution:\n')
    for r in range(9):
        digits = [str(round(model.getVal(squares[r, c]))) for c in range(9)]
        print(' '.join(digits) + ' ')
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
| |
'''
Title: SAP Security Interactive tool 1.3
Author: Joe Friedrich
License: MIT
'''
print('Importing libraries')
import os
import pyperclip
from company import Company
from sys import stdin
def get_menu(list_of_things):
    '''
    Print the given list as a numbered menu (1 through len(list)),
    then delegate to get_menu_input and return its validated result.
    '''
    print('--------------------------------')
    for position, thing in enumerate(list_of_things, start=1):
        print(str(position) + ': ' + thing)
    print('--------------------------------')
    return get_menu_input(len(list_of_things))
def get_menu_input(menu_total):
    '''
    This takes the final number of possible selections (whole number).
    It takes user input and
        -if the input is between 1 and the last possible selection
            it returns the number the user typed in.
        -if the number is greater than the last possible selection
            or less than 1
            or not a whole number
            or not a number at all
            it asks whether the user wants to quit and returns 0 on 'y'/'Y'.
    '''
    def _confirm_quit():
        # Shared quit-confirmation prompt; this block was previously
        # duplicated verbatim in both error paths.
        print('\nAre you sure you are finished?')
        print('Press y and enter to quit.')
        quitting = input('Press any other key and enter to continue: ')
        return quitting in ['y', 'Y']

    while True:
        try:
            print('Type a number from the menu and hit enter.')
            user_input = int(input('Make any other entry and enter to stop: '))
            print('\n')
            if 1 <= user_input <= menu_total:
                return user_input
            else: #the numerical value is not a menu option
                if _confirm_quit():
                    return 0
        except ValueError: #if the value is a non-numeral or float
            if _confirm_quit():
                return 0
def get_role_input():
    '''
    Read pasted role lines from stdin until EOF (Ctrl+Z on Windows,
    Ctrl+D on Unix) and return them as a list of strings.
    This is just a placeholder for user input that is not in the form
    of get_menu_input.
    '''
    return stdin.readlines()
def find_and_sort_roles(company, user_input):
    '''
    Takes the company object and a list of strings (user_input).
    Each line is uppercased and filtered through the company's role regex;
    lines that do not match the pattern at all are skipped.
    Matched names are searched in that company's role_lookup.
    If it is a Regional role, the length of approvers will be > 1 and the
    user is prompted once for the region; otherwise the single approver
    is used.
    Collects tuples (name, description, approver).
    Returns the list of tuples sorted by approver name.
    '''
    user_region = -1
    roles_and_approvers = []
    for line in user_input:
        find_role = company.role_format.search(line.upper())
        if find_role is None:
            # BUG FIX: the original fell through to the not-found message and
            # called find_role.group() on None (AttributeError) whenever a
            # line did not match the role regex.
            continue
        role_not_found = True
        for row in company.role_lookup:
            if find_role.group() == row.name:
                if len(row.approvers) > 1:
                    # Regional role: ask for the region once and reuse it.
                    if user_region < 0:
                        print('\nIn which region does the user work?')
                        select_region = get_menu(company.region_lookup)
                        while select_region == 0:
                            print('\nPlease select a valid region. You cannot quit from here.\n')
                            select_region = get_menu(company.region_lookup)
                        user_region = select_region - 1
                    roles_and_approvers.append((row.name,
                                                row.description,
                                                row.approvers[user_region]))
                else:
                    roles_and_approvers.append((row.name,
                                                row.description,
                                                row.approvers[0]))
                role_not_found = False
        if role_not_found:
            print('\r\nRole ' + find_role.group() + ' was not found.')
    return sorted(roles_and_approvers, key = lambda entry: entry[2])
def parse_role_tuples(output, company, clipboard):
    '''
    Takes the list of tuples (output), company object, and the clipboard.
    Output is expected to be sorted by approver (output[2]).
    Clipboard should be blank.
    For every unique approver in the tuples, it will print and append a
    header to the clipboard.
    It will also append the email address to approver_emails.
    These are followed by the role names until a new approver name is
    found or the list ends.
    Returns a list of strings (approver_emails) and the clipboard.

    NOTE(review): reads the module-level global ``user_client`` (set in the
    main loop before this is called); calling it earlier raises NameError.
    '''
    # Approver labels that stand for "no real person": they still get a
    # header line but never an entry in the e-mail list.
    approvers_without_email = ['Do Not Assign',
                               'NO APPROVAL REQUIRED',
                               'No Approval Needed',
                               'Do Not Assign - Parent Role',
                               'Parent Role: ASSIGN ONLY CHILD ROLES FOR THIS ACCESS']
    current_approver = ''
    approver_emails = []
    for role_tuple in output:
        if(role_tuple[2] != current_approver):
            # New approver encountered: emit a header and record the e-mail.
            current_approver = role_tuple[2]
            print('\n' + user_client +
                  ' -- awaiting approval from ' + current_approver)
            clipboard += '\r\n' + user_client + ' -- awaiting approval from ' + current_approver + '\r\n'
            if current_approver not in approvers_without_email:
                if current_approver in company.email_lookup:
                    approver_emails.append(company.email_lookup[
                        current_approver])
                else:
                    approver_emails.append(current_approver +
                                           "'s email is missing")
        print(role_tuple[0] + '\t ' + role_tuple[1])
        clipboard += role_tuple[0] + '\t ' + role_tuple[1] + '\r\n'
    return approver_emails, clipboard
def single_approvers(company, email_list, clipboard):
    '''
    Takes the company object, a list of strings (email_list), and clipboard.
    Repeatedly offers the single-approver client menu; each selection
    appends that client's header to the clipboard and its approver to
    email_list, until the user declines.
    Returns list of strings (email_list) and clipboard.
    '''
    while True:
        print('\n*********Single Approver Client Section********************')
        print('***This will print your single client approvers UNTIL *****')
        print('*****you make a selection that is NOT on the menu.*********')
        print('***********************************************************')
        print('\nDoes the user need additional '
              'access in single approver clients?')
        # Menu text is the first column of each single_approver_lookup row.
        menu_options = [client[0] for client in company.single_approver_lookup]
        select_single_client = get_menu(menu_options)
        if select_single_client == 0:
            break
        selected_client = company.single_approver_lookup[select_single_client - 1]
        current_approver = selected_client[3]
        print('\n' + selected_client[1] + ' -- ' + selected_client[2] +
              ' -- awaiting approval from ' + current_approver + '\n' +
              selected_client[4])
        clipboard += '\r\n' + selected_client[1] + ' -- ' + selected_client[2] + ' -- awaiting approval from ' + current_approver + '\r\n' + selected_client[4] + '\r\n'
        #need a check for multiple email approvers
        if current_approver in company.email_lookup:
            email_list.append(company.email_lookup[current_approver])
        else:
            email_list.append(current_approver + "'s email is missing")
    return email_list, clipboard
def email_format(email_list):
    '''
    Takes a list of e-mail strings.
    Joins them with commas, prints the result wrapped in CRLF, and
    returns the same wrapped string.
    '''
    separator = ','
    formatted = '\r\n' + separator.join(email_list) + '\r\n'
    print(formatted)
    return formatted
#--------------------------End Local Functions--------------------------------
print('Loading companies.')
# Each Company bundles a name, its data directory, the regex recognizing
# its SAP role names, and the list of SAP clients it offers.
company1 = Company('Company1',
                   r'/home/joe/Code/github/Excel-Matrix-Rewrite/Company1Data',
                   r'[A-Z]{1,3}(:|_)\S+',
                   ['ProdC1', 'QaC1', 'ProdC1/QaC1', 'DevC1', 'QaC1/DevC1'])
company2 = Company('Company2',
                   r'/home/joe/Code/github/Excel-Matrix-Rewrite/Company2Data',
                   r'Z:\S{4}:\S{7}:\S{4}:\S',
                   ['ProdC2', 'QaC2', 'ProdC2/QaC2', 'DevC2', 'QaC1/DevC2'])
company3 = Company('Company3',
                   r'/home/joe/Code/github/Excel-Matrix-Rewrite/Company3Data',
                   r'\S+',
                   ['ProdC3', 'QaC3', 'ProdC3/QaC3', 'DevC3', 'QaC3/DevC3'])
list_companies = [company1, company2, company3]
company_names = [company.name for company in list_companies]
#----------------------------Begin Program------------------------------------
# Interactive main loop: choose company -> paste roles -> choose client,
# then optionally add single-approver clients.  The assembled summary and
# approver e-mail list end up on the clipboard for pasting into a ticket.
while (True):
    print('\n************Welcome to the SAP Access Request Tool************')
    if len(company_names) == 1:
        print('This request is for ' + company_names[0] + '.')
        company = list_companies[0]
    else:
        print('To which company does the user belong?')
        select_company = get_menu(company_names)
        if select_company == 0:
            break
        company = list_companies[select_company - 1]
    print("\nPaste the roles in, one per line.")
    print("On a new line, hit Ctrl+Z and Enter to continue.")
    requested_roles = get_role_input()
    role_tuples = find_and_sort_roles(company, requested_roles)
    clipboard = ''
    pyperclip.copy(clipboard) #Clears the clipboard. New data coming.
    if requested_roles != []:
        print('\nIn which SAP client does the user want the access?')
        select_client = get_menu(company.clients)
        if select_client == 0:
            break
        # user_client is read as a module global by parse_role_tuples.
        user_client = company.clients[select_client - 1]
        email_list, clipboard = parse_role_tuples(role_tuples, company, clipboard)
    else:
        email_list = []
    email_list, clipboard = single_approvers(company, email_list, clipboard)
    clipboard += email_format(email_list)
    pyperclip.copy(clipboard)
    print('\n**YOUR OUTPUT IS IN THE CLIPBOARD. PASTE IT INTO YOUR TICKET.**')
    pause = input('Press enter to continue.')
    os.system('clear') #clears a terminal/powershell screen
    #os.system('cls') #clears the cmd screen [Windows]
| |
"""The texture module provide some utilities to generate, analyse and plot
crystallographic textures.
"""
import numpy as np
from pymicro.crystal.lattice import Symmetry, Lattice, HklPlane, SlipSystem
from pymicro.crystal.microstructure import Orientation, Grain, Microstructure
from matplotlib import pyplot as plt, colors, cm
class PoleFigure:
"""A class to create pole figures.
A pole figure is a useful tool to plot multiple crystal orientations,
either in the sample coordinate system (direct pole figure) or
alternatively plotting a particular direction in the crystal
coordinate system (inverse pole figure).
"""
def __init__(self, microstructure=None, lattice=None, axis='Z', hkl='111',
proj='stereo', verbose=False):
"""
Create an empty PoleFigure object associated with a Microstructure.
.. warning::
Any crystal structure is now supported (you have to set the proper
crystal lattice) but it has only really be tested for cubic.
:param microstructure: the :py:class:`~pymicro.crystal.microstructure.Microstructure`
containing the collection of orientations to plot (None by default).
:param lattice: the crystal :py:class:`~pymicro.crystal.lattice.Lattice`.
:param str axis: the pole figure axis ('Z' by default), vertical axis in
the direct pole figure and direction plotted on the inverse pole figure.
:param str hkl: slip plane family ('111' by default)
:param str proj: projection type, can be either 'stereo' (default) or 'flat'
:param bool verbose: verbose mode (False by default)
"""
self.proj = proj
self.axis = axis
self.map_field = None
if microstructure:
self.microstructure = microstructure
else:
self.microstructure = Microstructure()
if lattice:
self.lattice = lattice
else:
self.lattice = Lattice.cubic(1.0)
self.family = None
self.poles = []
self.set_hkl_poles(hkl)
self.verbose = verbose
self.resize_markers = False
self.mksize = 50
self.pflegend = False
self.x = np.array([1., 0., 0.])
self.y = np.array([0., 1., 0.])
self.z = np.array([0., 0., 1.])
# list all crystal directions
#self.c001s = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.float)
#self.c011s = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, -1, 1], [-1, 0, 1], [-1, 1, 0]],
# dtype=np.float) / np.sqrt(2)
#self.c111s = np.array([[1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, 1]], dtype=np.float) / np.sqrt(3)
    def get_orientations(self):
        """Get the list of orientations in the PoleFigure.

        Delegates to the attached microstructure.

        :return: a list of `Orientation` instances.
        """
        return self.microstructure.get_grain_orientations()
def set_hkl_poles(self, hkl='111'):
"""Set the pole (aka hkl planes) list to to use in the `PoleFigure`.
The list of poles can be given by the family type or directly by a list
of `HklPlanes` objects.
:params str/list hkl: slip plane family ('111' by default)
"""
if type(hkl) is str:
self.family = hkl # keep a record of this
hkl_planes = self.lattice.get_hkl_family(self.family)
elif type(hkl) is list:
self.family = None
hkl_planes = hkl
self.poles = hkl_planes
def set_map_field(self, field_name, field=None, field_min_level=None, field_max_level=None, lut='hot'):
"""Set the PoleFigure to color poles with the given field.
This method activates a mode where each symbol in the pole figure
is color coded with respect to a field, which can be either the
grain id, or a given field given in form of a list. If the grain
volume or strain. For the grain id, the color is set according the
each grain id in the :py:class:`~pymicro.crystal.microstructure.Microstructure`
and the :py:meth:`~pymicro.crystal.microstructure.rand_cmap` function.
For a given field, the color is set from the lookup table and
according to the value in the given list. The list must contain a
record for each grain. Minimum and maximum value to map the field
values and the colors can be specify, if not they are directly taken
as the min() and max() of the field.
:param str field_name: The field name, could be 'grain_id', 'ipf',
'grain_size' or any other name describing the field.
:param list field: A list containing a record for each grain.
:param float field_min_level: The minimum value to use for this field.
:param float field_max_level: The maximum value to use for this field.
:param str lut: A string describing the colormap to use (among
matplotlib ones available).
:raise ValueError: If the given field does not contain enough values.
"""
self.map_field = field_name
self.lut = lut
if field_name in ['grain_id', 'ipf']:
self.field = self.microstructure.get_grain_ids()
elif field_name in ['grain_size', 'volume']:
self.field = self.microstructure.get_grain_volumes()
else:
if len(field) != self.microstructure.get_number_of_grains():
raise ValueError('The field must contain exactly one record '
'for each grain in the microstructure')
self.field = field
if not field_min_level:
self.field_min_level = self.field.min()
else:
self.field_min_level = field_min_level
if not field_max_level:
self.field_max_level = self.field.max()
else:
self.field_max_level = field_max_level
def plot_pole_figures(self, plot_sst=True, display=True, save_as='pdf'):
"""Plot and save a picture with both direct and inverse pole figures.
:param bool plot_sst: controls wether to plot the full inverse pole
figure or only the standard stereographic triangle (True by default).
:param bool display: display the plot if True, else save a picture of
the pole figures (True by default)
:param str save_as: File format used to save the image such as pdf
or png ('pdf' by default)
::
micro = Microstructure(name = 'AlLi_sam8')
micro.grains.append(Grain(11, Orientation.from_euler(np.array([262.364, 16.836, 104.691]))))
Al_fcc = Lattice.face_centered_cubic(0.405) # not really necessary since default lattice is cubic
pf = PoleFigure(microstructure=micro, proj='stereo', lattice=Al_fcc, hkl='111')
pf.mksize = 12
pf.set_map_field('grain_id')
pf.pflegend = True # this works well for a few grains
pf.plot_pole_figures()
.. figure:: _static/AlLi_sam8_pole_figure.png
:width: 750 px
:height: 375 px
:alt: AlLi_sam8_pole_figure
:align: center
A 111 pole figure plotted for a single crystal orientation.
"""
fig = plt.figure(figsize=(10, 5))
# direct PF
ax1 = fig.add_subplot(121, aspect='equal')
self.plot_pf(ax=ax1, mk='o', ann=False)
# inverse PF
ax2 = fig.add_subplot(122, aspect='equal')
if plot_sst:
self.plot_sst(ax=ax2)
else:
self.plot_ipf(ax=ax2)
if display:
plt.show()
else:
plt.savefig('%s_pole_figure.%s' % (self.microstructure.get_sample_name(), save_as), format=save_as)
def plot_crystal_dir(self, c_dir, **kwargs):
"""Function to plot a crystal direction on a pole figure.
:param c_dir: A vector describing the crystal direction.
:param dict kwargs: a dictionnary of keyword/values to control the
plot, it should at least contain a reference to a pyplot axes
to draw the pole using keyword 'ax'.
:raise ValueError: if the projection type is not supported
"""
if c_dir[2] < 0:
c_dir *= -1 # make unit vector have z>0
if self.proj == 'flat':
cp = c_dir
elif self.proj == 'stereo':
c = c_dir + self.z
c /= c[2] # SP'/SP = r/z with r=1
cp = c
# cp = np.cross(c, self.z)
else:
raise ValueError('Error, unsupported projection type', self.proj)
ax = kwargs.get('ax')
mk = kwargs.get('mk', 'o')
edge_col = kwargs.get('markeredgecolor', 'k')
ann = kwargs.get('ann', None)
lab = kwargs.get('lab', '')
col = kwargs.get('col', 'k')
col = col.reshape(1,-1)
#ax.plot(cp[0], cp[1], linewidth=0, markerfacecolor=col, marker=mk,
# markeredgecolor=edge_col, markersize=self.mksize, label=lab)
mksize = kwargs.get('mksize', self.mksize)
ax.scatter(cp[0], cp[1],
linewidth=0, c=col, marker=mk,
edgecolors=edge_col, s=mksize, label=lab)
# Next 3 lines are necessary in case c_dir[2]=0, as for Euler angles [45, 45, 0]
if c_dir[2] < 0.000001:
ax.scatter(-cp[0], -cp[1],
linewidth=0, c=col, marker=mk, s=mksize, label=lab)
if ann:
ax.annotate(c_dir.view(), (cp[0], cp[1] - 0.1), xycoords='data',
fontsize=8, horizontalalignment='center',
verticalalignment='center')
def plot_line_between_crystal_dir(self, c1, c2, ax=None, steps=11, col='k'):
"""Plot a curve between two crystal directions.
The curve is actually composed of several straight lines segments to
draw from direction 1 to direction 2.
:param c1: vector describing crystal direction 1
:param c2: vector describing crystal direction 2
:param ax: a reference to a pyplot ax to draw the line
:param int steps: number of straight lines composing the curve
(11 by default)
:param col: line color (black by default)
"""
path = np.zeros((steps, 2), dtype=float)
for j, i in enumerate(np.linspace(0., 1., steps)):
ci = i * c1 + (1 - i) * c2
ci /= np.linalg.norm(ci)
if self.proj == 'stereo':
ci += self.z
ci /= ci[2]
path[j, 0] = ci[0]
path[j, 1] = ci[1]
ax.plot(path[:, 0], path[:, 1],
color=col, markersize=self.mksize, linewidth=2)
def plot_pf_background(self, ax, labels=True):
"""Function to plot the background of the pole figure.
:param ax: a reference to a pyplot ax to draw the backgroud.
:param bool labels: add lables to axes (True by default).
"""
an = np.linspace(0, 2 * np.pi, 100)
ax.plot(np.cos(an), np.sin(an), 'k-')
ax.plot([-1, 1], [0, 0], 'k-')
ax.plot([0, 0], [-1, 1], 'k-')
axe_labels = ['X', 'Y', 'Z']
if self.axis == 'Z':
(h, v, u) = (0, 1, 2)
elif self.axis == 'Y':
(h, v, u) = (0, 2, 1)
else:
(h, v, u) = (1, 2, 0)
if labels:
ax.annotate(axe_labels[h], (1.01, 0.0), xycoords='data', fontsize=16,
horizontalalignment='left', verticalalignment='center')
ax.annotate(axe_labels[v], (0.0, 1.01), xycoords='data', fontsize=16,
horizontalalignment='center', verticalalignment='bottom')
def plot_pf_dir(self, c_dir, **kwargs):
"""Plot a crystal direction in a direct pole figure.
:param c_dir: a vector describing the crystal direction.
:param dict kwargs: a dictionnary of keyword/values to control the
plot, it should at least contain a reference to a pyplot axes
to draw the pole using keyword 'ax'.
"""
if self.axis == 'Z':
(h, v, u) = (0, 1, 2)
elif self.axis == 'Y':
(h, v, u) = (0, 2, 1)
else:
(h, v, u) = (1, 2, 0)
# the direction to plot is given by c_dir[h,v,u]
if self.verbose:
print('corrected for pf axis:', c_dir[[h, v, u]])
self.plot_crystal_dir(c_dir[[h, v, u]], **kwargs)
    def plot_pf(self, ax=None, mk='o', ann=False):
        """Create the direct pole figure.

        Every pole of the selected hkl family is plotted for each grain of
        the associated microstructure, optionally sizing the markers with
        the grain volume and coloring them with the configured map field.

        :param ax: a reference to a pyplot ax to draw the poles.
        :param mk: marker used to plot the poles (disc by default).
        :param bool ann: Annotate the pole with the coordinates of the vector
            if True (False by default).
        """
        self.plot_pf_background(ax)
        kwargs = {'ax': ax, 'mk': mk, 'ann': ann}
        if self.resize_markers:
            # compute the max grain volume to normalize
            volume_max = max(self.microstructure.get_grain_volumes())
        for grain in self.microstructure.grains:
            g = Orientation.Rodrigues2OrientationMatrix(grain['orientation'])
            gt = g.transpose()
            if self.resize_markers:
                # marker size scales with the square root of the relative volume
                kwargs['mksize'] = 0.15 * np.sqrt(grain['volume'] / volume_max) * 1000
            label = ''
            if self.map_field == 'grain_id':
                label = 'grain ' + str(grain['idnumber'])
            kwargs['lab'] = label
            for i, hkl_plane in enumerate(self.poles):
                if i > 0:
                    # only label the first pole of each grain so the legend
                    # shows one entry per grain
                    kwargs['lab'] = ''
                c = hkl_plane.normal()
                c_rot = gt.dot(c)  # plane normal rotated to the sample frame
                if self.verbose:
                    h, k, l = hkl_plane.miller_indices()
                    print('plotting (%d%d%d) with normal %s in sample CS '
                          '(corrected for pf axis): %s' % (h, k, l, c, c_rot))
                col = self.get_color_from_field(grain)
                kwargs['col'] = col
                self.plot_pf_dir(c_rot, **kwargs)
        ax.axis([-1.1, 1.1, -1.1, 1.1])
        if self.pflegend and self.map_field == 'grain_id':
            ax.legend(bbox_to_anchor=(0.05, 1), loc=1, numpoints=1, prop={'size': 10})
        ax.axis('off')
        ax.set_title('{%s} direct %s projection' % (self.family, self.proj))
    def create_pf_contour(self, ax=None, ang_step=10):
        """Compute the distribution of orientation and plot it using contouring.

        This plots the distribution of orientation in the microstructure
        associated with this PoleFigure instance, as a continuous
        distribution using angular binning with the specified step.
        The distribution is constructed at runtime by discretizing the
        angular space and counting the number of poles in each bin.
        Then the plot_pf_contour method is called to actually plot the data.

        .. warning::

           This function has not been tested properly, use at your own risk.

        :param ax: a reference to a pyplot ax to draw the contours.
        :param int ang_step: angular step in degrees to use for constructing
            the orientation distribution data (10 degrees by default)
        """
        # discretise the angular space (azimuth and altitude)
        ang_step *= np.pi / 180  # change to radians
        n_phi = int(1 + 2 * np.pi / ang_step)
        n_psi = int(1 + 0.5 * np.pi / ang_step)
        phis = np.linspace(0, 2 * np.pi, n_phi)
        psis = np.linspace(0, np.pi / 2, n_psi)
        xv, yv = np.meshgrid(phis, psis)
        # values[j, i] counts the poles falling in the (psi_j, phi_i) bin
        values = np.zeros((n_psi, n_phi), dtype=int)
        for grain in self.microstructure.grains:
            g = Orientation.Rodrigues2OrientationMatrix(grain['orientation'])
            gt = g.transpose()
            for hkl_plane in self.poles:
                c = hkl_plane.normal()
                c_rot = gt.dot(c)
                # handle poles pointing down
                if c_rot[2] < 0:
                    c_rot *= -1  # make unit vector have z>0
                # azimuth angle in [0, 2*pi[, picking the quadrant from the
                # sign of the y component
                if c_rot[1] >= 0:
                    phi = np.arccos(c_rot[0] / np.sqrt(c_rot[0] ** 2 +
                                                       c_rot[1] ** 2))
                else:
                    phi = 2 * np.pi - np.arccos(c_rot[0] /
                                                np.sqrt(c_rot[0] ** 2 +
                                                        c_rot[1] ** 2))
                psi = np.arccos(c_rot[2])  # since c_rot is normed
                i_phi = int((phi + 0.5 * ang_step) / ang_step) % n_phi
                j_psi = int((psi + 0.5 * ang_step) / ang_step) % n_psi
                values[j_psi, i_phi] += 1
        if self.proj == 'stereo':  # double check which one is flat/stereo
            x = (2 * yv / np.pi) * np.cos(xv)
            y = (2 * yv / np.pi) * np.sin(xv)
        else:
            x = np.sin(yv) * np.cos(xv)
            y = np.sin(yv) * np.sin(xv)
        # close the pole figure by duplicating azimuth=0
        values[:, -1] = values[:, 0]
        self.plot_pf_contour(ax, x, y, values)
def plot_pf_contour(self, ax, x, y, values):
"""Plot the direct pole figure using contours.
.. warning::
This function has not been tested properly, use at your own risk.
"""
self.plot_pf_background(ax)
ax.contourf(x, y, values)
# ax.plot(x, y, 'ko')
ax.axis([-1.1, 1.1, -1.1, 1.1])
ax.axis('off')
ax.set_title('{%s} direct %s projection' % (self.family, self.proj))
    def sst_symmetry(self, v):
        """Transform a given vector according to the lattice symmetry associated
        with the pole figure.

        This function transforms a vector so that it lies in the smallest
        symmetry equivalent zone (the standard stereographic triangle).

        :param v: the vector to transform.
        :return: the transformed vector, or None if the lattice symmetry is
            neither cubic nor hexagonal.
        """
        # get the symmetry from the lattice associated with the pole figure
        symmetry = self.lattice._symmetry
        if symmetry is Symmetry.cubic:
            return PoleFigure.sst_symmetry_cubic(v)
        elif symmetry is Symmetry.hexagonal:
            # try each symmetry operator in turn until the transformed vector
            # falls within the 30 degree sector of the hexagonal sst
            syms = symmetry.symmetry_operators()
            for i in range(syms.shape[0]):
                sym = syms[i]
                v_sym = np.dot(sym, v)
                # look at vectors pointing up
                if v_sym[2] < 0:
                    v_sym *= -1
                # now evaluate if projection is in the sst
                if v_sym[1] < 0 or v_sym[0] < 0:
                    continue
                elif v_sym[1] / v_sym[0] > np.tan(np.pi / 6):
                    continue
                else:
                    break
            # NOTE(review): if no operator matched, the last v_sym is
            # returned as-is -- confirm the operator set always contains a
            # matching element
            return v_sym
        else:
            print('unsupported symmetry: %s' % symmetry)
            return None
@staticmethod
def sst_symmetry_cubic(z_rot):
"""Transform a given vector according to the cubic symmetry.
This function transform a vector so that it lies in the unit SST triangle.
:param z_rot: vector to transform.
:return: the transformed vector.
"""
if z_rot[0] < 0:
z_rot[0] = -z_rot[0]
if z_rot[1] < 0:
z_rot[1] = -z_rot[1]
if z_rot[2] < 0:
z_rot[2] = -z_rot[2]
if z_rot[2] > z_rot[1]:
z_rot[1], z_rot[2] = z_rot[2], z_rot[1]
if z_rot[1] > z_rot[0]:
z_rot[0], z_rot[1] = z_rot[1], z_rot[0]
if z_rot[2] > z_rot[1]:
z_rot[1], z_rot[2] = z_rot[2], z_rot[1]
return np.array([z_rot[1], z_rot[2], z_rot[0]])
    def get_color_from_field(self, grain):
        """Get the color of the given grain according to the chosen field.

        This function will return the color associated with the given grain.
        Depending on how the pole figure has been configured (see the
        `set_map_field` function), it will be obtained from:

         * the grain id, according to the `Microstructure.rand_cmap` function
         * ipf the colour will reflect the orientation according to the IPF
           coloring scheme
         * the field value mapped on a pyplot color map if the lut field of
           the PoleFigure instance is a string.
         * a color directly read from the lut field; in this case the field
           value must reflect the category of the given grain.

        :param grain: the `Grain` instance.
        :return: the color as a 3 element numpy array representing the rgb values.
        """
        if self.map_field:
            if self.map_field == 'grain_id':
                col = Microstructure.rand_cmap().colors[grain['idnumber']]
            elif self.map_field == 'ipf':
                # pick the sample axis used by the ipf coloring scheme
                if self.axis == 'X':
                    axis = np.array([1., 0., 0.])
                elif self.axis == 'Y':
                    axis = np.array([0., 1., 0.])
                else:
                    axis = np.array([0., 0., 1.])
                col = Orientation.from_rodrigues(
                    grain['orientation']).get_ipf_colour(axis=axis)
            else:
                # retrieve the position of the grain in the list
                rank = self.microstructure.get_grain_ids().tolist().index(grain['idnumber'])
                if type(self.lut) is str:
                    # get the color map from pyplot
                    # NOTE(review): cm.get_cmap is deprecated in recent
                    # matplotlib releases -- confirm the pinned version
                    color_map = cm.get_cmap(self.lut, 256)
                    # use the field value for this grain and the field range bounds,
                    # clamping the color index into [0, 255]
                    color = int(255 * max(min((self.field[rank] - self.field_min_level) / float(
                        self.field_max_level - self.field_min_level), 1.0), 0.0))
                    col = color_map(np.arange(256))[color]
                else:
                    col = self.lut[self.field[rank]]  # directly access the color
            return col
        else:
            # no field mapping configured: default to black
            return np.array([0., 0., 0.])
def plot_sst(self, **kwargs):
""" Create the inverse pole figure in the unit standard triangle.
:param ax: a reference to a pyplot ax to draw the poles.
:param mk: marker used to plot the poles (square by default).
:param bool ann: Annotate the pole with the coordinates of the vector
if True (False by default).
"""
# first draw the boundary of the symmetry domain limited by 3 hkl plane
# normals, called here A, B and C
symmetry = self.lattice.get_symmetry()
ax = kwargs.get('ax')
if symmetry is Symmetry.cubic:
sst_poles = [(0, 0, 1), (1, 0, 1), (1, 1, 1)]
ax.axis([-0.05, 0.45, -0.05, 0.40])
elif symmetry is Symmetry.hexagonal:
sst_poles = [(0, 0, 1), (2, -1, 0), (1, 0, 0)]
ax.axis([-0.05, 1.05, -0.05, 0.6])
else:
print('unsuported symmetry: %s' % symmetry)
A = HklPlane(*sst_poles[0], lattice=self.lattice)
B = HklPlane(*sst_poles[1], lattice=self.lattice)
C = HklPlane(*sst_poles[2], lattice=self.lattice)
self.plot_line_between_crystal_dir(A.normal(), B.normal(), ax=ax, col='k')
self.plot_line_between_crystal_dir(B.normal(), C.normal(), ax=ax, col='k')
self.plot_line_between_crystal_dir(C.normal(), A.normal(), ax=ax, col='k')
# display the 3 crystal axes
poles = [A, B, C]
v_align = ['top', 'top', 'bottom']
for i in range(3):
hkl = poles[i]
c_dir = hkl.normal()
c = c_dir + self.z
c /= c[2] # SP'/SP = r/z with r=1
pole_str = '%d%d%d' % hkl.miller_indices()
if symmetry is Symmetry.hexagonal:
pole_str = '%d%d%d%d' % HklPlane.three_to_four_indices(*hkl.miller_indices())
ax.annotate(pole_str, (c[0], c[1] - (2 * (i < 2) - 1) * 0.01), xycoords='data',
fontsize=12, horizontalalignment='center', verticalalignment=v_align[i])
# now plot the sample axis
if self.resize_markers:
# compute the max grain volume to normalize
volume_max = max(self.microstructure.get_grain_volumes())
for grain in self.microstructure.grains:
g = Orientation.Rodrigues2OrientationMatrix(grain['orientation'])
if self.resize_markers:
kwargs['mksize'] = 0.15 * np.sqrt(grain['volume'] / volume_max) * 1000
# compute axis and apply SST symmetry
if self.axis == 'Z':
axis = self.z
elif self.axis == 'Y':
axis = self.y
else:
axis = self.x
axis_rot = self.sst_symmetry(g.dot(axis))
label = ''
if self.map_field == 'grain_id':
label = 'grain ' + str(grain['idnumber'])
kwargs['lab'] = label
kwargs['col'] = self.get_color_from_field(grain)
self.plot_crystal_dir(axis_rot, **kwargs)
if self.verbose:
print('plotting %s in crystal CS: %s' % (self.axis, axis_rot))
ax.axis('off')
ax.set_title('%s-axis SST inverse %s projection' % (self.axis, self.proj))
def plot_ipf(self, **kwargs):
""" Create the inverse pole figure for direction Z.
:param ax: a reference to a pyplot ax to draw the poles.
:param mk: marker used to plot the poles (square by default).
:param bool ann: Annotate the pole with the coordinates of the vector
if True (False by default).
"""
ax = kwargs.get('ax')
self.plot_pf_background(ax, labels=False)
# now plot the sample axis
for grain in self.microstructure.grains:
g = Orientation.Rodrigues2OrientationMatrix(grain['orientation'])
if self.axis == 'Z':
axis = self.z
elif self.axis == 'Y':
axis = self.y
else:
axis = self.x
axis_rot = g.dot(axis)
kwargs['col'] = self.get_color_from_field(grain)
self.plot_crystal_dir(axis_rot, **kwargs)
if self.verbose:
print('plotting ', self.axis, ' in crystal CS:', axis_rot)
ax.axis([-1.1, 1.1, -1.1, 1.1])
ax.axis('off')
ax.set_title('%s-axis inverse %s projection' % (self.axis, self.proj))
@staticmethod
def plot(orientations, **kwargs):
"""Plot a pole figure (both direct and inverse) for a list of crystal
orientations.
:param orientations: the list of crystalline
:py:class:`~pymicro.crystal.microstructure.Orientation` to
plot.
"""
micro = Microstructure(autodelete=True)
if isinstance(orientations, list):
for i in range(len(orientations)):
micro.add_grains([o.euler for o in orientations])
elif isinstance(orientations, Orientation):
micro.add_grains([orientations.euler])
else:
print('Unrecognized argument: %s' % orientations.__repr__)
pf = PoleFigure(microstructure=micro, **kwargs)
pf.plot_pole_figures(display=True)
@staticmethod
def plot_euler(phi1, Phi, phi2, **kwargs):
"""Directly plot a pole figure for a single orientation given its
three Euler angles.
::
PoleFigure.plot_euler(10, 20, 30)
:param float phi1: first Euler angle (in degree).
:param float Phi: second Euler angle (in degree).
:param float phi2: third Euler angle (in degree).
"""
PoleFigure.plot(Orientation.from_euler(np.array([phi1, Phi, phi2])), **kwargs)
class TaylorModel:
    """A class to carry out texture evolution with the Taylor model.

    Briefly explain the full constrained Taylor model [ref 1938].
    """

    def __init__(self, microstructure):
        """Initialize the model for the given microstructure."""
        self.micro = microstructure  # Microstructure instance
        self.slip_systems = SlipSystem.get_slip_systems('111')
        self.nact = 5  # number of active slip systems in one grain to accommodate the plastic strain
        self.dt = 1.e-3
        self.max_time = 0.001  # sec
        self.time = 0.0
        self.L = np.array([[-0.5, 0.0, 0.0], [0.0, -0.5, 0.0], [0.0, 0.0, 1.0]])  # velocity gradient

    def compute_step(self, g, check=True):
        """Compute the plastic slip increments and plastic spin for one grain.

        The slip increments on the `nact` most active slip systems (ranked by
        Schmid factor) are obtained by solving L = sum(gamma_i * m_i) in the
        least squares sense.

        :param g: the grain to process; must provide `schmid_factor` and an
            `orientation` exposing the slip system tensor methods.
        :param bool check: verify that the computed increments reproduce the
            prescribed velocity gradient (True by default).
        :return: a tuple (Wc, dgammas) with the plastic spin tensor and the
            slip increments.
        :raise ValueError: if the consistency check fails.
        """
        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        Wc = np.zeros((3, 3), dtype=float)
        # compute Schmid factors
        SF = []
        for s in self.slip_systems:
            SF.append(g.schmid_factor(s))
        ss_rank = np.zeros(self.nact, dtype=int)
        # rank the slip systems by decreasing Schmid factor; zero out each
        # maximum so the next argmax picks the following one
        for i in range(self.nact):
            ss_rank[i] = np.argmax(SF)
            print('index of ss % d is %d' % (i, ss_rank[i]))
            SF[ss_rank[i]] = 0.0
        # now we need to solve: L = gam1*m1 + gam2*m2+ ...
        iu = np.triu_indices(3)  # indices of the upper part of a 3x3 matrix
        L = self.L[iu][:5]  # form a vector with the velocity gradient components
        M = np.zeros((5, self.nact), dtype=float)
        for i in range(len(ss_rank)):
            s = self.slip_systems[ss_rank[i]]
            m = g.orientation.slip_system_orientation_tensor(s)
            # m = g.orientation.slip_system_orientation_strain_tensor(s)
            M[0, i] += m[0, 0]
            M[1, i] += m[0, 1]
            M[2, i] += m[0, 2]
            M[3, i] += m[1, 1]
            M[4, i] += m[1, 2]
            # M[5, i] += m[2, 2]
        dgammas = np.linalg.lstsq(M, L, rcond=1.e-3)[0]
        '''
        U, s, V = np.linalg.svd(M)  # solve by SVD
        print('U:\n')
        print(U)
        print('s:\n')
        print(s)
        print('V:\n')
        print(V)
        pinv_svd = np.dot(np.dot(V.T, np.linalg.inv(np.diag(s))), U.T)
        dgammas_svd = np.dot(pinv_svd, L)  # solving Ax=b computing x = A^-1*b
        print('dgammas (SVD) =', dgammas_svd)
        '''
        print('dgammas (LST) =', dgammas)
        if check:
            # check consistency: the slip increments must rebuild L
            Lcheck = np.zeros((3, 3), dtype=float)
            for i in range(len(ss_rank)):
                s = self.slip_systems[ss_rank[i]]
                ms = g.orientation.slip_system_orientation_tensor(s)
                # ms = g.orientation.slip_system_orientation_strain_tensor(s)
                Lcheck += dgammas[i] * ms
            print('check:', np.sum(Lcheck - self.L), '\n', Lcheck)
            if abs(np.sum(Lcheck - self.L)) > 1e-1:
                raise ValueError(
                    'Problem with solving for plastic slip, trying to increase the number of active slip systems')
        # compute the plastic spin
        for i in range(len(ss_rank)):
            s = self.slip_systems[ss_rank[i]]
            qs = g.orientation.slip_system_orientation_rotation_tensor(s)
            Wc += dgammas[i] * qs
        print('plastic spin:\n', Wc)
        return Wc, dgammas
| |
# dependency.py
# Author: Dixon Crews
# CSC 505-001, Fall 2016
# Homework 3, #4
###############################################################################
# Import needed library
import operator
###############################################################################
# Define the Node class
# Follows the same guidelines as the algorithm from our book,
# giving nodes a color, "pi" which is the predecessor,
# d which is the discovery time, and f which is the finish time.
class Node:
    """A graph vertex following the CLRS DFS conventions."""

    def __init__(self, value, color=None, pi=None, d=None, f=None):
        # the topic name this vertex represents
        self.value = value
        # DFS bookkeeping: visit color, predecessor, discovery/finish times
        self.color = color
        self.pi = pi
        self.d = d
        self.f = f
        # adjacency list holding the keys of the neighboring vertices
        self.adjNodes = []
###############################################################################
# Define the transpose() function
# This function reverses the directed edges in the given graph
def transpose(graph):
# List of tuples representing edges
# (a,b) = directed edge from a to b
edges = []
# Get all edges, then clear adjacency lists
for node in graph:
nodeObj = graph[node]
for edge in nodeObj.adjNodes:
edges.append((node,edge))
nodeObj.adjNodes.clear()
# Re-add all edges in reverse
for edge in edges:
graph[edge[1]].adjNodes.append(edge[0])
# Define the scc_dfs() function
# This function is used when finding SCCs, it uses the
# reverseFinishingTime parameter to look at the vertices
# in that order. Follows the algorithm from our book, p. 604
def scc_dfs(graph, reverseFinishingTime):
    """Second DFS pass over the transposed graph, collecting components.

    Vertices are considered in order of decreasing finish time from the
    first DFS pass; every DFS tree found this way is one component.

    :param graph: dict mapping topic -> Node (edges already reversed).
    :param reverseFinishingTime: vertex keys sorted by decreasing finish time.
    :return: a list of sets of vertex keys, one set per component.
    """
    # Color every node white, predecessor = none
    for node in graph:
        graph[node].color = "white"
        graph[node].pi = None
    # Declare the global time variable (shared with dfs_visit), set to 0
    global time
    time = 0
    # List to hold our components
    components = []
    # Consider nodes in reverse finishing time
    for node in reverseFinishingTime:
        if(graph[node].color == "white"):
            # Visit node
            dfs_visit(graph, graph[node])
            # Get new components, add to our list
            # comp is a set of new components
            # This is not a super efficient way to do this,
            # but it works: collect every finished (black) vertex ...
            comp = set()
            for item in graph:
                if(graph[item].color == "black"):
                    comp.add(item)
            # ... then don't include items we already found earlier
            for item in components:
                comp = comp - item
            # Add the new components
            components.append(comp)
    return components
# Define the dfs() function
# Follows algorithm from p. 604 of textbook
def dfs(graph):
    """Run a full depth-first search over the graph (CLRS p. 604)."""
    # initialization: every vertex undiscovered, with no predecessor
    for key in graph:
        vertex = graph[key]
        vertex.color = "white"
        vertex.pi = None
    # reset the discovery/finish clock shared with dfs_visit()
    global time
    time = 0
    # visit every vertex that is still undiscovered
    for key in graph:
        if graph[key].color == "white":
            dfs_visit(graph, graph[key])
# Define the dfs_visit() function
# Follows algorithm from p. 604 of textbook
def dfs_visit(graph, node):
    """Recursively visit `node`, recording discovery/finish times.

    Uses the module-global `time` counter shared with dfs()/scc_dfs().
    """
    global time
    time += 1
    node.d = time  # discovery time
    node.color = "gray"  # gray: discovered but not yet finished
    for edge in node.adjNodes:
        if(graph[edge].color == "white"):
            graph[edge].pi = node.value
            dfs_visit(graph, graph[edge])
    node.color = "black"  # black: fully explored
    time = time + 1
    node.f = time  # finish time
###############################################################################
# Create lists to store topics, dependencies
topics = []
dependencies = []
# Read input from stdin until EOF
while True:
    try:
        # Store everything in topics list for now
        line = input()
        topics.append(line)
    except EOFError:
        break
# Get n (number of topics) and m (number of dependencies)
n = int(topics[0])
m = int(topics[n+1])
# Clean up lists, separate into two lists
dependencies = topics[n+2:]
topics = topics[1:n+1]
# Store graph as dictionary
# keys = topic, value = Node object
graph = {}
# Add topics to dictionary
for item in topics:
    graph[item] = Node(item)
# Create edges in graph: each line "a b" is a directed edge a -> b
for item in dependencies:
    split = item.split(" ")
    graph[split[0]].adjNodes.append(split[1])
# Call dfs() to compute the finish times (first pass)
dfs(graph)
# Call transpose(), reverse edges
transpose(graph)
# Get nodes in order of reverse finishing time
reverseFinishingTime = []
# Add to list as a tuple
# (topic, finishing time)
for node in graph:
    reverseFinishingTime.append((graph[node].value,graph[node].f))
# Sort in reverse order by finishing time
reverseFinishingTime.sort(key=operator.itemgetter(1), reverse=True)
# Get rid of finishing times in tuple
reverseFinishingTime = [x[0] for x in reverseFinishingTime]
# Call scc_dfs() (second pass over the transposed graph)
components = scc_dfs(graph, reverseFinishingTime)
# Find the strongly connected components in our components list
scc = []
for comp in components:
    # |S| > 1 condition: single vertices are not reported
    if(len(comp) > 1):
        # Need to sort by order the topics appeared in input
        temp = []
        for item in comp:
            temp.append((item,topics.index(item)))
        temp.sort(key=operator.itemgetter(1))
        temp = [x[0] for x in temp]
        scc.append(temp)
# Sort first elements of SCC by order they appeared in input
scc2 = []
for item in scc:
    scc2.append((item,topics.index(item[0])))
scc2.sort(key=operator.itemgetter(1))
scc2 = [x[0] for x in scc2]
# And finally, print out!
for item in scc2:
    print(" ".join(item))
| |
"""
EvEditor (Evennia Line Editor)
This implements an advanced line editor for editing longer texts
in-game. The editor mimics the command mechanisms of the "VI" editor
(a famous line-by-line editor) as far as reasonable.
Features of the editor:
- undo/redo.
- edit/replace on any line of the buffer.
- search&replace text anywhere in buffer.
- formatting of buffer, or selection, to certain width + indentations.
- allow to echo the input or not, depending on your client.
To use the editor, just import EvEditor from this module
and initialize it:
from evennia.utils.eveditor import EvEditor
EvEditor(caller, loadfunc=None, savefunc=None, quitfunc=None, key="", persistent=True)
- caller is the user of the editor, the one to see all feedback.
- loadfunc(caller) is called when the editor is first launched; the
return from this function is loaded as the starting buffer in the
editor.
- savefunc(caller, buffer) is called with the current buffer when
saving in the editor. The function should return True/False depending
on if the saving was successful or not.
- quitfunc(caller) is called when the editor exits. If this is given,
no automatic quit messages will be given.
- key is an optional identifier for the editing session, to be
displayed in the editor.
- persistent means the editor state will be saved to the database making it
survive a server reload. Note that using this mode, the load- save-
and quit-funcs must all be possible to pickle - notably unusable
callables are class methods and functions defined inside other
functions. With persistent=False, no such restriction exists.
- code set to True activates features on the EvEditor to enter Python code.
In addition, the EvEditor can be used to enter Python source code,
and offers basic handling of indentation.
"""
from builtins import object
import re
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import is_iter, fill, dedent, logger, justify, to_str
from evennia.utils.ansi import raw
from evennia.commands import cmdhandler
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH  # system key issued when no command matched
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT  # system key issued on empty input
# tokenizer: double-quoted, single-quoted or bare whitespace-separated groups
_RE_GROUP = re.compile(r"\".*?\"|\'.*?\'|\S*")
# use NAWS in the future?
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH  # fallback client screen width
# -------------------------------------------------------------
#
# texts
#
# -------------------------------------------------------------
_HELP_TEXT = """
<txt> - any non-command is appended to the end of the buffer.
: <l> - view buffer or only line(s) <l>
:: <l> - raw-view buffer or only line(s) <l>
::: - escape - enter ':' as the only character on the line.
:h - this help.
:w - save the buffer (don't quit)
:wq - save buffer and quit
:q - quit (will be asked to save if buffer was changed)
:q! - quit without saving, no questions asked
:u - (undo) step backwards in undo history
:uu - (redo) step forward in undo history
:UU - reset all changes back to initial state
:dd <l> - delete last line or line(s) <l>
:dw <l> <w> - delete word or regex <w> in entire buffer or on line <l>
:DD - clear entire buffer
:y <l> - yank (copy) line(s) <l> to the copy buffer
:x <l> - cut line(s) <l> and store it in the copy buffer
:p <l> - put (paste) previously copied line(s) directly after <l>
:i <l> <txt> - insert new text <txt> at line <l>. Old line will move down
:r <l> <txt> - replace line <l> with text <txt>
:I <l> <txt> - insert text at the beginning of line <l>
:A <l> <txt> - append text after the end of line <l>
:s <l> <w> <txt> - search/replace word or regex <w> in buffer or on line <l>
:j <l> <w> - justify buffer or line <l>. <w> is f, c, l or r. Default f (full)
:f <l> - flood-fill entire buffer or line <l>: Equivalent to :j left
:fi <l> - indent entire buffer or line <l>
:fd <l> - de-indent entire buffer or line <l>
:echo - turn echoing of the input on/off (helpful for some clients)
"""
_HELP_LEGEND = """
Legend:
<l> - line number, like '5' or range, like '3:7'.
<w> - a single word, or multiple words with quotes around them.
<txt> - longer string, usually not needing quotes.
"""
_HELP_CODE = """
:! - Execute code buffer without saving
:< - Decrease the level of automatic indentation for the next lines
:> - Increase the level of automatic indentation for the next lines
:= - Switch automatic indentation on/off
""".lstrip("\n")
_ERROR_LOADFUNC = """
{error}
|rBuffer load function error. Could not load initial data.|n
"""
_ERROR_SAVEFUNC = """
{error}
|rSave function returned an error. Buffer not saved.|n
"""
_ERROR_NO_SAVEFUNC = "|rNo save function defined. Buffer cannot be saved.|n"
_MSG_SAVE_NO_CHANGE = "No changes need saving"
_DEFAULT_NO_QUITFUNC = "Exited editor."
_ERROR_QUITFUNC = """
{error}
|rQuit function gave an error. Skipping.|n
"""
_ERROR_PERSISTENT_SAVING = """
{error}
|rThe editor state could not be saved for persistent mode. Switching
to non-persistent mode (which means the editor session won't survive
an eventual server reload - so save often!)|n
"""
_TRACE_PERSISTENT_SAVING = \
"EvEditor persistent-mode error. Commonly, this is because one or " \
"more of the EvEditor callbacks could not be pickled, for example " \
"because it's a class method or is defined inside another function."
_MSG_NO_UNDO = "Nothing to undo."
_MSG_NO_REDO = "Nothing to redo."
_MSG_UNDO = "Undid one step."
_MSG_REDO = "Redid one step."
# -------------------------------------------------------------
#
# Handle yes/no quit question
#
# -------------------------------------------------------------
class CmdSaveYesNo(Command):
    """
    Save the editor state on quit. This catches
    nomatches (defaults to Yes), and avoids saving only if
    command was given specifically as "no" or "n".
    """
    key = _CMD_NOMATCH
    aliases = _CMD_NOINPUT
    locks = "cmd:all()"
    # fixed typo: this attribute was misspelled 'help_cateogory', which
    # silently left the real help_category at its default value
    help_category = "LineEditor"

    def func(self):
        """Implement the yes/no choice."""
        # this is only called from inside the lineeditor
        # so caller.ndb._eveditor must be set.
        self.caller.cmdset.remove(SaveYesNoCmdSet)
        if self.raw_string.strip().lower() in ("no", "n"):
            # answered no: quit without saving
            self.caller.msg(self.caller.ndb._eveditor.quit())
        else:
            # answered yes (the default): save the buffer, then quit
            self.caller.ndb._eveditor.save_buffer()
            self.caller.ndb._eveditor.quit()
class SaveYesNoCmdSet(CmdSet):
    """Stores the yesno question"""
    # High-priority Replace merge so the question temporarily overrides the
    # normal editor commands until it is answered.
    key = "quitsave_yesno"
    priority = 1
    mergetype = "Replace"
    def at_cmdset_creation(self):
        """at cmdset creation"""
        self.add(CmdSaveYesNo())
# -------------------------------------------------------------
#
# Editor commands
#
# -------------------------------------------------------------
class CmdEditorBase(Command):
    """
    Base parent for editor commands.

    Provides shared argument pre-parsing: extracts an optional line
    number/range, quoted word groups and free text from the command
    arguments and stores the results on the command instance for use
    by `func()` in subclasses.
    """
    locks = "cmd:all()"
    # NOTE(review): Evennia commands conventionally use `help_category`;
    # confirm `help_entry` is intentional here.
    help_entry = "LineEditor"
    editor = None

    def parse(self):
        """
        Handles pre-parsing
        Editor commands are on the form
        :cmd [li] [w] [txt]
        Where all arguments are optional.
        li - line number (int), starting from 1. This could also
             be a range given as <l>:<l>.
        w - word(s) (string), could be encased in quotes.
        txt - extra text (string), could be encased in quotes.
        """
        editor = self.caller.ndb._eveditor
        if not editor:
            # this will completely replace the editor
            _load_editor(self.caller)
            editor = self.caller.ndb._eveditor
        self.editor = editor
        linebuffer = self.editor.get_buffer().split("\n")
        nlines = len(linebuffer)

        # The regular expression will split the line by whitespaces,
        # stripping extra whitespaces, except if the text is
        # surrounded by single- or double quotes, in which case they
        # will be kept together and extra whitespace preserved. You
        # can input quotes on the line by alternating single and
        # double quotes.
        arglist = [part for part in _RE_GROUP.findall(self.args) if part]
        temp = []
        for arg in arglist:
            # we want to clean the quotes, but only one type,
            # in case we are nesting.
            # Bugfix: str.strip returns a NEW string; the original code
            # discarded the result, leaving the quotes in place.
            if arg.startswith('"'):
                arg = arg.strip('"')
            elif arg.startswith("'"):
                arg = arg.strip("'")
            temp.append(arg)
        arglist = temp

        # A dumb split, without grouping quotes
        words = self.args.split()

        # current line number
        cline = nlines - 1

        # the first argument could also be a range of line numbers, on the
        # form <lstart>:<lend>. Either of the ends could be missing, to
        # mean start/end of buffer respectively.
        lstart, lend = cline, cline + 1
        linerange = False
        if arglist and arglist[0].count(':') == 1:
            part1, part2 = arglist[0].split(':')
            if part1 and part1.isdigit():
                lstart = min(max(0, int(part1)) - 1, nlines)
                linerange = True
            if part2 and part2.isdigit():
                # NOTE(review): this caps lend at lstart + 2, so a range
                # like 2:5 collapses to two lines; possibly intended to be
                # min(nlines, int(part2)) + 1 - confirm before changing.
                lend = min(lstart + 1, int(part2)) + 1
                linerange = True
        elif arglist and arglist[0].isdigit():
            lstart = min(max(0, int(arglist[0]) - 1), nlines)
            lend = lstart + 1
            linerange = True
        if linerange:
            arglist = arglist[1:]

        # nicer output formatting of the line range.
        lstr = "line %i" % (lstart + 1) if not linerange or lstart + 1 == lend else "lines %i-%i" % (lstart + 1, lend)

        # arg1 and arg2 is whatever arguments. Line numbers or -ranges are
        # never included here.
        args = " ".join(arglist)
        arg1, arg2 = "", ""
        if len(arglist) > 1:
            arg1, arg2 = arglist[0], " ".join(arglist[1:])
        else:
            arg1 = " ".join(arglist)

        # store for use in func()
        self.linebuffer = linebuffer
        self.nlines = nlines
        self.arglist = arglist
        self.cline = cline
        self.lstart = lstart
        self.lend = lend
        self.linerange = linerange
        self.lstr = lstr
        self.words = words
        self.args = args
        self.arg1 = arg1
        self.arg2 = arg2
def _load_editor(caller):
    """
    Restore a persistent editor session from the caller's attributes.

    Rebuilds the EvEditor from the saved constructor kwargs, then restores
    buffer, undo history and indentation state. If no saved state exists,
    the lingering editor cmdset is removed instead.
    """
    stored = caller.attributes.get("_eveditor_saved")
    buf, undo = caller.attributes.get("_eveditor_buffer_temp", (None, None))
    was_unsaved = caller.attributes.get("_eveditor_unsaved", False)
    stored_indent = caller.attributes.get("_eveditor_indent", 0)
    if not stored:
        # something went wrong - clean up the stale cmdset.
        caller.cmdset.remove(EvEditorCmdSet)
        return
    eveditor = EvEditor(caller, **stored[0])
    if buf:
        # re-save the buffer data so we can handle subsequent restarts too
        caller.attributes.add("_eveditor_buffer_temp", (buf, undo))
        eveditor._buffer = buf
        eveditor._undo_buffer = undo
        eveditor._undo_pos = len(undo) - 1
        eveditor._unsaved = was_unsaved
        eveditor._indent = stored_indent
    # restore any remaining saved editor attributes
    for attr_name, attr_value in stored[1].iteritems():
        setattr(eveditor, attr_name, attr_value)
class CmdLineInput(CmdEditorBase):
    """
    No command match - Inputs line of text into buffer.
    """
    key = _CMD_NOMATCH
    aliases = _CMD_NOINPUT
    def func(self):
        """
        Adds the line without any formatting changes.
        If the editor handles code, it might add automatic
        indentation.
        """
        caller = self.caller
        editor = caller.ndb._eveditor
        buf = editor.get_buffer()
        # add a line of text to buffer
        line = self.raw_string.strip("\r\n")
        if editor._codefunc and editor._indent >= 0:
            # if automatic indentation is active, add spaces
            line = editor.deduce_indent(line, buf)
        buf = line if not buf else buf + "\n%s" % line
        self.editor.update_buffer(buf)
        if self.editor._echo_mode:
            # need to do it here or we will be off one line
            cline = len(self.editor.get_buffer().split('\n'))
            if editor._codefunc:
                # display the current level of identation
                indent = editor._indent
                if indent < 0:
                    # -1 encodes "auto-indent switched off"
                    indent = "off"
                self.caller.msg("|b%02i|||n (|g%s|n) %s" % (
                    cline, indent, raw(line)))
            else:
                # NOTE(review): echoes self.args (the re-joined, parsed
                # arguments) rather than the raw line - confirm that
                # normalized whitespace/quotes in the echo are intended.
                self.caller.msg("|b%02i|||n %s" % (cline, raw(self.args)))
class CmdEditorGroup(CmdEditorBase):
    """
    Commands for the editor.

    All in-editor `:`-style commands are funneled through this single
    command; `func()` dispatches on the alias actually typed
    (`self.cmdstring`).
    """
    key = ":editor_command_group"
    aliases = [":", "::", ":::", ":h", ":w", ":wq", ":q", ":q!", ":u", ":uu", ":UU",
               ":dd", ":dw", ":DD", ":y", ":x", ":p", ":i", ":j",
               ":r", ":I", ":A", ":s", ":S", ":f", ":fi", ":fd", ":echo",
               ":!", ":<", ":>", ":="]
    arg_regex = r"\s.*?|$"

    def func(self):
        """
        This command handles all the in-editor :-style commands. Since
        each command is small and very limited, this makes for a more
        efficient presentation.
        """
        caller = self.caller
        editor = caller.ndb._eveditor
        linebuffer = self.linebuffer
        lstart, lend = self.lstart, self.lend
        cmd = self.cmdstring
        echo_mode = self.editor._echo_mode
        if cmd == ":":
            # Echo buffer
            if self.linerange:
                buf = linebuffer[lstart:lend]
                editor.display_buffer(buf=buf, offset=lstart)
            else:
                editor.display_buffer()
        elif cmd == "::":
            # Echo buffer without the line numbers and syntax parsing
            if self.linerange:
                buf = linebuffer[lstart:lend]
                editor.display_buffer(buf=buf,
                                      offset=lstart,
                                      linenums=False, options={"raw": True})
            else:
                editor.display_buffer(linenums=False, options={"raw": True})
        elif cmd == ":::":
            # Insert single colon alone on a line
            editor.update_buffer([":"] if lstart == 0 else linebuffer + [":"])
            if echo_mode:
                caller.msg("Single ':' added to buffer.")
        elif cmd == ":h":
            # help entry
            editor.display_help()
        elif cmd == ":w":
            # save without quitting
            editor.save_buffer()
        elif cmd == ":wq":
            # save and quit
            editor.save_buffer()
            editor.quit()
        elif cmd == ":q":
            # quit. If not saved, will ask
            if self.editor._unsaved:
                caller.cmdset.add(SaveYesNoCmdSet)
                caller.msg("Save before quitting? |lcyes|lt[Y]|le/|lcno|ltN|le")
            else:
                editor.quit()
        elif cmd == ":q!":
            # force quit, not checking saving
            editor.quit()
        elif cmd == ":u":
            # undo
            editor.update_undo(-1)
        elif cmd == ":uu":
            # redo
            editor.update_undo(1)
        elif cmd == ":UU":
            # reset buffer
            editor.update_buffer(editor._pristine_buffer)
            caller.msg("Reverted all changes to the buffer back to original state.")
        elif cmd == ":dd":
            # :dd <l> - delete line <l>
            buf = linebuffer[:lstart] + linebuffer[lend:]
            editor.update_buffer(buf)
            caller.msg("Deleted %s." % self.lstr)
        elif cmd == ":dw":
            # :dw <w> - delete word in entire buffer
            # :dw <l> <w> delete word only on line(s) <l>
            if not self.arg1:
                caller.msg("You must give a search word to delete.")
            else:
                if not self.linerange:
                    lstart = 0
                    lend = self.cline + 1
                    caller.msg("Removed %s for lines %i-%i." % (self.arg1, lstart + 1, lend + 1))
                else:
                    caller.msg("Removed %s for %s." % (self.arg1, self.lstr))
                sarea = "\n".join(linebuffer[lstart:lend])
                # Bugfix: the flags constant was previously passed as the
                # positional `count` argument of re.sub (limiting the number
                # of substitutions to re.MULTILINE == 8 and never enabling
                # the flag). Pass it as flags= instead.
                sarea = re.sub(r"%s" % self.arg1.strip("\'").strip('\"'), "", sarea, flags=re.MULTILINE)
                buf = linebuffer[:lstart] + sarea.split("\n") + linebuffer[lend:]
                editor.update_buffer(buf)
        elif cmd == ":DD":
            # clear buffer
            editor.update_buffer("")
            # Reset indentation level to 0
            if editor._codefunc:
                if editor._indent >= 0:
                    editor._indent = 0
                    if editor._persistent:
                        caller.attributes.add("_eveditor_indent", 0)
            caller.msg("Cleared %i lines from buffer." % self.nlines)
        elif cmd == ":y":
            # :y <l> - yank line(s) to copy buffer
            cbuf = linebuffer[lstart:lend]
            editor._copy_buffer = cbuf
            caller.msg("%s, %s yanked." % (self.lstr.capitalize(), cbuf))
        elif cmd == ":x":
            # :x <l> - cut line to copy buffer
            cbuf = linebuffer[lstart:lend]
            editor._copy_buffer = cbuf
            buf = linebuffer[:lstart] + linebuffer[lend:]
            editor.update_buffer(buf)
            caller.msg("%s, %s cut." % (self.lstr.capitalize(), cbuf))
        elif cmd == ":p":
            # :p <l> paste line(s) from copy buffer
            if not editor._copy_buffer:
                caller.msg("Copy buffer is empty.")
            else:
                buf = linebuffer[:lstart] + editor._copy_buffer + linebuffer[lstart:]
                editor.update_buffer(buf)
                caller.msg("Copied buffer %s to %s." % (editor._copy_buffer, self.lstr))
        elif cmd == ":i":
            # :i <l> <txt> - insert new line
            new_lines = self.args.split('\n')
            if not new_lines:
                caller.msg("You need to enter a new line and where to insert it.")
            else:
                buf = linebuffer[:lstart] + new_lines + linebuffer[lstart:]
                editor.update_buffer(buf)
                caller.msg("Inserted %i new line(s) at %s." % (len(new_lines), self.lstr))
        elif cmd == ":r":
            # :r <l> <txt> - replace lines
            new_lines = self.args.split('\n')
            if not new_lines:
                caller.msg("You need to enter a replacement string.")
            else:
                buf = linebuffer[:lstart] + new_lines + linebuffer[lend:]
                editor.update_buffer(buf)
                caller.msg("Replaced %i line(s) at %s." % (len(new_lines), self.lstr))
        elif cmd == ":I":
            # :I <l> <txt> - insert text at beginning of line(s) <l>
            if not self.raw_string and not editor._codefunc:
                caller.msg("You need to enter text to insert.")
            else:
                buf = linebuffer[:lstart] + ["%s%s" % (self.args, line)
                                             for line in linebuffer[lstart:lend]] + linebuffer[lend:]
                editor.update_buffer(buf)
                caller.msg("Inserted text at beginning of %s." % self.lstr)
        elif cmd == ":A":
            # :A <l> <txt> - append text after end of line(s)
            if not self.args:
                caller.msg("You need to enter text to append.")
            else:
                buf = linebuffer[:lstart] + ["%s%s" % (line, self.args)
                                             for line in linebuffer[lstart:lend]] + linebuffer[lend:]
                editor.update_buffer(buf)
                caller.msg("Appended text to end of %s." % self.lstr)
        elif cmd == ":s":
            # :s <li> <w> <txt> - search and replace words
            # in entire buffer or on certain lines
            if not self.arg1 or not self.arg2:
                caller.msg("You must give a search word and something to replace it with.")
            else:
                if not self.linerange:
                    lstart = 0
                    lend = self.cline + 1
                    caller.msg("Search-replaced %s -> %s for lines %i-%i." % (self.arg1, self.arg2, lstart + 1, lend))
                else:
                    caller.msg("Search-replaced %s -> %s for %s." % (self.arg1, self.arg2, self.lstr))
                sarea = "\n".join(linebuffer[lstart:lend])
                # Match the word standalone (surrounded by whitespace or
                # line boundaries); the ^/$ anchors need MULTILINE.
                regex = r"%s|^%s(?=\s)|(?<=\s)%s(?=\s)|^%s$|(?<=\s)%s$"
                regarg = self.arg1.strip("\'").strip('\"')
                if " " in regarg:
                    regarg = regarg.replace(" ", " +")
                # Bugfix: flags= was previously passed positionally as the
                # `count` argument of re.sub.
                sarea = re.sub(regex % (regarg, regarg, regarg, regarg, regarg), self.arg2.strip("\'").strip('\"'),
                               sarea, flags=re.MULTILINE)
                buf = linebuffer[:lstart] + sarea.split("\n") + linebuffer[lend:]
                editor.update_buffer(buf)
        elif cmd == ":f":
            # :f <l> flood-fill buffer or <l> lines of buffer.
            width = _DEFAULT_WIDTH
            if not self.linerange:
                lstart = 0
                lend = self.cline + 1
                caller.msg("Flood filled lines %i-%i." % (lstart + 1, lend))
            else:
                caller.msg("Flood filled %s." % self.lstr)
            fbuf = "\n".join(linebuffer[lstart:lend])
            fbuf = fill(fbuf, width=width)
            buf = linebuffer[:lstart] + fbuf.split("\n") + linebuffer[lend:]
            editor.update_buffer(buf)
        elif cmd == ":j":
            # :f <l> <w> justify buffer of <l> with <w> as align (one of
            # f(ull), c(enter), r(ight) or l(left). Default is full.
            align_map = {"full": "f", "f": "f", "center": "c", "c": "c",
                         "right": "r", "r": "r", "left": "l", "l": "l"}
            align_name = {"f": "Full", "c": "Center", "l": "Left", "r": "Right"}
            width = _DEFAULT_WIDTH
            if self.arg1 and self.arg1.lower() not in align_map:
                self.caller.msg("Valid justifications are [f]ull (default), [c]enter, [r]right or [l]eft")
                return
            align = align_map[self.arg1.lower()] if self.arg1 else 'f'
            if not self.linerange:
                lstart = 0
                lend = self.cline + 1
                self.caller.msg("%s-justified lines %i-%i." % (align_name[align], lstart + 1, lend))
            else:
                self.caller.msg("%s-justified %s." % (align_name[align], self.lstr))
            jbuf = "\n".join(linebuffer[lstart:lend])
            jbuf = justify(jbuf, width=width, align=align)
            buf = linebuffer[:lstart] + jbuf.split("\n") + linebuffer[lend:]
            editor.update_buffer(buf)
        elif cmd == ":fi":
            # :fi <l> indent buffer or lines <l> of buffer.
            indent = " " * 4
            if not self.linerange:
                lstart = 0
                lend = self.cline + 1
                caller.msg("Indented lines %i-%i." % (lstart + 1, lend))
            else:
                caller.msg("Indented %s." % self.lstr)
            fbuf = [indent + line for line in linebuffer[lstart:lend]]
            buf = linebuffer[:lstart] + fbuf + linebuffer[lend:]
            editor.update_buffer(buf)
        elif cmd == ":fd":
            # :fd <l> dedent buffer or lines <l> of buffer.
            if not self.linerange:
                lstart = 0
                lend = self.cline + 1
                caller.msg("Removed left margin (dedented) lines %i-%i." % (lstart + 1, lend))
            else:
                caller.msg("Removed left margin (dedented) %s." % self.lstr)
            fbuf = "\n".join(linebuffer[lstart:lend])
            fbuf = dedent(fbuf)
            buf = linebuffer[:lstart] + fbuf.split("\n") + linebuffer[lend:]
            editor.update_buffer(buf)
        elif cmd == ":echo":
            # set echoing on/off
            editor._echo_mode = not editor._echo_mode
            caller.msg("Echo mode set to %s" % editor._echo_mode)
        elif cmd == ":!":
            # execute the code buffer (code mode only)
            if editor._codefunc:
                editor._codefunc(caller, editor._buffer)
            else:
                caller.msg("This command is only available in code editor mode.")
        elif cmd == ":<":
            # :< - decrease auto-indentation level
            if editor._codefunc:
                editor.decrease_indent()
                indent = editor._indent
                if indent >= 0:
                    caller.msg("Decreased indentation: new indentation is {}.".format(
                        indent))
                else:
                    caller.msg("|rManual indentation is OFF.|n Use := to turn it on.")
            else:
                caller.msg("This command is only available in code editor mode.")
        elif cmd == ":>":
            # :> - increase auto-indentation level
            if editor._codefunc:
                editor.increase_indent()
                indent = editor._indent
                if indent >= 0:
                    caller.msg("Increased indentation: new indentation is {}.".format(
                        indent))
                else:
                    caller.msg("|rManual indentation is OFF.|n Use := to turn it on.")
            else:
                caller.msg("This command is only available in code editor mode.")
        elif cmd == ":=":
            # := - toggle auto-indentation
            if editor._codefunc:
                editor.swap_autoindent()
                indent = editor._indent
                if indent >= 0:
                    caller.msg("Auto-indentation turned on.")
                else:
                    caller.msg("Auto-indentation turned off.")
            else:
                caller.msg("This command is only available in code editor mode.")
class EvEditorCmdSet(CmdSet):
    """CmdSet for the editor commands"""
    # Replace-merged so that, while editing, these commands shadow the
    # caller's normal command set.
    key = "editorcmdset"
    mergetype = "Replace"
    def at_cmdset_creation(self):
        # Free-text input plus the :-command dispatcher.
        self.add(CmdLineInput())
        self.add(CmdEditorGroup())
# -------------------------------------------------------------
#
# Main Editor object
#
# -------------------------------------------------------------
class EvEditor(object):
    """
    This defines a line editor object. It creates all relevant commands
    and tracks the current state of the buffer. It also cleans up after
    itself.
    """
    def __init__(self, caller, loadfunc=None, savefunc=None,
                 quitfunc=None, key="", persistent=False, codefunc=False):
        """
        Launches a full in-game line editor, mimicking the functionality of VIM.
        Args:
            caller (Object): Who is using the editor.
            loadfunc (callable, optional): This will be called as
                `loadfunc(caller)` when the editor is first started. Its
                return will be used as the editor's starting buffer.
            savefunc (callable, optional): This will be called as
                `savefunc(caller, buffer)` when the save-command is given and
                is used to actually determine where/how result is saved.
                It should return `True` if save was successful and also
                handle any feedback to the user.
            quitfunc (callable, optional): This will optionally be
                called as `quitfunc(caller)` when the editor is
                exited. If defined, it should handle all wanted feedback
                to the user.
            quitfunc_args (tuple, optional): Optional tuple of arguments to
                supply to `quitfunc`.
            key (str, optional): An optional key for naming this
                session and make it unique from other editing sessions.
            persistent (bool, optional): Make the editor survive a reboot. Note
                that if this is set, all callables must be possible to pickle
            codefunc (bool, optional): If given, will run the editor in code mode.
                This will be called as `codefunc(caller, buf)`.
        Notes:
            In persistent mode, all the input callables (savefunc etc)
            must be possible to be *pickled*, this excludes e.g.
            callables that are class methods or functions defined
            dynamically or as part of another function. In
            non-persistent mode no such restrictions exist.
            NOTE(review): the docstring mentions `quitfunc_args` but the
            signature has no such parameter - confirm and reconcile.
        """
        self._key = key
        self._caller = caller
        # the editor registers itself on the caller for command lookup
        self._caller.ndb._eveditor = self
        self._buffer = ""
        self._unsaved = False
        self._persistent = persistent
        # current auto-indent level; -1 encodes "auto-indent off"
        self._indent = 0
        if loadfunc:
            self._loadfunc = loadfunc
        else:
            self._loadfunc = lambda caller: self._buffer
        self.load_buffer()
        if savefunc:
            self._savefunc = savefunc
        else:
            self._savefunc = lambda caller, buffer: caller.msg(_ERROR_NO_SAVEFUNC)
        if quitfunc:
            self._quitfunc = quitfunc
        else:
            self._quitfunc = lambda caller: caller.msg(_DEFAULT_NO_QUITFUNC)
        self._codefunc = codefunc
        # store the original version
        self._pristine_buffer = self._buffer
        self._sep = "-"
        # undo operation buffer
        self._undo_buffer = [self._buffer]
        self._undo_pos = 0
        self._undo_max = 20
        # copy buffer
        self._copy_buffer = []
        if persistent:
            # save in tuple {kwargs, other options}
            try:
                caller.attributes.add("_eveditor_saved", (
                    dict(loadfunc=loadfunc, savefunc=savefunc, quitfunc=quitfunc,
                         codefunc=codefunc, key=key, persistent=persistent),
                    dict(_pristine_buffer=self._pristine_buffer, _sep=self._sep)))
                caller.attributes.add("_eveditor_buffer_temp", (self._buffer, self._undo_buffer))
                caller.attributes.add("_eveditor_unsaved", False)
                caller.attributes.add("_eveditor_indent", 0)
            except Exception as err:
                # pickling failed - fall back to non-persistent mode
                caller.msg(_ERROR_PERSISTENT_SAVING.format(error=err))
                # NOTE(review): `logger` is assumed to be imported at module
                # level (outside this view) - confirm.
                logger.log_trace(_TRACE_PERSISTENT_SAVING)
                persistent = False
        # Create the commands we need
        caller.cmdset.add(EvEditorCmdSet, permanent=persistent)
        # echo inserted text back to caller
        self._echo_mode = True
        # show the buffer ui
        self.display_buffer()
    def load_buffer(self):
        """
        Load the buffer using the load function hook.
        """
        try:
            self._buffer = self._loadfunc(self._caller)
            # Python 2 basestring check: coerce non-string return values.
            if not isinstance(self._buffer, basestring):
                self._buffer = to_str(self._buffer, force_string=True)
                self._caller.msg("|rNote: input buffer was converted to a string.|n")
        except Exception as e:
            from evennia.utils import logger
            logger.log_trace()
            self._caller.msg(_ERROR_LOADFUNC.format(error=e))
    def get_buffer(self):
        """
        Return:
            buffer (str): The current buffer.
        """
        return self._buffer
    def update_buffer(self, buf):
        """
        This should be called when the buffer has been changed
        somehow. It will handle unsaved flag and undo updating.
        Args:
            buf (str): The text to update the buffer with.
        """
        if is_iter(buf):
            buf = "\n".join(buf)
        if buf != self._buffer:
            self._buffer = buf
            self.update_undo()
            self._unsaved = True
            if self._persistent:
                # mirror the new state into persistent storage
                self._caller.attributes.add("_eveditor_buffer_temp", (self._buffer, self._undo_buffer))
                self._caller.attributes.add("_eveditor_unsaved", True)
                self._caller.attributes.add("_eveditor_indent", self._indent)
    def quit(self):
        """
        Cleanly exit the editor.
        """
        try:
            self._quitfunc(self._caller)
        except Exception as e:
            self._caller.msg(_ERROR_QUITFUNC.format(error=e))
        # remove all editor state from the caller
        self._caller.nattributes.remove("_eveditor")
        self._caller.attributes.remove("_eveditor_buffer_temp")
        self._caller.attributes.remove("_eveditor_saved")
        self._caller.attributes.remove("_eveditor_unsaved")
        self._caller.attributes.remove("_eveditor_indent")
        self._caller.cmdset.remove(EvEditorCmdSet)
    def save_buffer(self):
        """
        Saves the content of the buffer.
        """
        if self._unsaved or self._codefunc:
            # always save code - this allows us to tie execution to
            # saving if we want.
            try:
                if self._savefunc(self._caller, self._buffer):
                    # Save codes should return a true value to indicate
                    # save worked. The saving function is responsible for
                    # any status messages.
                    self._unsaved = False
            except Exception as e:
                self._caller.msg(_ERROR_SAVEFUNC.format(error=e))
        else:
            self._caller.msg(_MSG_SAVE_NO_CHANGE)
    def update_undo(self, step=None):
        """
        This updates the undo position.
        Args:
            step (int, optional): The amount of steps
                to progress the undo position to. This
                may be a negative value for undo and
                a positive value for redo.
        """
        if step and step < 0:
            # undo
            if self._undo_pos <= 0:
                self._caller.msg(_MSG_NO_UNDO)
            else:
                self._undo_pos = max(0, self._undo_pos + step)
                self._buffer = self._undo_buffer[self._undo_pos]
                self._caller.msg(_MSG_UNDO)
        elif step and step > 0:
            # redo
            if self._undo_pos >= len(self._undo_buffer) - 1 or self._undo_pos + 1 >= self._undo_max:
                self._caller.msg(_MSG_NO_REDO)
            else:
                self._undo_pos = min(self._undo_pos + step, min(len(self._undo_buffer), self._undo_max) - 1)
                self._buffer = self._undo_buffer[self._undo_pos]
                self._caller.msg(_MSG_REDO)
        if not self._undo_buffer or (self._undo_buffer and self._buffer != self._undo_buffer[self._undo_pos]):
            # save undo state; truncate any redo history past the new state
            self._undo_buffer = self._undo_buffer[:self._undo_pos + 1] + [self._buffer]
            self._undo_pos = len(self._undo_buffer) - 1
    def display_buffer(self, buf=None, offset=0, linenums=True, options={"raw": False}):
        """
        This displays the line editor buffer, or selected parts of it.
        Args:
            buf (str, optional): The buffer or part of buffer to display.
            offset (int, optional): If `buf` is set and is not the full buffer,
                `offset` should define the actual starting line number, to
                get the linenum display right.
            linenums (bool, optional): Show line numbers in buffer.
            options: raw (bool, optional): Tell protocol to not parse
                formatting information.
        NOTE(review): `options` is a mutable default argument; it is only
        read here, but callers share the same dict instance - confirm
        nothing mutates it.
        """
        if buf is None:
            buf = self._buffer
        if is_iter(buf):
            buf = "\n".join(buf)
        lines = buf.split('\n')
        nlines = len(lines)
        nwords = len(buf.split())
        nchars = len(buf)
        sep = self._sep
        header = "|n" + sep * 10 + "Line Editor [%s]" % self._key + sep * (_DEFAULT_WIDTH - 20 - len(self._key))
        footer = "|n" + sep * 10 +\
            "[l:%02i w:%03i c:%04i]" % (nlines, nwords, nchars) + sep * 12 + "(:h for help)" + sep * 28
        if linenums:
            main = "\n".join("|b%02i|||n %s" % (iline + 1 + offset, raw(line)) for iline, line in enumerate(lines))
        else:
            main = "\n".join([raw(line) for line in lines])
        string = "%s\n%s\n%s" % (header, main, footer)
        self._caller.msg(string, options=options)
    def display_help(self):
        """
        Shows the help entry for the editor.
        """
        string = self._sep * _DEFAULT_WIDTH + _HELP_TEXT
        if self._codefunc:
            # code mode gets the extra :-commands help section
            string += _HELP_CODE
        string += _HELP_LEGEND + self._sep * _DEFAULT_WIDTH
        self._caller.msg(string)
    def deduce_indent(self, line, buffer):
        """
        Try to deduce the level of indentation of the given line.
        """
        # keywords that must align with an earlier opening statement
        keywords = {
            "elif ": ["if "],
            "else:": ["if ", "try"],
            "except": ["try:"],
            "finally:": ["try:"],
        }
        # statements that increase indentation for following lines
        opening_tags = ("if ", "try:", "for ", "while ")
        # If the line begins by one of the given keywords
        indent = self._indent
        if any(line.startswith(kw) for kw in keywords.keys()):
            # Get the keyword and matching begin tags
            keyword = [kw for kw in keywords if line.startswith(kw)][0]
            begin_tags = keywords[keyword]
            # scan backwards for the most recent matching opener
            for oline in reversed(buffer.splitlines()):
                if any(oline.lstrip(" ").startswith(tag) for tag in begin_tags):
                    # This line begins with a begin tag, takes the identation
                    # (Python 2 integer division: 4 spaces == 1 level)
                    indent = (len(oline) - len(oline.lstrip(" "))) / 4
                    break
            self._indent = indent + 1
            if self._persistent:
                self._caller.attributes.add("_eveditor_indent", self._indent)
        elif any(line.startswith(kw) for kw in opening_tags):
            self._indent = indent + 1
            if self._persistent:
                self._caller.attributes.add("_eveditor_indent", self._indent)
        line = " " * 4 * indent + line
        return line
    def decrease_indent(self):
        """Decrease automatic indentation by 1 level."""
        if self._codefunc and self._indent > 0:
            self._indent -= 1
            if self._persistent:
                self._caller.attributes.add("_eveditor_indent", self._indent)
    def increase_indent(self):
        """Increase automatic indentation by 1 level."""
        if self._codefunc and self._indent >= 0:
            self._indent += 1
            if self._persistent:
                self._caller.attributes.add("_eveditor_indent", self._indent)
    def swap_autoindent(self):
        """Swap automatic indentation on or off."""
        if self._codefunc:
            # -1 means "off"; 0 means "on, at top level"
            if self._indent >= 0:
                self._indent = -1
            else:
                self._indent = 0
            if self._persistent:
                self._caller.attributes.add("_eveditor_indent", self._indent)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import fnmatch
import imp
import logging
import os
import sys
import zipfile
from telemetry.internal.util import command_line
from telemetry.internal.util import path
from telemetry.internal.util import path_set
try:
from modulegraph import modulegraph # pylint: disable=import-error
except ImportError as err:
modulegraph = None
import_error = err
from core import bootstrap
from core import path_util
# Name of the per-directory file listing bootstrap dependencies.
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
  """Return the set of bootstrap dependency paths declared under base_dir.

  Reads the DEPS_FILE in base_dir, if present, and resolves each listed
  path relative to the Chromium parent directory.

  Returns:
    A set of absolute, real paths; empty set when no deps file exists.
  """
  deps_file = os.path.join(base_dir, DEPS_FILE)
  if not os.path.exists(deps_file):
    # Return a set (not a list) so both branches have the same type and
    # callers can rely on set semantics.
    return set()
  deps_paths = bootstrap.ListAllDepsPaths(deps_file)
  return set(os.path.realpath(os.path.join(
      path_util.GetChromiumSrcDir(), '..', deps_path))
             for deps_path in deps_paths)
def FindPythonDependencies(module_path):
  # Yields the real paths of all Chromium-local Python files transitively
  # imported by the module at module_path (plus package directories), as
  # discovered by modulegraph. Dependencies inside the Python installation
  # itself are excluded.
  logging.info('Finding Python dependencies of %s', module_path)
  if modulegraph is None:
    # modulegraph failed to import at file-load time; re-raise that error.
    raise import_error
  prefixes = [sys.prefix]
  if hasattr(sys, 'real_prefix'):
    # Inside a virtualenv, also exclude the real interpreter prefix.
    prefixes.append(sys.real_prefix)
  logging.info('Excluding Prefixes: %r', prefixes)
  # Save sys.path so the module's own modifications can be undone below.
  sys_path = sys.path
  sys.path = list(sys_path)
  try:
    # Load the module to inherit its sys.path modifications.
    sys.path.insert(0, os.path.abspath(os.path.dirname(module_path)))
    imp.load_source(
        os.path.splitext(os.path.basename(module_path))[0], module_path)
    # Analyze the module for its imports.
    graph = modulegraph.ModuleGraph()
    graph.run_script(module_path)
    # Filter for only imports in Chromium.
    for node in graph.nodes():
      if not node.filename:
        continue
      module_path = os.path.realpath(node.filename)
      _, incoming_edges = graph.get_edges(node)
      message = 'Discovered %s (Imported by: %s)' % (
          node.filename, ', '.join(
              d.filename for d in incoming_edges
              if d is not None and d.filename is not None))
      logging.info(message)
      # This check is done after the logging/printing above to make sure that
      # we also print out the dependency edges that include python packages
      # that are not in chromium.
      if not path.IsSubpath(module_path, path_util.GetChromiumSrcDir()):
        continue
      # Exclude any dependencies which exist in the python installation.
      if any(path.IsSubpath(module_path, pfx) for pfx in prefixes):
        continue
      yield module_path
      if node.packagepath is not None:
        # Also include the package directories themselves.
        for p in node.packagepath:
          yield p
  finally:
    # Always restore the original sys.path.
    sys.path = sys_path
def FindExcludedFiles(files, options):
  """Yield the members of files that should be left out of the dependency set.

  A file is excluded when it is hidden (any path component starts with '.'),
  is a compiled .pyc file, has a cloud-storage '.sha1' placeholder next to
  it, or matches one of the --exclude glob patterns.
  """
  def IsHidden(path_string):
    # Hidden if any single component of the path starts with a dot.
    return any(component.startswith('.')
               for component in path_string.split(os.sep))

  def IsPyc(path_string):
    return os.path.splitext(path_string)[1] == '.pyc'

  def IsInCloudStorage(path_string):
    # Cloud-storage-backed files are represented locally by a '.sha1' stub.
    return os.path.exists(path_string + '.sha1')

  def MatchesExcludeOptions(path_string):
    # A pattern may match either the full path or just the basename.
    return any(fnmatch.fnmatch(path_string, pattern) or
               fnmatch.fnmatch(os.path.basename(path_string), pattern)
               for pattern in options.exclude)

  exclude_conditions = (
      IsHidden,
      IsPyc,
      IsInCloudStorage,
      MatchesExcludeOptions,
  )

  # Yield every file hit by at least one exclusion filter.
  for file_path in files:
    if any(condition(file_path) for condition in exclude_conditions):
      yield file_path
def FindDependencies(target_paths, options):
  """Compute the full dependency path set for the given target paths.

  Seeds the set with Telemetry's own entry points, then adds each target's
  directory, bootstrap deps and Python imports, and finally removes files
  hit by the exclusion filters.

  Raises:
    ValueError: if any target path does not exist.
  """
  # Fail fast on bad arguments before doing any expensive analysis.
  for target_path in target_paths:
    if not os.path.exists(target_path):
      raise ValueError('Path does not exist: %s' % target_path)

  dependencies = path_set.PathSet()

  # Telemetry's major entry points pull in Telemetry and (hopefully) all of
  # its dependencies, so they are always included - even with no arguments
  # we still get Telemetry itself.
  telemetry_dir = path_util.GetTelemetryDir()
  for entry_point in (('telemetry', 'command_line', 'parser.py'),
                      ('telemetry', 'testing', 'run_tests.py')):
    dependencies |= FindPythonDependencies(os.path.realpath(
        os.path.join(telemetry_dir, *entry_point)))

  # Fold in each target's own directory, bootstrap deps and imports.
  for target_path in target_paths:
    base_dir = os.path.dirname(os.path.realpath(target_path))
    dependencies.add(base_dir)
    dependencies |= FindBootstrapDependencies(base_dir)
    dependencies |= FindPythonDependencies(target_path)

  # Drop anything matched by the exclusion filters.
  dependencies -= FindExcludedFiles(set(dependencies), options)
  return dependencies
def ZipDependencies(target_paths, dependencies, options):
  # Packages every dependency file into the archive at options.zip under a
  # top-level 'telemetry/' prefix, and adds one small launcher script per
  # target for convenience.
  base_dir = os.path.dirname(os.path.realpath(path_util.GetChromiumSrcDir()))
  with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
    # Add dependencies to archive.
    for dependency_path in dependencies:
      path_in_archive = os.path.join(
          'telemetry', os.path.relpath(dependency_path, base_dir))
      zip_file.write(dependency_path, path_in_archive)
    # Add symlinks to executable paths, for ease of use.
    for target_path in target_paths:
      link_info = zipfile.ZipInfo(
          os.path.join('telemetry', os.path.basename(target_path)))
      link_info.create_system = 3  # Unix attributes.
      # 010 is regular file, 0111 is the permission bits rwxrwxrwx.
      # (Python 2 octal literal, shifted into the upper 16 bits of the
      # zip external attributes field.)
      link_info.external_attr = 0100777 << 16  # Octal.
      relative_path = os.path.relpath(target_path, base_dir)
      # Small vpython stub that re-executes the real target script.
      link_script = (
          '#!/usr/bin/env vpython\n\n'
          'import os\n'
          'import sys\n\n\n'
          'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
          'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
          % relative_path)
      zip_file.writestr(link_info, link_script)
class FindDependenciesCommand(command_line.OptparseCommand):
  """Prints all dependencies"""
  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    # -v may be repeated; the count selects the logging level below.
    parser.add_option(
        '-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed).')
    parser.add_option(
        '-e', '--exclude', action='append', default=[],
        help='Exclude paths matching EXCLUDE. Can be used multiple times.')
    parser.add_option(
        '-z', '--zip',
        help='Store files in a zip archive at ZIP.')
  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, _):
    # Map -v count to a log level: -vv -> DEBUG, -v -> INFO, none -> WARNING.
    if args.verbosity >= 2:
      logging.getLogger().setLevel(logging.DEBUG)
    elif args.verbosity:
      logging.getLogger().setLevel(logging.INFO)
    else:
      logging.getLogger().setLevel(logging.WARNING)
  def Run(self, args):
    # Either zip up the computed dependencies or print them sorted.
    target_paths = args.positional_args
    dependencies = FindDependencies(target_paths, args)
    if args.zip:
      ZipDependencies(target_paths, dependencies, args)
      print('Zip archive written to %s.' % args.zip)
    else:
      print('\n'.join(sorted(dependencies)))
    return 0
| |
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.openstack.common import jsonutils as json
from sahara.openstack.common import log as logging
from sahara.plugins.general import exceptions as ex
from sahara.plugins.hdp.versions import versionhandlerfactory as vhf
LOG = logging.getLogger(__name__)
class ClusterSpec():
    def __init__(self, config, version='1.3.2'):
        # config: JSON cluster-template string; version: HDP stack version
        # used to pick the matching version handler.
        self._config_template = config
        self.services = []
        self.configurations = {}
        self.node_groups = {}
        self.version = version
        self.user_input_handlers = {}
        # Parse the template once up front; create_operational_config()
        # re-parses configurations later against live cluster data.
        cluster_template = json.loads(config)
        self._parse_services(cluster_template)
        self._parse_configurations(cluster_template)
        self._process_node_groups(template_json=cluster_template)
    def create_operational_config(self, cluster, user_inputs,
                                  scaled_groups=None):
        # Merge live cluster state, user inputs and optional scaling counts
        # into this spec. The call order matters: deployed services must be
        # known before validation, and configurations are re-parsed before
        # user inputs and token replacement are applied.
        if scaled_groups is None:
            scaled_groups = {}
        self._determine_deployed_services(cluster)
        self._process_node_groups(cluster=cluster)
        # Apply requested scaling to the matching node groups.
        for ng_id in scaled_groups:
            existing = next(group for group in self.node_groups.values()
                            if group.id == ng_id)
            existing.count = scaled_groups[ng_id]
        self.validate_node_groups(cluster)
        self._finalize_ng_components()
        self._parse_configurations(json.loads(self._config_template))
        self._process_user_inputs(user_inputs)
        self._replace_config_tokens()
def scale(self, updated_groups):
for ng_id in updated_groups:
existing = next(group for group in self.node_groups.values()
if group.id == ng_id)
existing.count = updated_groups[ng_id]
def validate_node_groups(self, cluster):
for service in self.services:
if service.deployed:
service.validate(self, cluster)
elif service.is_mandatory():
raise ex.RequiredServiceMissingException(service.name)
def get_deployed_configurations(self):
configs = set()
for service in self.services:
if service.deployed:
configs |= service.configurations
return configs
def determine_component_hosts(self, component):
hosts = set()
for ng in self.node_groups.values():
if component in ng.components:
hosts |= ng.instances
return hosts
def normalize(self):
return NormalizedClusterConfig(self)
def get_deployed_node_group_count(self, name):
count = 0
for ng in self.get_node_groups_containing_component(name):
count += ng.count
return count
def get_node_groups_containing_component(self, component):
found_node_groups = []
for ng in self.node_groups.values():
if component in ng.components:
found_node_groups.append(ng)
return found_node_groups
def get_components_for_type(self, type):
components = set()
for service in self.services:
for component in service.components:
if component.type == type:
components.add(component.name)
return components
def _parse_services(self, template_json):
handler = (vhf.VersionHandlerFactory.get_instance().
get_version_handler(self.version))
sp = handler.get_services_processor()
for s in template_json['services']:
name = s['name']
service = sp.create_service(name)
self.services.append(service)
for c in s['components']:
component = Component(c['name'], c['type'], c['cardinality'])
service.add_component(component)
if 'users' in s:
for u in s['users']:
user = User(u['name'], u['password'], u['groups'])
service.add_user(user)
configs = self._parse_configurations(s)
for config in configs:
service.add_configuration(config)
def _parse_configurations(self, template_json):
config_names = []
for config in template_json['configurations']:
config_props = {}
name = config['name']
config_names.append(name)
if name in self.configurations:
config_props = self.configurations[name]
else:
self.configurations[name] = config_props
if 'properties' in config:
for prop in config['properties']:
config_props[prop['name']] = prop['value']
return config_names
def _process_node_groups(self, template_json=None, cluster=None):
# get node_groups from config
if template_json and not cluster:
for group in template_json['host_role_mappings']:
node_group = NodeGroup(group['name'].lower())
for component in group['components']:
node_group.add_component(component['name'])
for host in group['hosts']:
if 'predicate' in host:
node_group.predicate = host['predicate']
if 'cardinality' in host:
node_group.cardinality = host['cardinality']
if 'default_count' in host:
node_group.count = host['default_count']
self.node_groups[node_group.name] = node_group
if cluster:
self.node_groups = {}
node_groups = cluster.node_groups
for ng in node_groups:
node_group = NodeGroup(ng.name.lower())
node_group.count = ng.count
node_group.id = ng.id
node_group.components = ng.node_processes[:]
node_group.ng_storage_paths = ng.storage_paths()
for instance in ng.instances:
node_group.instances.add(Instance(instance))
self.node_groups[node_group.name] = node_group
def _determine_deployed_services(self, cluster):
for ng in cluster.node_groups:
for service in self.services:
if service.deployed:
continue
for sc in service.components:
if sc.name in ng.node_processes:
service.deployed = True
service.register_user_input_handlers(
self.user_input_handlers)
break
def _process_user_inputs(self, user_inputs):
for ui in user_inputs:
user_input_handler = self.user_input_handlers.get(
'{0}/{1}'.format(ui.config.tag, ui.config.name),
self._default_user_input_handler)
user_input_handler(ui, self.configurations)
def _replace_config_tokens(self):
for service in self.services:
if service.deployed:
service.finalize_configuration(self)
def _finalize_ng_components(self):
for service in self.services:
if service.deployed:
service.finalize_ng_components(self)
def _default_user_input_handler(self, user_input, configurations):
config_map = configurations[user_input.config.tag]
config_map[user_input.config.name] = user_input.value
class Component():
    """Descriptor for a single service component."""

    def __init__(self, name, component_type, cardinality):
        self.cardinality = cardinality
        self.type = component_type
        self.name = name
class NodeGroup():
    """Mutable description of one cluster node group."""

    def __init__(self, name):
        self.name = name
        self.id = None
        self.components = []
        self.predicate = None
        self.cardinality = None
        self.count = None
        self.instances = set()
        self.ng_storage_paths = []

    def add_component(self, component):
        """Record another component name for this group."""
        self.components.append(component)

    def storage_paths(self):
        """Return the storage paths configured for this group."""
        return self.ng_storage_paths
class User():
    """Credentials and group membership for a service user."""

    def __init__(self, name, password, groups):
        self.groups = groups
        self.password = password
        self.name = name
class Instance():
    """Lightweight wrapper around a sahara instance.

    Captures the FQDN and addresses once at construction time, and
    hashes/compares by FQDN so instances can be collected in sets keyed
    on host name.
    """

    def __init__(self, sahara_instance):
        # Capture fqdn() once up front so later lookups are local.
        self.inst_fqdn = sahara_instance.fqdn()
        self.management_ip = sahara_instance.management_ip
        self.internal_ip = sahara_instance.internal_ip
        self.sahara_instance = sahara_instance

    def fqdn(self):
        return self.inst_fqdn

    def remote(self):
        return self.sahara_instance.remote()

    def __hash__(self):
        return hash(self.fqdn())

    def __eq__(self, other):
        return self.fqdn() == other.fqdn()

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; without this,
        # "!=" compares object identity and disagrees with "==".
        return not self.__eq__(other)
class NormalizedClusterConfig():
    """Normalized (flattened) representation of a ClusterSpec."""

    def __init__(self, cluster_spec):
        self.hadoop_version = cluster_spec.version
        self.cluster_configs = []
        self.node_groups = []
        self.handler = (vhf.VersionHandlerFactory.get_instance().
                        get_version_handler(self.hadoop_version))
        self._parse_configurations(cluster_spec.configurations)
        self._parse_node_groups(cluster_spec.node_groups)

    def _parse_configurations(self, configurations):
        # Flatten every property that has an applicable target into a
        # NormalizedConfigEntry with cluster scope.
        for config_name, properties in configurations.items():
            for prop, value in properties.items():
                target = self._get_property_target(prop)
                if not target:
                    continue
                prop_type = self._get_property_type(prop, value)
                # TODO(sdpeidel): should we supply a scope?
                entry = NormalizedConfigEntry(
                    NormalizedConfig(prop, prop_type, value, target,
                                     'cluster'),
                    value)
                self.cluster_configs.append(entry)

    def _parse_node_groups(self, node_groups):
        for group in node_groups.values():
            self.node_groups.append(NormalizedNodeGroup(group))

    def _get_property_target(self, prop):
        return self.handler.get_applicable_target(prop)

    def _get_property_type(self, prop, value):
        # TODO(jspeidel): seems that all numeric prop values in default config
        # are encoded as strings. This may be incorrect.
        # TODO(jspeidel): should probably analyze string value to determine if
        # it is numeric
        # TODO(jspeidel): would then need to know whether Ambari expects a
        # string or a numeric value
        prop_type = type(value).__name__
        if prop_type == 'str' or prop_type == 'unicode' or value == '':
            return 'string'
        if prop_type == 'int':
            return 'integer'
        if prop_type == 'bool':
            return 'boolean'
        raise ValueError(
            "Could not determine property type for property '{0}' with "
            "value: {1}".
            format(prop, value))
class NormalizedConfig():
    """Normalized description of a single configuration property."""

    def __init__(self, name, config_type, default_value, target, scope):
        self.scope = scope
        self.applicable_target = target
        self.default_value = default_value
        self.type = config_type
        self.name = name
        # Not supplied by the template; always fixed here.
        self.description = None
        self.is_optional = False
class NormalizedConfigEntry():
    """Pairs a NormalizedConfig with its concrete value."""

    def __init__(self, config, value):
        self.value = value
        self.config = config
class NormalizedNodeGroup():
    """Normalized description of a node group."""

    def __init__(self, node_group):
        self.name = node_group.name
        self.node_processes = node_group.components
        self.node_configs = None
        # TODO(jpseidel): should not have to specify img/flavor
        self.img = None
        # TODO(jmaron): the flavor will eventually come from an ambari
        # blueprint setting, but that setting doesn't exist yet; it will
        # be addressed by a bug fix shortly.
        self.flavor = 3
        self.count = node_group.count
        self.id = node_group.id
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import errno
import getpass
import glob
import hashlib
import json
import os
import re
import requests
_safeNameRegex = re.compile(r'^[/\\]+')
def rawInput(prompt):
    """
    Thin wrapper around the builtin raw_input. Builtins cannot be mocked
    directly, so tests mock this wrapper to simulate user input.
    """
    return raw_input(prompt)  # pragma: no cover
def _safeMakedirs(path):
"""
Wraps os.makedirs in such a way that it will not raise exceptions if the
directory already exists.
:param path: The directory to create.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise # pragma: no cover
class AuthenticationError(RuntimeError):
    """Raised when a login attempt is rejected by the server."""
    pass
class HttpError(Exception):
    """
    Raised when the server responds to a request with an error status.
    """
    def __init__(self, status, text, url, method):
        message = 'HTTP error {}: {} {}'.format(status, method, url)
        Exception.__init__(self, message)
        self.status = status
        self.responseText = text
        self.url = url
        self.method = method
class GirderClient(object):
    """
    A class for interacting with the girder restful api.
    Some simple examples of how to use this class follow:

    .. code-block:: python

        client = GirderClient('myhost', 8080)
        client.authenticate('myname', 'mypass')
        folder_id = '53b714308926486402ac5aba'
        item = client.createItem(folder_id, 'an item name', 'a description')
        client.addMetadataToItem(item['_id'], {'metadatakey': 'metadatavalue'})
        client.uploadFileToItem(item['_id'], 'path/to/your/file.txt')
        r1 = client.getItem(item['_id'])
        r2 = client.sendRestRequest('GET', 'item',
            {'folderId': folder_id, 'sortdir': '-1' })
        r3 = client.sendRestRequest('GET', 'resource/search',
            {'q': 'aggregated','types': '["folder", "item"]'})
    """

    # A convenience dictionary mapping HTTP method names to functions in the
    # requests module
    METHODS = {
        'GET': requests.get,
        'POST': requests.post,
        'PUT': requests.put,
        'DELETE': requests.delete
    }

    # The current maximum chunk size for uploading file chunks (64 MB)
    MAX_CHUNK_SIZE = 1024 * 1024 * 64
def __init__(self, host="localhost", port=8080, apiRoot=None,
scheme="http", dryrun=False, blacklist=None):
"""
Construct a new GirderClient object, given a host name and port number,
as well as a username and password which will be used in all requests
(HTTP Basic Auth).
:param host: A string containing the host name where Girder is running,
the default value is 'localhost'
:param port: The port number on which to connect to Girder,
the default value is 8080
:param apiRoot: The path on the server corresponding to the root of the
Girder REST API. If None is passed, assumes '/api/v1'.
:param scheme: A string containing the scheme for the Girder host,
the default value is 'http'; if you pass 'https' you likely want
to pass 443 for the port
"""
if apiRoot is None:
apiRoot = '/api/v1'
self.scheme = scheme
self.host = host
self.port = port
self.urlBase = self.scheme + '://' + self.host + ':' + str(self.port) \
+ apiRoot
if self.urlBase[-1] != '/':
self.urlBase += '/'
self.token = ''
self.dryrun = dryrun
self.blacklist = blacklist
if self.blacklist is None:
self.blacklist = []
self.folder_upload_callbacks = []
self.item_upload_callbacks = []
def authenticate(self, username=None, password=None, interactive=False):
"""
Authenticate to Girder, storing the token that comes back to be used in
future requests.
:param username: A string containing the username to use in basic
authentication.
:param password: A string containing the password to use in basic
authentication.
:param interactive: If you want the user to type their username or
password in the shell rather than passing it in as an argument,
set this to True. If you pass a username in interactive mode, the
user will only be prompted for a password.
"""
if interactive:
if username is None:
username = rawInput('Login or email: ')
password = getpass.getpass('Password for %s: ' % username)
if username is None or password is None:
raise Exception('A user name and password are required')
url = self.urlBase + 'user/authentication'
authResponse = requests.get(url, auth=(username, password))
if authResponse.status_code == 404:
raise HttpError(404, authResponse.text, url, "GET")
resp = authResponse.json()
if 'authToken' not in resp:
raise AuthenticationError()
self.token = resp['authToken']['token']
def sendRestRequest(self, method, path, parameters=None, data=None,
files=None):
"""
This method looks up the appropriate method, constructs a request URL
from the base URL, path, and parameters, and then sends the request. If
the method is unknown or if the path is not found, an exception is
raised, otherwise a JSON object is returned with the Girder response.
This is a convenience method to use when making basic requests that do
not involve multipart file data that might need to be specially encoded
or handled differently.
:param method: One of 'GET', 'POST', 'PUT', or 'DELETE'
:param path: A string containing the path elements for this request.
Note that the path string should not begin or end with the path
separator, '/'.
:param parameters: A dictionary mapping strings to strings, to be used
as the key/value pairs in the request parameters
"""
if not parameters:
parameters = {}
# Make sure we got a valid method
assert method in self.METHODS
# Look up the HTTP method we need
f = self.METHODS[method]
# Construct the url
url = self.urlBase + path
# Make the request, passing parameters and authentication info
result = f(url, params=parameters, data=data, files=files, headers={
'Girder-Token': self.token
})
# If success, return the json object. Otherwise throw an exception.
if result.status_code == 200:
return result.json()
# TODO handle 300-level status (follow redirect?)
else:
raise HttpError(
status=result.status_code, url=result.url, method=method,
text=result.text)
    def get(self, path, parameters=None):
        """Convenience shortcut for a GET request."""
        return self.sendRestRequest('GET', path, parameters)

    def post(self, path, parameters=None, files=None):
        """Convenience shortcut for a POST request."""
        return self.sendRestRequest('POST', path, parameters, files=files)

    def put(self, path, parameters=None, data=None):
        """Convenience shortcut for a PUT request."""
        return self.sendRestRequest('PUT', path, parameters, data=data)

    def delete(self, path, parameters=None):
        """Convenience shortcut for a DELETE request."""
        return self.sendRestRequest('DELETE', path, parameters)
    def createResource(self, path, params):
        """
        Creates and returns a resource.
        """
        return self.post(path, params)

    def getResource(self, path, id=None, property=None):
        """
        Loads a resource, or a resource property if property is not None,
        by id; returns None if no resource is returned.
        """
        route = path
        if id is not None:
            route += '/%s' % id
        if property is not None:
            route += '/%s' % property
        return self.get(route)

    def listResource(self, path, params):
        """
        Search for a list of resources based on params.
        """
        return self.get(path, params)
def createItem(self, parentFolderId, name, description):
"""
Creates and returns an item.
"""
params = {
'folderId': parentFolderId,
'name': name,
'description': description
}
return self.createResource('item', params)
    def getItem(self, itemId):
        """
        Retrieves an item by its ID.

        :param itemId: A string containing the ID of the item to retrieve
            from Girder.
        """
        return self.getResource('item', itemId)

    def listItem(self, folderId, text=None):
        """
        Retrieves an item set from this folder ID.

        :param folderId: the parent folder's ID.
        :param text: query for full text search of items, optional.
        """
        params = {
            'folderId': folderId,
        }
        if text is not None:
            params['text'] = text
        return self.listResource('item', params)
def createFolder(self, parentId, name, description='', parentType='folder'):
"""
Creates and returns an folder
:param parentType: One of ('folder', 'user', 'collection')
"""
params = {
'parentId': parentId,
'parentType': parentType,
'name': name,
'description': description
}
return self.createResource('folder', params)
    def getFolder(self, folderId):
        """
        Retrieves a folder by its ID.

        :param folderId: A string containing the ID of the folder to retrieve
            from Girder.
        """
        return self.getResource('folder', folderId)

    def listFolder(self, parentId, parentFolderType='folder'):
        """
        Retrieves a folder set from this parent ID.

        :param parentId: The parent's ID.
        :param parentFolderType: One of ('folder', 'user', 'collection').
        """
        params = {
            'parentId': parentId,
            'parentType': parentFolderType
        }
        return self.listResource('folder', params)

    def getFolderAccess(self, folderId):
        """
        Retrieves a folder's access control document by its ID.

        :param folderId: A string containing the ID of the folder to retrieve
            access for from Girder.
        """
        return self.getResource('folder', folderId, 'access')

    def setFolderAccess(self, folderId, access, public):
        """
        Sets the passed in access control document along with the public
        value to the target folder.

        :param folderId: Id of the target folder.
        :param access: JSON document specifying access control.
        :param public: Boolean specifying the public value.
        """
        path = 'folder/' + folderId + '/access'
        params = {
            'access': access,
            'public': public
        }
        return self.put(path, params)
def _file_chunker(self, filepath, filesize=None):
"""
Generator returning chunks of a file in MAX_CHUNK_SIZE increments.
:param filepath: path to file on disk.
:param filesize: size of file on disk if known.
"""
if filesize is None:
filesize = os.path.getsize(filepath)
startbyte = 0
next_chunk_size = min(self.MAX_CHUNK_SIZE, filesize - startbyte)
with open(filepath, 'rb') as fd:
while next_chunk_size > 0:
chunk = fd.read(next_chunk_size)
yield (chunk, startbyte)
startbyte = startbyte + next_chunk_size
next_chunk_size = min(self.MAX_CHUNK_SIZE,
filesize - startbyte)
def _sha512_hasher(self, filepath):
"""
Returns sha512 hash of passed in file.
:param filepath: path to file on disk.
"""
hasher = hashlib.sha512()
for chunk, _ in self._file_chunker(filepath):
hasher.update(chunk)
return hasher.hexdigest()
    def isFileCurrent(self, itemId, filename, filepath):
        """
        Tests whether the passed in filepath exists in the item with itemId,
        with a name of filename, and with the same contents as the file at
        filepath. Returns a tuple (file_id, current) where

            file_id = id of the file with that filename under the item, or
                None if no such file exists under the item.
            current = boolean if the file with that filename under the item
                has the same contents as the file at filepath.

        :param itemId: ID of parent item for file.
        :param filename: name of file to look for under the parent item.
        :param filepath: path to file on disk.
        """
        path = 'item/' + itemId + '/files'
        item_files = self.get(path)
        for item_file in item_files:
            if filename == item_file['name']:
                file_id = item_file['_id']
                if 'sha512' in item_file:
                    # Compare by content hash when the assetstore provides
                    # one.
                    if item_file['sha512'] == self._sha512_hasher(filepath):
                        return (file_id, True)
                    else:
                        return (file_id, False)
                else:
                    # Some assetstores don't support sha512
                    # so we'll need to upload anyway
                    return (file_id, False)
        # Some files may already be stored under a different name, we'll need
        # to upload anyway in this case also.
        return (None, False)
    def uploadFileToItem(self, itemId, filepath):
        """
        Uploads a file to an item, in chunks.
        If the file already exists in the item with the same name and sha512,
        or if the file has 0 bytes, no uploading will be performed.

        :param itemId: ID of parent item for file.
        :param filepath: path to file on disk.
        """
        filename = os.path.basename(filepath)
        filepath = os.path.abspath(filepath)
        filesize = os.path.getsize(filepath)
        # Zero-byte files are skipped entirely.
        if filesize == 0:
            return
        # Check if the file already exists by name and sha512 in the file.
        file_id, current = self.isFileCurrent(itemId, filename, filepath)
        if file_id is not None and current:
            print 'File %s already exists in parent Item' % filename
            return
        if file_id is not None and not current:
            # Same name but stale contents: request an upload token that
            # replaces the existing file's contents.
            print 'File %s exists in Item, but with stale contents' % filename
            path = 'file/' + file_id + '/contents'
            params = {
                'size': filesize
            }
            obj = self.put(path, params)
            if '_id' in obj:
                uploadId = obj['_id']
            else:
                raise Exception(
                    'After creating an upload token for replacing file '
                    'contents, expected an object with an id. Got instead: ' +
                    json.dumps(obj))
        else:
            # New file: create it under the item and get an upload token.
            params = {
                'parentType': 'item',
                'parentId': itemId,
                'name': filename,
                'size': filesize
            }
            obj = self.post('file', params)
            if '_id' in obj:
                uploadId = obj['_id']
            else:
                raise Exception(
                    'After creating an upload token for a new file, expected '
                    'an object with an id. Got instead: ' + json.dumps(obj))
        # Stream the file contents to the server one chunk at a time.
        for chunk, startbyte in self._file_chunker(filepath, filesize):
            parameters = {
                'offset': startbyte,
                'uploadId': uploadId
            }
            filedata = {
                'chunk': chunk
            }
            path = 'file/chunk'
            obj = self.post(path, parameters=parameters, files=filedata)
            if '_id' not in obj:
                raise Exception('After uploading a file chunk, did'
                                ' not receive object with _id. Got instead: ' +
                                json.dumps(obj))
    def addMetadataToItem(self, itemId, metadata):
        """
        Takes an item ID and a dictionary containing the metadata to set.

        :param itemId: ID of the item to set metadata on.
        :param metadata: dictionary of metadata to set on item.
        """
        path = 'item/' + itemId + '/metadata'
        obj = self.put(path, data=json.dumps(metadata))
        return obj

    def addMetadataToFolder(self, folderId, metadata):
        """
        Takes a folder ID and a dictionary containing the metadata to set.

        :param folderId: ID of the folder to set metadata on.
        :param metadata: dictionary of metadata to set on folder.
        """
        path = 'folder/' + folderId + '/metadata'
        obj = self.put(path, data=json.dumps(metadata))
        return obj
def _transformFilename(self, name):
"""
Sanitize the filename a bit.
"""
if name in ('.', '..'):
name = '_' + name
name = name.replace(os.path.sep, '_')
if os.path.altsep:
name = name.replace(os.path.altsep, '_')
return _safeNameRegex.sub('_', name)
def downloadFile(self, fileId, path):
"""
Download a file to the given local path.
:param fileId: The ID of the Girder file to download.
:param path: The local path to write the file to.
"""
with open(path, 'wb') as fd:
req = requests.get('%s/file/%s/download' % (self.urlBase, fileId),
headers={'Girder-Token': self.token})
for chunk in req.iter_content(chunk_size=65536):
fd.write(chunk)
    def downloadItem(self, itemId, dest, name=None):
        """
        Download an item from Girder into a local folder. Each file in the
        item will be placed into the directory specified by the dest
        parameter. If the item contains multiple files or a single file with
        a different name than the item, the item will be created as a
        directory under dest and the files will become files within that
        directory.

        :param itemId: The Id of the Girder item to download.
        :param dest: The destination directory to write the item into.
        :param name: If the item name is known in advance, you may pass it
            here which will save a lookup to the server.
        """
        if name is None:
            item = self.get('item/' + itemId)
            name = item['name']
        offset = 0
        first = True
        # Page through the item's files 50 at a time.
        while True:
            files = self.get('item/%s/files' % itemId, parameters={
                'limit': 50,
                'offset': offset
            })
            if first:
                if len(files) == 1 and files[0]['name'] == name:
                    # Single file named like the item: download it directly
                    # into dest with no intermediate directory.
                    self.downloadFile(
                        files[0]['_id'],
                        os.path.join(dest, self._transformFilename(name)))
                    break
                else:
                    # Otherwise the item becomes a directory under dest.
                    dest = os.path.join(dest, self._transformFilename(name))
                    _safeMakedirs(dest)
            for file in files:
                self.downloadFile(
                    file['_id'],
                    os.path.join(dest, self._transformFilename(file['name'])))
            first = False
            offset += len(files)
            if len(files) < 50:
                break
    def downloadFolderRecursive(self, folderId, dest):
        """
        Download a folder recursively from Girder into a local directory.

        :param folderId: Id of the Girder folder to download.
        :param dest: The local download destination.
        """
        offset = 0
        # First pass: recurse into child folders, paging 50 at a time.
        while True:
            folders = self.get('folder', parameters={
                'limit': 50,
                'offset': offset,
                'parentType': 'folder',
                'parentId': folderId
            })
            for folder in folders:
                local = os.path.join(
                    dest, self._transformFilename(folder['name']))
                _safeMakedirs(local)
                self.downloadFolderRecursive(folder['_id'], local)
            offset += len(folders)
            if len(folders) < 50:
                break
        offset = 0
        # Second pass: download this folder's own items, paging 50 at a time.
        while True:
            items = self.get('item', parameters={
                'folderId': folderId,
                'limit': 50,
                'offset': offset
            })
            for item in items:
                self.downloadItem(item['_id'], dest, name=item['name'])
            offset += len(items)
            if len(items) < 50:
                break
def inheritAccessControlRecursive(self, ancestorFolderId, access=None,
public=None):
"""
Take the access control and public value of a folder and recursively
copy that access control and public value to all folder descendants,
replacing any existing access control on the descendant folders with
that of the ancestor folder.
:param ancestorFolderId: Id of the Girder folder to copy access
control from, to all of its descendant folders.
:param access: Dictionary Access control target, if None, will take
existing access control of ancestor folder
:param public: Boolean public value target, if None, will take existing
public value of ancestor folder
"""
offset = 0
if public is None:
public = self.getFolder(ancestorFolderId)['public']
if access is None:
access = self.getFolderAccess(ancestorFolderId)
while True:
self.setFolderAccess(ancestorFolderId, json.dumps(access), public)
folders = self.get('folder', parameters={
'limit': 50,
'offset': offset,
'parentType': 'folder',
'parentId': ancestorFolderId
})
for folder in folders:
self.inheritAccessControlRecursive(folder['_id'], access,
public)
offset += len(folders)
if len(folders) < 50:
break
    def add_folder_upload_callback(self, callback):
        """Saves a passed in callback function that will be called after each
        folder has completed. Multiple callback functions can be added, they
        will be called in the order they were added by calling this function.
        Callback functions will be called after a folder in Girder is created
        and all subfolders and items for that folder have completed
        uploading. Callback functions should take two parameters:

        - the folder in girder
        - the full path to the local folder

        :param callback: callback function to be called
        """
        self.folder_upload_callbacks.append(callback)

    def add_item_upload_callback(self, callback):
        """Saves a passed in callback function that will be called after each
        item has completed. Multiple callback functions can be added, they
        will be called in the order they were added by calling this function.
        Callback functions will be called after an item in Girder is created
        and all files for that item have been uploaded. Callback functions
        should take two parameters:

        - the item in girder
        - the full path to the local folder or file comprising the item

        :param callback: callback function to be called
        """
        self.item_upload_callbacks.append(callback)
def _load_or_create_folder(self, local_folder, parent_id, parent_type):
"""Returns a folder in Girder with the same name as the passed in
local_folder under the parent_id, creating a new Girder folder
if need be or returning an existing folder with that name.
:param local_folder: full path to the local folder
:param parent_id: id of parent in Girder
:param parent_type: one of (collection, folder, user)
"""
child_folders = self.listFolder(parent_id, parent_type)
folder_name = os.path.basename(local_folder)
folder = None
for child in child_folders:
if child['name'] == folder_name:
folder = child
if folder is None:
folder = self.createFolder(
parent_id, folder_name, parentType=parent_type)
return folder
def _has_only_files(self, local_folder):
"""Returns whether a folder has only files. This will be false if the
folder contains any subdirectories.
:param local_folder: full path to the local folder
"""
return not any(os.path.isdir(os.path.join(local_folder, entry))
for entry in os.listdir(local_folder))
def _create_or_reuse_item(self, local_file, parent_folder_id,
reuse_existing=False):
"""Create an item from the local_file in the parent_folder
:param local_file: full path to a file on the local file system
:param parent_folder_id: id of parent folder in Girder
:param reuse_existing: boolean indicating whether to accept an existing
item of the same name in the same location, or create a new one.
"""
local_item_name = os.path.basename(local_file)
item = None
if reuse_existing:
children = self.listItem(parent_folder_id, local_item_name)
for child in children:
if child['name'] == local_item_name:
item = child
break
if item is None:
item = self.createItem(parent_folder_id, local_item_name,
description='')
return item
    def _upload_file_to_item(self, local_file, parent_item_id, file_path):
        """Helper function to upload a file to an item

        :param local_file: name of local file to upload (currently unused;
            the upload reads from file_path)
        :param parent_item_id: id of parent item in Girder to add file to
        :param file_path: full path to the file
        """
        self.uploadFileToItem(parent_item_id, file_path)
    def _upload_as_item(self, local_file, parent_folder_id, file_path,
                        reuse_existing=False):
        """Function for doing an upload of a file as an item.

        :param local_file: name of local file to upload
        :param parent_folder_id: id of parent folder in Girder
        :param file_path: full path to the file
        :param reuse_existing: boolean indicating whether to accept an
            existing item of the same name in the same location, or create a
            new one instead
        """
        print 'Uploading Item from %s' % local_file
        if not self.dryrun:
            current_item = self._create_or_reuse_item(
                local_file, parent_folder_id, reuse_existing)
            self._upload_file_to_item(
                local_file, current_item['_id'], file_path)
            # Notify registered item callbacks once the upload completes.
            for callback in self.item_upload_callbacks:
                callback(current_item, file_path)
    def _upload_folder_as_item(self, local_folder, parent_folder_id,
                               reuse_existing=False):
        """Take a folder and use its base name as the name of a new item.
        Then, upload its containing files into the new item as bitstreams.

        :param local_folder: The path to the folder to be uploaded.
        :param parent_folder_id: Id of the destination folder for the new
            item.
        :param reuse_existing: boolean indicating whether to accept an
            existing item of the same name in the same location, or create a
            new one instead
        """
        print 'Creating Item from folder %s' % local_folder
        if not self.dryrun:
            item = self._create_or_reuse_item(local_folder, parent_folder_id,
                                              reuse_existing)
        subdircontents = sorted(os.listdir(local_folder))
        # for each file in the subdir, add it to the item
        filecount = len(subdircontents)
        for (ind, current_file) in enumerate(subdircontents):
            filepath = os.path.join(local_folder, current_file)
            if current_file in self.blacklist:
                if self.dryrun:
                    print "Ignoring file %s as blacklisted" % current_file
                continue
            print 'Adding file %s, (%d of %d) to Item' % (current_file,
                                                          ind + 1, filecount)
            if not self.dryrun:
                self._upload_file_to_item(current_file, item['_id'], filepath)
        # Notify registered item callbacks once all files are uploaded.
        if not self.dryrun:
            for callback in self.item_upload_callbacks:
                callback(item, local_folder)
    def _upload_folder_recursive(self, local_folder, parent_id, parent_type,
                                 leaf_folders_as_items=False,
                                 reuse_existing=False):
        """Recursively upload a folder and all of its descendants.

        :param local_folder: full path to the local folder to be uploaded
        :param parent_id: id of the parent in Girder, where the new folder
            will be added
        :param parent_type: one of (collection, folder, user)
        :param leaf_folders_as_items: whether leaf folders (folders with no
            subfolders) should have all of their files uploaded as a single
            item instead of mirroring the folder
        :param reuse_existing: boolean indicating whether to accept an
            existing item of the same name in the same location, or create
            a new one instead
        """
        # Leaf folders can optionally be collapsed into a single item, but
        # items can only live under folders, never collections/users.
        if leaf_folders_as_items and self._has_only_files(local_folder):
            if parent_type != 'folder':
                raise Exception(
                    ('Attempting to upload a folder as an item under a %s. '
                     % parent_type) + 'Items can only be added to folders.')
            else:
                self._upload_folder_as_item(local_folder, parent_id,
                                            reuse_existing)
        else:
            filename = os.path.basename(local_folder)
            if filename in self.blacklist:
                if self.dryrun:
                    print "Ignoring file %s as it is blacklisted" % filename
                return
            print 'Creating Folder from %s' % local_folder
            if self.dryrun:
                # create a dryrun placeholder so children can still be
                # reported without talking to the server
                folder = {'_id': 'dryrun'}
            else:
                folder = self._load_or_create_folder(
                    local_folder, parent_id, parent_type)
            for entry in sorted(os.listdir(local_folder)):
                if entry in self.blacklist:
                    if self.dryrun:
                        print "Ignoring file %s as it is blacklisted" % entry
                    continue
                full_entry = os.path.join(local_folder, entry)
                if os.path.islink(full_entry):
                    # os.walk skips symlinks by default; do the same here
                    print "Skipping file %s as it is a symlink" % entry
                    continue
                elif os.path.isdir(full_entry):
                    # At this point we should have an actual folder, so can
                    # pass that as the parent_type
                    self._upload_folder_recursive(
                        full_entry, folder['_id'], 'folder',
                        leaf_folders_as_items, reuse_existing)
                else:
                    # Plain file: upload it as its own item.
                    self._upload_as_item(
                        entry, folder['_id'], full_entry, reuse_existing)
            if not self.dryrun:
                for callback in self.folder_upload_callbacks:
                    callback(folder, local_folder)
def upload(self, file_pattern, parent_id, parent_type='folder',
leaf_folders_as_items=False, reuse_existing=False):
"""Upload a pattern of files.
This will recursively walk down every tree in the file pattern to
create a hierarchy on the server under the parent_id.
:param file_pattern: a glob pattern for files that will be uploaded,
recursively copying any file folder structures
:param parent_id: id of the parent in girder
:param parent_type: one of (collection, folder, user) default of folder
:param leaf_folders_as_items: whether leaf folders should have all
files uploaded as single items
:param reuse_existing: boolean indicating whether to accept an existing
item of the same name in the same location, or create a new one instead
"""
empty = True
for current_file in glob.iglob(file_pattern):
empty = False
current_file = os.path.normpath(current_file)
filename = os.path.basename(current_file)
if filename in self.blacklist:
if self.dryrun:
print "Ignoring file %s as it is blacklisted" % filename
continue
if os.path.isfile(current_file):
if parent_type != 'folder':
raise Exception(('Attempting to upload an item under a %s.'
% parent_type) +
' Items can only be added to folders.')
else:
self._upload_as_item(
os.path.basename(current_file), parent_id,
current_file, reuse_existing)
else:
self._upload_folder_recursive(
current_file, parent_id, parent_type,
leaf_folders_as_items, reuse_existing)
if empty:
print 'No matching files: ' + file_pattern
| |
import os
import numpy as np
def TH14evaldet(detfilename, gtpath, subset):
    """Evaluate THUMOS'14 temporal action detection results.

    :param detfilename: name of the detection result file (relative to
        ``gtpath``); each line is ``video start end classid ... confidence``
    :param gtpath: directory holding ``detclasslist.txt`` and the per-class
        ground-truth files named ``<class>_<subset>.txt``
    :param subset: ground-truth subset suffix, e.g. 'test' or 'val'
    :returns: (pr_all, ap_all, mean_ap): per-class PR records, per-class
        APs, and the mean AP — one entry per overlap threshold
    """
    # Read the class list; classNamesAll additionally contains the
    # 'Ambiguous' pseudo-class (id 102) whose segments are discounted.
    detClassFile = open(os.path.join(gtpath, 'detclasslist.txt'))
    classNames = dict()
    classNamesAll = dict()
    for line in detClassFile.readlines():
        fields = line.split()
        classNames[fields[-1].strip()] = int(fields[0].strip())
        classNamesAll[fields[-1].strip()] = int(fields[0].strip())
    classNamesAll['Ambiguous'] = 102
    # Reverse mapping: class id -> class name ('Ambiguous' excluded).
    # .items() replaces the Python-2-only .iteritems().
    indName = dict((value, key) for key, value in classNames.items())
    # Collect ground-truth events from the per-class annotation files.
    gtEvents = []
    for className in classNamesAll.keys():
        gtfilename = className + '_' + subset + '.txt'
        try:
            gtfile = open(os.path.join(gtpath, gtfilename))
        except IOError:
            # BUGFIX: previously execution fell through and crashed with a
            # NameError on the undefined file handle; skip missing classes.
            print('ERROR: Cannot open the gt file!')
            continue
        for videoFile in gtfile.readlines():
            fields = videoFile.split()
            gtEvents.append({
                'videoName': fields[0].strip(),
                'timeInterval': [float(fields[1].strip()),
                                 float(fields[2].strip())],
                'className': className,
                'conf': 1,
            })
    # Parse detection results.
    try:
        detFile = open(os.path.join(gtpath, detfilename))
    except IOError:
        # BUGFIX: re-raise instead of falling through to a NameError on the
        # undefined handle; the original error message is preserved.
        print('ERROR: Cannot find the dection result file %s\n' % detfilename)
        raise
    detEvents = []
    for detLine in detFile.readlines():
        strList = detLine.split()
        if indName.get(int(strList[3].strip())):
            detEvents.append({
                'videoName': strList[0].split('.')[0].strip(),
                'timeInterval': [float(strList[1].strip()),
                                 float(strList[2].strip())],
                'className': indName[int(strList[3].strip())],
                'conf': float(strList[-1].strip()),
            })
        else:
            # BUGFIX: the class id was never interpolated into the message
            # (the original printed the literal '%d').
            print('WARNING: Reported class ID %d is not among THUMOS14 '
                  'detection classes.\n' % int(strList[3].strip()))
    # Evaluate per-class PR at multiple temporal-overlap thresholds.
    overlapthreshall = [0.1, 0.2, 0.3, 0.4, 0.5]
    ap_all = []
    pr_all = []
    mean_ap = []  # renamed from 'map' to avoid shadowing the builtin
    for olap in overlapthreshall:
        ap_c = []
        pr_c = []
        for className in classNames.keys():
            rec, prec, ap = TH14evaldetpr(detEvents, gtEvents, className, olap)
            # BUGFIX: build a fresh dict per class; previously one shared
            # dict was appended repeatedly, so every entry of pr_c ended up
            # aliasing the data of the last class evaluated.
            pr_c.append({
                'classInd': classNames[className],
                'overlapThresh': olap,
                'prec': prec,
                'ap': ap,
            })
            ap_c.append(ap)
            print('AP:%1.3f at overlap %1.1f for %s\n' % (ap, olap, className))
        ap_all.append(ap_c)
        pr_all.append(pr_c)
        mean_ap.append(sum(ap_c) / len(ap_c))
    return pr_all, ap_all, mean_ap
def TH14evaldetpr(detEvents, gtEvents, className, olap):
    """Compute precision/recall and AP for one class at one overlap level.

    Detections are matched greedily to ground-truth segments per video;
    unmatched detections that overlap only 'Ambiguous' segments are
    discarded rather than counted as false positives.

    :param detEvents: detection dicts (videoName, timeInterval, className,
        conf)
    :param gtEvents: ground-truth dicts with the same keys
    :param className: class to evaluate
    :param olap: minimum temporal intersection-over-union for a match
    :returns: (rec, prec, ap) with rec/prec as cumulative numpy arrays
    """
    videoNames = set()
    detEventSub = []
    gtEventSub = []
    ambEventSub = []
    for detEvent in detEvents:
        videoNames.add(detEvent['videoName'])
        if detEvent['className'] == className:
            detEventSub.append(detEvent)
    for gtEvent in gtEvents:
        videoNames.add(gtEvent['videoName'])
        if gtEvent['className'] == 'Ambiguous':
            ambEventSub.append(gtEvent)
        if gtEvent['className'] == className:
            gtEventSub.append(gtEvent)
    npos = len(gtEventSub)
    assert npos > 0, 'no ground truth events for class %s' % className
    tpConf = np.array([])
    fpConf = np.array([])
    for videoName in videoNames:
        detEventSubVideo = [e for e in detEventSub
                            if e['videoName'] == videoName]
        gtEventSubVideo = [e for e in gtEventSub
                           if e['videoName'] == videoName]
        ambEventSubVideo = [e for e in ambEventSub
                            if e['videoName'] == videoName]
        if len(detEventSubVideo):
            # BUGFIX: the old cmp-based sort only exists on Python 2; a key
            # function yields the same stable, descending-confidence order.
            detEventSubVideo.sort(key=lambda e: e['conf'], reverse=True)
            conf = np.array([e['conf'] for e in detEventSubVideo])
            indFree = np.ones(len(detEventSubVideo))
            indAmb = np.zeros(len(detEventSubVideo))
            if len(gtEventSubVideo):
                # Greedy matching: each gt segment claims its best
                # still-free detection if the overlap exceeds the threshold.
                ov = IntervalOverlapSeconds(
                    [e['timeInterval'] for e in gtEventSubVideo],
                    [e['timeInterval'] for e in detEventSubVideo])
                for ov_r in ov:
                    if sum(indFree):
                        npov = np.array(ov_r)
                        npov[indFree == 0] = 0
                        if npov.max() > olap:
                            indFree[npov.argmax()] = 0
            if len(ambEventSubVideo):
                ovamb = IntervalOverlapSeconds(
                    [e['timeInterval'] for e in ambEventSubVideo],
                    [e['timeInterval'] for e in detEventSubVideo])
                indAmb = np.array(ovamb).sum(0)
            # Unmatched detections touching no ambiguous segment are FPs.
            fpConf = np.append(fpConf,
                               conf[indFree == 1][indAmb[indFree == 1] == 0])
            tpConf = np.append(tpConf, conf[indFree == 0])
    # Merge TP/FP confidences, sort by descending confidence, accumulate.
    Conf = np.append(tpConf, fpConf)
    ConfInd = np.append(np.ones(tpConf.shape), 2 * np.ones(fpConf.shape))
    ConfInd = ConfInd[np.argsort(-Conf)]
    TP = np.zeros(Conf.shape)
    FP = np.zeros(Conf.shape)
    TP[ConfInd == 1] = 1
    FP[ConfInd == 2] = 1
    TP = np.cumsum(TP)
    FP = np.cumsum(FP)
    rec = TP / npos
    prec = TP / (FP + TP)
    ap = prap(rec, prec)
    return rec, prec, ap
def prap(rec, prec):
    """Interpolated average precision over the 11 standard recall points.

    :param rec: recall values (numpy array), aligned with ``prec``
    :param prec: precision values (numpy array)
    :returns: mean over t in {0, 0.1, ..., 1} of max precision at rec >= t
    """
    thresholds = np.array([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
    ap = 0.0
    for thresh in thresholds:
        candidates = prec[rec >= thresh]
        p = candidates.max() if candidates.size else 0
        ap += p / len(thresholds)
    return ap
def IntervalOverlapSeconds(i1, i2, normtype=0):
    """Pairwise overlap matrix between two lists of [start, end] intervals.

    :param i1: list of intervals (rows of the result)
    :param i2: list of intervals (columns of the result)
    :param normtype: normalization mode, forwarded to
        IntervalSingleOverlapSeconds
    :returns: nested list ov with ov[r][c] = overlap of i1[r] with i2[c]
    """
    return [[IntervalSingleOverlapSeconds(a, b, normtype) for b in i2]
            for a in i1]
def IntervalSingleOverlapSeconds(ii1, ii2, normtype):
    """Overlap between two [start, end] intervals, under a chosen norm.

    Endpoints are sorted first, so reversed intervals are accepted.

    :param normtype: <0 -> raw seconds; 1 -> divide by length of ii1;
        2 -> divide by length of ii2; otherwise divide by the union length
        (intersection-over-union)
    :returns: 0.0 when the intervals do not overlap
    """
    a = np.sort(np.array(ii1))
    b = np.sort(np.array(ii2))
    inter = np.min([a[1], b[1]]) - np.max([a[0], b[0]])
    if inter <= 0:
        return 0.0
    if normtype < 0:
        denom = 1.0
    elif normtype == 1:
        denom = a[1] - a[0]
    elif normtype == 2:
        denom = b[1] - b[0]
    else:
        denom = np.max([a[1], b[1]]) - np.min([a[0], b[0]])
    return inter / denom
if __name__ == '__main__':
    # Example invocation: evaluate one detection run against the THUMOS'14
    # temporal annotations. NOTE(review): the detection-file path below is
    # machine-specific and will need adjusting.
    pr_all, ap_all, map = TH14evaldet('/home/gzn/code/FisherTensor/FisherTensor-MATLAB/eval/Run-det-gzn_20170924_frame.txt',
                                      '../data/TH14_Temporal_Annotations_Test/annotations/annotation/', 'test')
    # pr_all, ap_all, map = TH14evaldet('../results/Run-2-det.txt','../data/TH14evalkit/groundtruth/','val')
    # Print the per-threshold mean AP values.
    print map
| |
"""Tests for options manager for :class:`Poly` and public API functions. """
from sympy.polys.polyoptions import (
Options, Expand, Gens, Wrt, Sort, Order, Field, Greedy, Domain,
Split, Gaussian, Extension, Modulus, Symmetric, Strict, Auto,
Frac, Formal, Polys, Include, All, Gen, Symbols, Method)
from sympy.polys.monomialtools import lex
from sympy.polys.domains import FF, GF, ZZ, QQ, RR, EX
from sympy.polys.polyerrors import OptionError, GeneratorsError
from sympy import Integer, Symbol, I, sqrt
from sympy.utilities.pytest import raises
from sympy.abc import x, y, z
def test_Options_clone():
    """clone() must return an updated copy and leave the original intact."""
    opt = Options((x, y, z), {'domain': 'ZZ'})
    assert opt.gens == (x, y, z)
    assert opt.domain == ZZ
    assert 'order' not in opt
    cloned = opt.clone({'gens': (x, y), 'order': 'lex'})
    # The original options object is unchanged...
    assert opt.gens == (x, y, z)
    assert opt.domain == ZZ
    assert 'order' not in opt
    # ...while the clone carries the overrides.
    assert cloned.gens == (x, y)
    assert cloned.domain == ZZ
    assert 'order' in cloned
# Boolean-flag options (Expand, Field, Greedy, ...): preprocess normalizes
# bool-like inputs (True/False/1/0) to real booleans and rejects anything
# else; postprocess is a no-op on an already-normalized options dict.
def test_Expand_preprocess():
    assert Expand.preprocess(False) is False
    assert Expand.preprocess(True) is True
    assert Expand.preprocess(0) is False
    assert Expand.preprocess(1) is True
    raises(OptionError, lambda: Expand.preprocess(x))
def test_Expand_postprocess():
    opt = {'expand': True}
    Expand.postprocess(opt)
    assert opt == {'expand': True}
def test_Gens_preprocess():
    # (None,) means "no generators"; nested tuples are flattened.
    assert Gens.preprocess((None,)) == ()
    assert Gens.preprocess((x, y, z)) == (x, y, z)
    assert Gens.preprocess(((x, y, z),)) == (x, y, z)
    a = Symbol('a', commutative=False)
    # Duplicate or non-commutative generators are rejected.
    raises(GeneratorsError, lambda: Gens.preprocess((x, x, y)))
    raises(GeneratorsError, lambda: Gens.preprocess((x, y, a)))
def test_Gens_postprocess():
    opt = {'gens': (x, y)}
    Gens.postprocess(opt)
    assert opt == {'gens': (x, y)}
def test_Wrt_preprocess():
    # Accepts a symbol, a comma/space separated string, or a list; always
    # yields a list of generator names.
    assert Wrt.preprocess(x) == ['x']
    assert Wrt.preprocess('') == []
    assert Wrt.preprocess(' ') == []
    assert Wrt.preprocess('x,y') == ['x', 'y']
    assert Wrt.preprocess('x y') == ['x', 'y']
    assert Wrt.preprocess('x, y') == ['x', 'y']
    assert Wrt.preprocess('x , y') == ['x', 'y']
    assert Wrt.preprocess(' x, y') == ['x', 'y']
    assert Wrt.preprocess(' x, y') == ['x', 'y']
    assert Wrt.preprocess([x, y]) == ['x', 'y']
    raises(OptionError, lambda: Wrt.preprocess(','))
    raises(OptionError, lambda: Wrt.preprocess(0))
def test_Wrt_postprocess():
    opt = {'wrt': ['x']}
    Wrt.postprocess(opt)
    assert opt == {'wrt': ['x']}
def test_Sort_preprocess():
    # Accepts a sequence of symbols or a '>'-separated string.
    assert Sort.preprocess([x, y, z]) == ['x', 'y', 'z']
    assert Sort.preprocess((x, y, z)) == ['x', 'y', 'z']
    assert Sort.preprocess('x > y > z') == ['x', 'y', 'z']
    assert Sort.preprocess('x>y>z') == ['x', 'y', 'z']
    raises(OptionError, lambda: Sort.preprocess(0))
    raises(OptionError, lambda: Sort.preprocess(set([x, y, z])))
def test_Sort_postprocess():
    opt = {'sort': 'x > y'}
    Sort.postprocess(opt)
    assert opt == {'sort': 'x > y'}
def test_Order_preprocess():
    # Order names resolve to monomial-order callables.
    assert Order.preprocess('lex') == lex
def test_Order_postprocess():
    opt = {'order': True}
    Order.postprocess(opt)
    assert opt == {'order': True}
def test_Field_preprocess():
    assert Field.preprocess(False) is False
    assert Field.preprocess(True) is True
    assert Field.preprocess(0) is False
    assert Field.preprocess(1) is True
    raises(OptionError, lambda: Field.preprocess(x))
def test_Field_postprocess():
    opt = {'field': True}
    Field.postprocess(opt)
    assert opt == {'field': True}
def test_Greedy_preprocess():
    assert Greedy.preprocess(False) is False
    assert Greedy.preprocess(True) is True
    assert Greedy.preprocess(0) is False
    assert Greedy.preprocess(1) is True
    raises(OptionError, lambda: Greedy.preprocess(x))
def test_Greedy_postprocess():
    opt = {'greedy': True}
    Greedy.postprocess(opt)
    assert opt == {'greedy': True}
def test_Domain_preprocess():
    # Domain objects pass through unchanged.
    assert Domain.preprocess(ZZ) == ZZ
    assert Domain.preprocess(QQ) == QQ
    assert Domain.preprocess(EX) == EX
    assert Domain.preprocess(FF(2)) == FF(2)
    assert Domain.preprocess(ZZ[x,y]) == ZZ[x,y]
    # Both short ('Z', 'Q') and canonical ('ZZ', 'QQ') spellings parse.
    assert Domain.preprocess('Z') == ZZ
    assert Domain.preprocess('Q') == QQ
    assert Domain.preprocess('ZZ') == ZZ
    assert Domain.preprocess('QQ') == QQ
    assert Domain.preprocess('EX') == EX
    assert Domain.preprocess('FF(23)') == FF(23)
    assert Domain.preprocess('GF(23)') == GF(23)
    # Brackets denote polynomial rings; empty generator lists are invalid.
    raises(OptionError, lambda: Domain.preprocess('Z[]'))
    assert Domain.preprocess('Z[x]') == ZZ[x]
    assert Domain.preprocess('Q[x]') == QQ[x]
    assert Domain.preprocess('ZZ[x]') == ZZ[x]
    assert Domain.preprocess('QQ[x]') == QQ[x]
    assert Domain.preprocess('Z[x,y]') == ZZ[x,y]
    assert Domain.preprocess('Q[x,y]') == QQ[x,y]
    assert Domain.preprocess('ZZ[x,y]') == ZZ[x,y]
    assert Domain.preprocess('QQ[x,y]') == QQ[x,y]
    # Parentheses denote fraction fields.
    raises(OptionError, lambda: Domain.preprocess('Z()'))
    assert Domain.preprocess('Z(x)') == ZZ.frac_field(x)
    assert Domain.preprocess('Q(x)') == QQ.frac_field(x)
    assert Domain.preprocess('ZZ(x)') == ZZ.frac_field(x)
    assert Domain.preprocess('QQ(x)') == QQ.frac_field(x)
    assert Domain.preprocess('Z(x,y)') == ZZ.frac_field(x,y)
    assert Domain.preprocess('Q(x,y)') == QQ.frac_field(x,y)
    assert Domain.preprocess('ZZ(x,y)') == ZZ.frac_field(x,y)
    assert Domain.preprocess('QQ(x,y)') == QQ.frac_field(x,y)
    # Angle brackets denote algebraic number fields.
    assert Domain.preprocess('Q<I>') == QQ.algebraic_field(I)
    assert Domain.preprocess('QQ<I>') == QQ.algebraic_field(I)
    assert Domain.preprocess('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
    assert Domain.preprocess('QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
    raises(OptionError, lambda: Domain.preprocess('abc'))
def test_Domain_postprocess():
    # The domain's generators must not clash with (or stand in for
    # missing) polynomial generators.
    raises(GeneratorsError, lambda: Domain.postprocess({'gens': (x, y), 'domain': ZZ[y, z]}))
    raises(GeneratorsError, lambda: Domain.postprocess({'gens': (), 'domain': EX}))
    raises(GeneratorsError, lambda: Domain.postprocess({'domain': EX}))
def test_Split_preprocess():
    assert Split.preprocess(False) is False
    assert Split.preprocess(True) is True
    assert Split.preprocess(0) is False
    assert Split.preprocess(1) is True
    raises(OptionError, lambda: Split.preprocess(x))
def test_Split_postprocess():
    # 'split' is accepted syntactically but not implemented yet.
    raises(NotImplementedError, lambda: Split.postprocess({'split': True}))
def test_Gaussian_preprocess():
    assert Gaussian.preprocess(False) is False
    assert Gaussian.preprocess(True) is True
    assert Gaussian.preprocess(0) is False
    assert Gaussian.preprocess(1) is True
    raises(OptionError, lambda: Gaussian.preprocess(x))
def test_Gaussian_postprocess():
    # gaussian=True expands into an extension by I over QQ.
    opt = {'gaussian': True}
    Gaussian.postprocess(opt)
    assert opt == {
        'gaussian': True,
        'extension': set([I]),
        'domain': QQ.algebraic_field(I),
    }
def test_Extension_preprocess():
    assert Extension.preprocess(True) is True
    assert Extension.preprocess(1) is True
    assert Extension.preprocess([]) is None
    # Single elements and lists both normalize to a set of extensions.
    assert Extension.preprocess(sqrt(2)) == set([sqrt(2)])
    assert Extension.preprocess([sqrt(2)]) == set([sqrt(2)])
    assert Extension.preprocess([sqrt(2), I]) == set([sqrt(2), I])
    raises(OptionError, lambda: Extension.preprocess(False))
    raises(OptionError, lambda: Extension.preprocess(0))
def test_Extension_postprocess():
    # A concrete extension set produces the matching algebraic field.
    opt = {'extension': set([sqrt(2)])}
    Extension.postprocess(opt)
    assert opt == {
        'extension': set([sqrt(2)]),
        'domain': QQ.algebraic_field(sqrt(2)),
    }
    # extension=True alone does not fix a domain.
    opt = {'extension': True}
    Extension.postprocess(opt)
    assert opt == {'extension': True}
def test_Modulus_preprocess():
    assert Modulus.preprocess(23) == 23
    assert Modulus.preprocess(Integer(23)) == 23
    raises(OptionError, lambda: Modulus.preprocess(0))
    raises(OptionError, lambda: Modulus.preprocess(x))
def test_Modulus_postprocess():
    # A modulus implies a finite-field domain; 'symmetric' selects the
    # symmetric representation of residues.
    opt = {'modulus': 5}
    Modulus.postprocess(opt)
    assert opt == {
        'modulus': 5,
        'domain': FF(5),
    }
    opt = {'modulus': 5, 'symmetric': False}
    Modulus.postprocess(opt)
    assert opt == {
        'modulus': 5,
        'domain': FF(5, False),
        'symmetric': False,
    }
# Remaining boolean flags (Symmetric, Strict, Auto, Frac, Formal, Polys,
# Include, All) follow the same normalize-bool/no-op-postprocess contract.
def test_Symmetric_preprocess():
    assert Symmetric.preprocess(False) is False
    assert Symmetric.preprocess(True) is True
    assert Symmetric.preprocess(0) is False
    assert Symmetric.preprocess(1) is True
    raises(OptionError, lambda: Symmetric.preprocess(x))
def test_Symmetric_postprocess():
    opt = {'symmetric': True}
    Symmetric.postprocess(opt)
    assert opt == {'symmetric': True}
def test_Strict_preprocess():
    assert Strict.preprocess(False) is False
    assert Strict.preprocess(True) is True
    assert Strict.preprocess(0) is False
    assert Strict.preprocess(1) is True
    raises(OptionError, lambda: Strict.preprocess(x))
def test_Strict_postprocess():
    opt = {'strict': True}
    Strict.postprocess(opt)
    assert opt == {'strict': True}
def test_Auto_preprocess():
    assert Auto.preprocess(False) is False
    assert Auto.preprocess(True) is True
    assert Auto.preprocess(0) is False
    assert Auto.preprocess(1) is True
    raises(OptionError, lambda: Auto.preprocess(x))
def test_Auto_postprocess():
    opt = {'auto': True}
    Auto.postprocess(opt)
    assert opt == {'auto': True}
def test_Frac_preprocess():
    assert Frac.preprocess(False) is False
    assert Frac.preprocess(True) is True
    assert Frac.preprocess(0) is False
    assert Frac.preprocess(1) is True
    raises(OptionError, lambda: Frac.preprocess(x))
def test_Frac_postprocess():
    opt = {'frac': True}
    Frac.postprocess(opt)
    assert opt == {'frac': True}
def test_Formal_preprocess():
    assert Formal.preprocess(False) is False
    assert Formal.preprocess(True) is True
    assert Formal.preprocess(0) is False
    assert Formal.preprocess(1) is True
    raises(OptionError, lambda: Formal.preprocess(x))
def test_Formal_postprocess():
    opt = {'formal': True}
    Formal.postprocess(opt)
    assert opt == {'formal': True}
def test_Polys_preprocess():
    assert Polys.preprocess(False) is False
    assert Polys.preprocess(True) is True
    assert Polys.preprocess(0) is False
    assert Polys.preprocess(1) is True
    raises(OptionError, lambda: Polys.preprocess(x))
def test_Polys_postprocess():
    opt = {'polys': True}
    Polys.postprocess(opt)
    assert opt == {'polys': True}
def test_Include_preprocess():
    assert Include.preprocess(False) is False
    assert Include.preprocess(True) is True
    assert Include.preprocess(0) is False
    assert Include.preprocess(1) is True
    raises(OptionError, lambda: Include.preprocess(x))
def test_Include_postprocess():
    opt = {'include': True}
    Include.postprocess(opt)
    assert opt == {'include': True}
def test_All_preprocess():
    assert All.preprocess(False) is False
    assert All.preprocess(True) is True
    assert All.preprocess(0) is False
    assert All.preprocess(1) is True
    raises(OptionError, lambda: All.preprocess(x))
def test_All_postprocess():
    opt = {'all': True}
    All.postprocess(opt)
    assert opt == {'all': True}
def test_Gen_postprocess():
    opt = {'gen': x}
    Gen.postprocess(opt)
    assert opt == {'gen': x}
def test_Symbols_preprocess():
    raises(OptionError, lambda: Symbols.preprocess(x))
def test_Symbols_postprocess():
    opt = {'symbols': [x, y, z]}
    Symbols.postprocess(opt)
    assert opt == {'symbols': [x, y, z]}
def test_Method_preprocess():
    raises(OptionError, lambda: Method.preprocess(10))
def test_Method_postprocess():
    opt = {'method': 'f5b'}
    Method.postprocess(opt)
    assert opt == {'method': 'f5b'}
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from mock import patch
from mock import MagicMock as Mock
from pyrax.clouddatabases import CloudDatabaseDatabase
from pyrax.clouddatabases import CloudDatabaseFlavor
from pyrax.clouddatabases import CloudDatabaseInstance
from pyrax.clouddatabases import CloudDatabaseUser
from pyrax.clouddatabases import CloudDatabaseVolume
from pyrax.clouddatabases import assure_instance
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
# Generic example endpoint; NOTE(review): not referenced by the tests
# visible in this chunk — confirm before removing.
example_uri = "http://example.com"
class CloudDatabasesTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        # No extra initialization beyond unittest.TestCase.
        super(CloudDatabasesTest, self).__init__(*args, **kwargs)
    def setUp(self):
        # Fresh fake instance/client per test so mock state never leaks
        # between tests.
        self.instance = fakes.FakeDatabaseInstance()
        self.client = fakes.FakeDatabaseClient()
    def tearDown(self):
        pass
    def test_assure_instance(self):
        # The assure_instance decorator must accept either an instance
        # object or a bare ID, resolving IDs via the client's manager.
        class TestClient(object):
            _manager = fakes.FakeManager()
            @assure_instance
            def test_method(self, instance):
                return instance
        client = TestClient()
        client._manager.get = Mock(return_value=self.instance)
        # Pass the instance
        ret = client.test_method(self.instance)
        self.assertTrue(ret is self.instance)
        # Pass the ID
        ret = client.test_method(self.instance.id)
        self.assertTrue(ret is self.instance)
    @patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
    def test_instantiate_instance(self):
        # Constructing an instance from a dict must wrap the raw volume
        # dict in a CloudDatabaseVolume.
        inst = CloudDatabaseInstance(fakes.FakeManager(), {"id": 42,
                "volume": {"size": 1, "used": 0.2}})
        self.assertTrue(isinstance(inst, CloudDatabaseInstance))
        self.assertTrue(isinstance(inst.volume, CloudDatabaseVolume))
    def test_list_databases(self):
        # list_databases forwards pagination args to the database manager.
        inst = self.instance
        inst._database_manager.list = Mock()
        limit = utils.random_unicode()
        marker = utils.random_unicode()
        inst.list_databases(limit=limit, marker=marker)
        inst._database_manager.list.assert_called_once_with(limit=limit,
                marker=marker)
    def test_list_users(self):
        inst = self.instance
        inst._user_manager.list = Mock()
        limit = utils.random_unicode()
        marker = utils.random_unicode()
        inst.list_users(limit=limit, marker=marker)
        inst._user_manager.list.assert_called_once_with(limit=limit,
                marker=marker)
    def test_get_database(self):
        # get_database finds a database by name among the listed ones.
        inst = self.instance
        db1 = fakes.FakeEntity()
        db1.name = "a"
        db2 = fakes.FakeEntity()
        db2.name = "b"
        inst.list_databases = Mock(return_value=[db1, db2])
        ret = inst.get_database("a")
        self.assertEqual(ret, db1)
    def test_get_database_bad(self):
        inst = self.instance
        db1 = fakes.FakeEntity()
        db1.name = "a"
        db2 = fakes.FakeEntity()
        db2.name = "b"
        inst.list_databases = Mock(return_value=[db1, db2])
        self.assertRaises(exc.NoSuchDatabase, inst.get_database, "z")
    def test_dbmgr_get(self):
        # Manager.get must also wrap the raw volume dict.
        mgr = fakes.FakeDatabaseManager()
        rsrc = fakes.FakeDatabaseInstance()
        rsrc.volume = {}
        mgr._get = Mock(return_value=rsrc)
        ret = mgr.get("fake")
        self.assertTrue(isinstance(ret, CloudDatabaseInstance))
        self.assertTrue(isinstance(ret.volume, CloudDatabaseVolume))
    def test_dbmgr_create_backup(self):
        # create_backup posts the expected payload to /backups.
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        description = utils.random_unicode()
        mgr.api.method_post = Mock(return_value=(None, {"backup": {}}))
        expected_uri = "/backups"
        expected_body = {"backup": {"instance": inst.id, "name": name,
                "description": description}}
        mgr.create_backup(inst, name, description=description)
        mgr.api.method_post.assert_called_once_with(expected_uri,
                body=expected_body)
    @patch('pyrax.clouddatabases.CloudDatabaseInstance',
            new=fakes.FakeDatabaseInstance)
    def test_mgr_restore_backup(self):
        # restore_backup posts a new-instance payload with a restorePoint.
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        flavor = utils.random_unicode()
        fref = utils.random_unicode()
        volume = utils.random_unicode()
        backup = utils.random_unicode()
        mgr.api.method_post = Mock(return_value=(None, {"instance": {}}))
        mgr.api._get_flavor_ref = Mock(return_value=fref)
        expected_uri = "/%s" % mgr.uri_base
        expected_body = {"instance": {"name": name, "flavorRef": fref,
                "volume": {"size": volume}, "restorePoint":
                {"backupRef": backup}}}
        mgr.restore_backup(backup, name, flavor, volume)
        mgr.api.method_post.assert_called_once_with(expected_uri,
                body=expected_body)
    def test_mgr_list_backups(self):
        inst = self.instance
        mgr = inst.manager
        mgr.api._backup_manager.list = Mock(return_value=(None, None))
        mgr.list_backups(inst)
        mgr.api._backup_manager.list.assert_called_once_with(instance=inst,
                limit=20, marker=0)
    def test_mgr_list_backups_for_instance(self):
        inst = self.instance
        mgr = inst.manager
        mgr.api.method_get = Mock(return_value=(None, {"backups": []}))
        expected_uri = "/%s/%s/backups?limit=20&marker=0" % (mgr.uri_base, inst.id)
        mgr._list_backups_for_instance(inst)
        mgr.api.method_get.assert_called_once_with(expected_uri)
    def test_create_database(self):
        # create_database fills in the default charset/collation and asks
        # the manager not to return a resource (return_none=True).
        inst = self.instance
        inst._database_manager.create = Mock()
        inst._database_manager.find = Mock()
        db = inst.create_database(name="test")
        inst._database_manager.create.assert_called_once_with(name="test",
                character_set="utf8", collate="utf8_general_ci",
                return_none=True)
    def test_create_user(self):
        # A single database name is wrapped into a one-element list.
        inst = self.instance
        inst._user_manager.create = Mock()
        inst._user_manager.find = Mock()
        name = utils.random_unicode()
        password = utils.random_unicode()
        database_names = utils.random_unicode()
        host = utils.random_unicode()
        inst.create_user(name=name, password=password,
                database_names=database_names, host=host)
        inst._user_manager.create.assert_called_once_with(name=name,
                password=password, database_names=[database_names], host=host,
                return_none=True)
    def test_delete_database(self):
        inst = self.instance
        inst._database_manager.delete = Mock()
        inst.delete_database("dbname")
        inst._database_manager.delete.assert_called_once_with("dbname")
    def test_delete_user(self):
        inst = self.instance
        inst._user_manager.delete = Mock()
        inst.delete_user("username")
        inst._user_manager.delete.assert_called_once_with("username")
    def test_delete_database_direct(self):
        # Deleting via the resource object delegates to its manager.
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        db = CloudDatabaseDatabase(mgr, info={"name": name})
        mgr.delete = Mock()
        db.delete()
        mgr.delete.assert_called_once_with(name)
    def test_delete_user_direct(self):
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        user = CloudDatabaseUser(mgr, info={"name": name})
        mgr.delete = Mock()
        user.delete()
        mgr.delete.assert_called_once_with(name)
    def test_enable_root_user(self):
        # Enabling root POSTs to .../root and returns the new password.
        inst = self.instance
        pw = utils.random_unicode()
        fake_body = {"user": {"password": pw}}
        inst.manager.api.method_post = Mock(return_value=(None, fake_body))
        ret = inst.enable_root_user()
        call_uri = "/instances/%s/root" % inst.id
        inst.manager.api.method_post.assert_called_once_with(call_uri)
        self.assertEqual(ret, pw)
    def test_root_user_status(self):
        inst = self.instance
        fake_body = {"rootEnabled": True}
        inst.manager.api.method_get = Mock(return_value=(None, fake_body))
        ret = inst.root_user_status()
        call_uri = "/instances/%s/root" % inst.id
        inst.manager.api.method_get.assert_called_once_with(call_uri)
        self.assertTrue(ret)
    def test_restart(self):
        inst = self.instance
        inst.manager.action = Mock()
        ret = inst.restart()
        inst.manager.action.assert_called_once_with(inst, "restart")
    def test_resize(self):
        # resize() resolves the flavor argument to a flavorRef first.
        inst = self.instance
        flavor_ref = utils.random_unicode()
        inst.manager.api._get_flavor_ref = Mock(return_value=flavor_ref)
        fake_body = {"flavorRef": flavor_ref}
        inst.manager.action = Mock()
        ret = inst.resize(42)
        call_uri = "/instances/%s/action" % inst.id
        inst.manager.action.assert_called_once_with(inst, "resize",
                body=fake_body)
    def test_resize_volume_too_small(self):
        # Shrinking the volume is rejected.
        inst = self.instance
        inst.volume.get = Mock(return_value=2)
        self.assertRaises(exc.InvalidVolumeResize, inst.resize_volume, 1)
    def test_resize_volume(self):
        inst = self.instance
        fake_body = {"volume": {"size": 2}}
        inst.manager.action = Mock()
        ret = inst.resize_volume(2)
        inst.manager.action.assert_called_once_with(inst, "resize",
                body=fake_body)
    def test_resize_volume_direct(self):
        # Resizing via the volume object routes through the instance.
        inst = self.instance
        vol = inst.volume
        fake_body = {"volume": {"size": 2}}
        inst.manager.action = Mock()
        ret = vol.resize(2)
        inst.manager.action.assert_called_once_with(inst, "resize",
                body=fake_body)
    def test_volume_get(self):
        # Volume.get(name) mirrors attribute access.
        inst = self.instance
        vol = inst.volume
        att = vol.size
        using_get = vol.get("size")
        self.assertEqual(att, using_get)
    def test_volume_get_fail(self):
        inst = self.instance
        vol = inst.volume
        self.assertRaises(AttributeError, vol.get, "fake")
    def test_inst_list_backups(self):
        inst = self.instance
        mgr = inst.manager
        mgr._list_backups_for_instance = Mock()
        inst.list_backups()
        mgr._list_backups_for_instance.assert_called_once_with(inst, limit=20,
                marker=0)
    def test_inst_create_backup(self):
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        description = utils.random_unicode()
        mgr.create_backup = Mock()
        inst.create_backup(name, description=description)
        mgr.create_backup.assert_called_once_with(inst, name,
                description=description)
    def test_get_flavor_property(self):
        inst = self.instance
        inst._loaded = True
        flavor = inst.flavor
        self.assertTrue(isinstance(flavor, CloudDatabaseFlavor))
    def test_set_flavor_property_dict(self):
        # Assigning a plain dict coerces it to a CloudDatabaseFlavor.
        inst = self.instance
        inst._loaded = True
        inst.flavor = {"name": "test"}
        self.assertTrue(isinstance(inst.flavor, CloudDatabaseFlavor))
    def test_set_flavor_property_instance(self):
        inst = self.instance
        inst._loaded = True
        flavor = CloudDatabaseFlavor(inst.manager, {"name": "test"})
        inst.flavor = flavor
        self.assertTrue(isinstance(inst.flavor, CloudDatabaseFlavor))
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_list_databases_for_instance(self):
clt = self.client
inst = self.instance
limit = utils.random_unicode()
marker = utils.random_unicode()
inst.list_databases = Mock(return_value=["db"])
ret = clt.list_databases(inst, limit=limit, marker=marker)
self.assertEqual(ret, ["db"])
inst.list_databases.assert_called_once_with(limit=limit, marker=marker)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_database_for_instance(self):
clt = self.client
inst = self.instance
inst.create_database = Mock(return_value=["db"])
nm = utils.random_unicode()
ret = clt.create_database(inst, nm)
self.assertEqual(ret, ["db"])
inst.create_database.assert_called_once_with(nm,
character_set=None, collate=None)
def test_clt_get_database(self):
clt = self.client
inst = self.instance
inst.get_database = Mock()
nm = utils.random_unicode()
clt.get_database(inst, nm)
inst.get_database.assert_called_once_with(nm)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_delete_database_for_instance(self):
clt = self.client
inst = self.instance
inst.delete_database = Mock()
nm = utils.random_unicode()
clt.delete_database(inst, nm)
inst.delete_database.assert_called_once_with(nm)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_list_users_for_instance(self):
clt = self.client
inst = self.instance
limit = utils.random_unicode()
marker = utils.random_unicode()
inst.list_users = Mock(return_value=["user"])
ret = clt.list_users(inst, limit=limit, marker=marker)
self.assertEqual(ret, ["user"])
inst.list_users.assert_called_once_with(limit=limit, marker=marker)
def test_create_user_for_instance(self):
clt = self.client
inst = self.instance
inst.create_user = Mock()
nm = utils.random_unicode()
pw = utils.random_unicode()
host = utils.random_unicode()
ret = clt.create_user(inst, nm, pw, ["db"], host=host)
inst.create_user.assert_called_once_with(name=nm, password=pw,
database_names=["db"], host=host)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_delete_user_for_instance(self):
clt = self.client
inst = self.instance
inst.delete_user = Mock()
nm = utils.random_unicode()
clt.delete_user(inst, nm)
inst.delete_user.assert_called_once_with(nm)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_enable_root_user_for_instance(self):
clt = self.client
inst = self.instance
inst.enable_root_user = Mock()
clt.enable_root_user(inst)
inst.enable_root_user.assert_called_once_with()
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_root_user_status_for_instance(self):
clt = self.client
inst = self.instance
inst.root_user_status = Mock()
clt.root_user_status(inst)
inst.root_user_status.assert_called_once_with()
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_user_by_client(self):
clt = self.client
inst = self.instance
inst.get_user = Mock()
fakeuser = utils.random_unicode()
clt.get_user(inst, fakeuser)
inst.get_user.assert_called_once_with(fakeuser)
def test_get_user(self):
inst = self.instance
good_name = utils.random_unicode()
user = fakes.FakeDatabaseUser(manager=None, info={"name": good_name})
inst._user_manager.get = Mock(return_value=user)
returned = inst.get_user(good_name)
self.assertEqual(returned, user)
    def test_get_user_fail(self):
        """A NotFound from the manager surfaces as NoSuchDatabaseUser."""
        inst = self.instance
        bad_name = utils.random_unicode()
        inst._user_manager.get = Mock(side_effect=exc.NotFound(""))
        self.assertRaises(exc.NoSuchDatabaseUser, inst.get_user, bad_name)
    def test_get_db_names(self):
        """_get_db_names() keeps only names matching existing databases."""
        inst = self.instance
        mgr = inst._user_manager
        mgr.instance = inst
        dbname1 = utils.random_ascii()
        dbname2 = utils.random_ascii()
        inst.list_databases = Mock(return_value=((dbname1, dbname2)))
        resp = mgr._get_db_names(dbname1)
        self.assertEqual(resp, [dbname1])
    def test_get_db_names_not_strict(self):
        """With strict=False, unknown names pass through unvalidated."""
        inst = self.instance
        mgr = inst._user_manager
        mgr.instance = inst
        dbname1 = utils.random_ascii()
        dbname2 = utils.random_ascii()
        inst.list_databases = Mock(return_value=((dbname1, dbname2)))
        resp = mgr._get_db_names("BAD", strict=False)
        self.assertEqual(resp, ["BAD"])
    def test_get_db_names_fail(self):
        """In strict mode an unknown database name raises NoSuchDatabase."""
        inst = self.instance
        mgr = inst._user_manager
        dbname1 = utils.random_ascii()
        dbname2 = utils.random_ascii()
        inst.list_databases = Mock(return_value=((dbname1, dbname2)))
        self.assertRaises(exc.NoSuchDatabase, mgr._get_db_names, "BAD")
    def test_change_user_password(self):
        """change_user_password() issues a PUT with the new password body.

        The "/None/..." URI comes from the fake manager having no uri_base.
        """
        inst = self.instance
        fakename = utils.random_ascii()
        newpass = utils.random_ascii()
        resp = fakes.FakeResponse()
        resp.status_code = 202
        inst._user_manager.api.method_put = Mock(return_value=(resp, {}))
        fakeuser = fakes.FakeDatabaseUser(inst._user_manager, {"name": fakename})
        inst._user_manager.get = Mock(return_value=fakeuser)
        inst.change_user_password(fakename, newpass)
        inst._user_manager.api.method_put.assert_called_once_with(
                "/None/%s" % fakename, body={"user": {"password": newpass}})
    def test_update_user(self):
        """Instance.update_user() delegates all kwargs to the user manager."""
        inst = self.instance
        mgr = inst._user_manager
        user = utils.random_unicode()
        name = utils.random_unicode()
        password = utils.random_unicode()
        host = utils.random_unicode()
        mgr.update = Mock()
        inst.update_user(user, name=name, password=password, host=host)
        mgr.update.assert_called_once_with(user, name=name, password=password,
                host=host)
    def test_user_manager_update(self):
        """Manager.update() PUTs the changed attributes to the user URI."""
        inst = self.instance
        mgr = inst._user_manager
        username = utils.random_unicode()
        user = fakes.FakeDatabaseUser(mgr, info={"name": username})
        name = utils.random_unicode()
        host = utils.random_unicode()
        password = utils.random_unicode()
        mgr.api.method_put = Mock(return_value=(None, None))
        expected_uri = "/%s/%s" % (mgr.uri_base, username)
        expected_body = {"user": {"name": name, "host": host,
                "password": password}}
        mgr.update(user, name=name, host=host, password=password)
        mgr.api.method_put.assert_called_once_with(expected_uri,
                body=expected_body)
    def test_user_manager_update_missing(self):
        """update() with no attributes to change raises MissingDBUserParameters."""
        inst = self.instance
        mgr = inst._user_manager
        username = utils.random_unicode()
        user = fakes.FakeDatabaseUser(mgr, info={"name": username})
        self.assertRaises(exc.MissingDBUserParameters, mgr.update, user)
    def test_user_manager_update_unchanged(self):
        """update() with a name identical to the current one raises."""
        inst = self.instance
        mgr = inst._user_manager
        username = utils.random_unicode()
        user = fakes.FakeDatabaseUser(mgr, info={"name": username})
        self.assertRaises(exc.DBUpdateUnchanged, mgr.update, user,
                name=username)
    def test_list_user_access(self):
        """list_user_access() turns the API's database dicts into objects."""
        inst = self.instance
        dbname1 = utils.random_ascii()
        dbname2 = utils.random_ascii()
        acc = {"databases": [{"name": dbname1}, {"name": dbname2}]}
        inst._user_manager.api.method_get = Mock(return_value=(None, acc))
        db_list = inst.list_user_access("fakeuser")
        self.assertEqual(len(db_list), 2)
        self.assertTrue(db_list[0].name in (dbname1, dbname2))
    def test_list_user_access_not_found(self):
        """A NotFound from the API surfaces as NoSuchDatabaseUser."""
        inst = self.instance
        mgr = inst._user_manager
        mgr.api.method_get = Mock(side_effect=exc.NotFound(""))
        username = utils.random_unicode()
        user = fakes.FakeDatabaseUser(mgr, info={"name": username})
        self.assertRaises(exc.NoSuchDatabaseUser, mgr.list_user_access, user)
    def test_grant_user_access(self):
        """grant_user_access() PUTs the database list to the user's URI."""
        inst = self.instance
        fakeuser = utils.random_ascii()
        dbname1 = utils.random_ascii()
        inst._user_manager.api.method_put = Mock(return_value=(None, None))
        inst.grant_user_access(fakeuser, dbname1, strict=False)
        inst._user_manager.api.method_put.assert_called_once_with(
                "/None/%s/databases" % fakeuser, body={"databases": [{"name":
                dbname1}]})
    def test_grant_user_access_not_found(self):
        """A NotFound from the PUT surfaces as NoSuchDatabaseUser."""
        inst = self.instance
        mgr = inst._user_manager
        mgr.api.method_put = Mock(side_effect=exc.NotFound(""))
        username = utils.random_unicode()
        user = fakes.FakeDatabaseUser(mgr, info={"name": username})
        db_names = utils.random_unicode()
        mgr._get_db_names = Mock(return_value=[])
        self.assertRaises(exc.NoSuchDatabaseUser, mgr.grant_user_access, user,
                db_names)
    def test_revoke_user_access(self):
        """revoke_user_access() DELETEs the per-user database URI."""
        inst = self.instance
        fakeuser = utils.random_ascii()
        dbname1 = utils.random_ascii()
        inst._user_manager.api.method_delete = Mock(return_value=(None, None))
        inst.revoke_user_access(fakeuser, dbname1, strict=False)
        inst._user_manager.api.method_delete.assert_called_once_with(
                "/None/%s/databases/%s" % (fakeuser, dbname1))
    def test_backup_mgr_create_body(self):
        """The backup manager builds a body keyed on the instance id."""
        inst = self.instance
        mgr = inst.manager
        bu_mgr = mgr.api._backup_manager
        name = utils.random_unicode()
        description = utils.random_unicode()
        expected_body = {"backup": {"instance": inst.id, "name": name,
                "description": description}}
        ret = bu_mgr._create_body(name, inst, description=description)
        self.assertEqual(ret, expected_body)
def test_backup_mgr_list(self):
inst = self.instance
mgr = inst.manager
bu_mgr = mgr.api._backup_manager
fake_val = utils.random_unicode()
bu_mgr._list = Mock(return_value=fake_val)
ret = bu_mgr.list()
self.assertEqual(ret, fake_val)
    def test_backup_mgr_list_instance(self):
        """list(instance=...) delegates to the instance-scoped backup listing.

        Default paging of limit=20 / marker=0 is part of the contract.
        """
        inst = self.instance
        mgr = inst.manager
        bu_mgr = mgr.api._backup_manager
        db_mgr = mgr.api._manager
        db_mgr._list_backups_for_instance = Mock()
        bu_mgr.list(instance=inst)
        db_mgr._list_backups_for_instance.assert_called_once_with(inst, limit=20,
                marker=0)
def test_clt_change_user_password(self):
clt = self.client
inst = self.instance
inst.change_user_password = Mock()
user = utils.random_unicode()
pw = utils.random_unicode()
clt.change_user_password(inst, user, pw)
inst.change_user_password.assert_called_once_with(user, pw)
    def test_user_change_password(self):
        """User.change_password() should delegate to its manager."""
        inst = self.instance
        mgr = inst.manager
        password = utils.random_unicode()
        user = CloudDatabaseUser(mgr, info={"name": "fake"})
        mgr.change_user_password = Mock()
        user.change_password(password)
        mgr.change_user_password.assert_called_once_with(user, password)
    def test_clt_update_user(self):
        """Client.update_user() should delegate to the instance with kwargs."""
        clt = self.client
        inst = self.instance
        inst.update_user = Mock()
        user = utils.random_unicode()
        name = utils.random_unicode()
        password = utils.random_unicode()
        host = utils.random_unicode()
        clt.update_user(inst, user, name=name, password=password, host=host)
        inst.update_user.assert_called_once_with(user, name=name,
                password=password, host=host)
    def test_user_update(self):
        """User.update() should delegate to its manager with kwargs."""
        inst = self.instance
        mgr = inst.manager
        name = utils.random_unicode()
        password = utils.random_unicode()
        host = utils.random_unicode()
        user = CloudDatabaseUser(mgr, info={"name": "fake"})
        mgr.update = Mock()
        user.update(name=name, password=password, host=host)
        mgr.update.assert_called_once_with(user, name=name, password=password,
                host=host)
    def test_clt_list_user_access(self):
        """Client.list_user_access() should delegate to the instance."""
        clt = self.client
        inst = self.instance
        inst.list_user_access = Mock()
        user = utils.random_unicode()
        clt.list_user_access(inst, user)
        inst.list_user_access.assert_called_once_with(user)
    def test_user_list_user_access(self):
        """User.list_user_access() should delegate to its manager."""
        inst = self.instance
        mgr = inst.manager
        user = CloudDatabaseUser(mgr, info={"name": "fake"})
        mgr.list_user_access = Mock()
        user.list_user_access()
        mgr.list_user_access.assert_called_once_with(user)
    def test_clt_grant_user_access(self):
        """Client.grant_user_access() delegates with strict=True by default."""
        clt = self.client
        inst = self.instance
        inst.grant_user_access = Mock()
        user = utils.random_unicode()
        db_names = utils.random_unicode()
        clt.grant_user_access(inst, user, db_names)
        inst.grant_user_access.assert_called_once_with(user, db_names,
                strict=True)
    def test_user_grant_user_access(self):
        """User.grant_user_access() passes the strict flag through."""
        inst = self.instance
        mgr = inst.manager
        user = CloudDatabaseUser(mgr, info={"name": "fake"})
        db_names = utils.random_unicode()
        strict = utils.random_unicode()
        mgr.grant_user_access = Mock()
        user.grant_user_access(db_names, strict=strict)
        mgr.grant_user_access.assert_called_once_with(user, db_names,
                strict=strict)
    def test_clt_revoke_user_access(self):
        """Client.revoke_user_access() delegates with strict=True by default."""
        clt = self.client
        inst = self.instance
        inst.revoke_user_access = Mock()
        user = utils.random_unicode()
        db_names = utils.random_unicode()
        clt.revoke_user_access(inst, user, db_names)
        inst.revoke_user_access.assert_called_once_with(user, db_names,
                strict=True)
    def test_user_revoke_user_access(self):
        """User.revoke_user_access() passes the strict flag through."""
        inst = self.instance
        mgr = inst.manager
        user = CloudDatabaseUser(mgr, info={"name": "fake"})
        db_names = utils.random_unicode()
        strict = utils.random_unicode()
        mgr.revoke_user_access = Mock()
        user.revoke_user_access(db_names, strict=strict)
        mgr.revoke_user_access.assert_called_once_with(user, db_names,
                strict=strict)
def test_clt_restart(self):
clt = self.client
inst = self.instance
inst.restart = Mock()
clt.restart(inst)
inst.restart.assert_called_once_with()
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_inst_resize(self):
clt = self.client
inst = self.instance
inst.resize = Mock()
clt.resize(inst, "flavor")
inst.resize.assert_called_once_with("flavor")
def test_get_limits(self):
self.assertRaises(NotImplementedError, self.client.get_limits)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_list_flavors(self):
clt = self.client
clt._flavor_manager.list = Mock()
limit = utils.random_unicode()
marker = utils.random_unicode()
clt.list_flavors(limit=limit, marker=marker)
clt._flavor_manager.list.assert_called_once_with(limit=limit,
marker=marker)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor(self):
clt = self.client
clt._flavor_manager.get = Mock()
clt.get_flavor("flavorid")
clt._flavor_manager.get.assert_called_once_with("flavorid")
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor_ref_for_obj(self):
clt = self.client
info = {"id": 1,
"name": "test_flavor",
"ram": 42,
"links": [{
"href": example_uri,
"rel": "self"}]}
flavor_obj = CloudDatabaseFlavor(clt._manager, info)
ret = clt._get_flavor_ref(flavor_obj)
self.assertEqual(ret, example_uri)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor_ref_for_id(self):
clt = self.client
info = {"id": 1,
"name": "test_flavor",
"ram": 42,
"links": [{
"href": example_uri,
"rel": "self"}]}
flavor_obj = CloudDatabaseFlavor(clt._manager, info)
clt.get_flavor = Mock(return_value=flavor_obj)
ret = clt._get_flavor_ref(1)
self.assertEqual(ret, example_uri)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor_ref_for_name(self):
clt = self.client
info = {"id": 1,
"name": "test_flavor",
"ram": 42,
"links": [{
"href": example_uri,
"rel": "self"}]}
flavor_obj = CloudDatabaseFlavor(clt._manager, info)
clt.get_flavor = Mock(side_effect=exc.NotFound(""))
clt.list_flavors = Mock(return_value=[flavor_obj])
ret = clt._get_flavor_ref("test_flavor")
self.assertEqual(ret, example_uri)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor_ref_for_ram(self):
clt = self.client
info = {"id": 1,
"name": "test_flavor",
"ram": 42,
"links": [{
"href": example_uri,
"rel": "self"}]}
flavor_obj = CloudDatabaseFlavor(clt._manager, info)
clt.get_flavor = Mock(side_effect=exc.NotFound(""))
clt.list_flavors = Mock(return_value=[flavor_obj])
ret = clt._get_flavor_ref(42)
self.assertEqual(ret, example_uri)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_get_flavor_ref_not_found(self):
clt = self.client
info = {"id": 1,
"name": "test_flavor",
"ram": 42,
"links": [{
"href": example_uri,
"rel": "self"}]}
flavor_obj = CloudDatabaseFlavor(clt._manager, info)
clt.get_flavor = Mock(side_effect=exc.NotFound(""))
clt.list_flavors = Mock(return_value=[flavor_obj])
self.assertRaises(exc.FlavorNotFound, clt._get_flavor_ref, "nonsense")
def test_clt_list_backups(self):
clt = self.client
mgr = clt._backup_manager
mgr.list = Mock()
clt.list_backups()
mgr.list.assert_called_once_with(instance=None, limit=20, marker=0)
    def test_clt_list_backups_for_instance(self):
        """list_backups(instance=...) passes the instance filter through."""
        clt = self.client
        mgr = clt._backup_manager
        mgr.list = Mock()
        inst = utils.random_unicode()
        clt.list_backups(instance=inst)
        mgr.list.assert_called_once_with(instance=inst, limit=20, marker=0)
def test_clt_get_backup(self):
clt = self.client
mgr = clt._backup_manager
mgr.get = Mock()
backup = utils.random_unicode()
clt.get_backup(backup)
mgr.get.assert_called_once_with(backup)
    def test_clt_delete_backup(self):
        """Client.delete_backup() should delegate to the backup manager."""
        clt = self.client
        mgr = clt._backup_manager
        mgr.delete = Mock()
        backup = utils.random_unicode()
        clt.delete_backup(backup)
        mgr.delete.assert_called_once_with(backup)
    def test_clt_create_backup(self):
        """Client.create_backup() should delegate to the instance."""
        clt = self.client
        inst = self.instance
        name = utils.random_unicode()
        description = utils.random_unicode()
        inst.create_backup = Mock()
        clt.create_backup(inst, name, description=description)
        inst.create_backup.assert_called_once_with(name,
                description=description)
    def test_clt_restore_backup(self):
        """Client.restore_backup() should delegate to the instance manager."""
        clt = self.client
        mgr = clt._manager
        backup = utils.random_unicode()
        name = utils.random_unicode()
        flavor = utils.random_unicode()
        volume = utils.random_unicode()
        mgr.restore_backup = Mock()
        clt.restore_backup(backup, name, flavor, volume)
        mgr.restore_backup.assert_called_once_with(backup, name, flavor, volume)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_db(self):
mgr = self.instance._database_manager
nm = utils.random_unicode()
ret = mgr._create_body(nm, character_set="CS", collate="CO")
expected = {"databases": [
{"name": nm,
"character_set": "CS",
"collate": "CO"}]}
self.assertEqual(ret, expected)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_user(self):
inst = self.instance
mgr = inst._user_manager
nm = utils.random_unicode()
pw = utils.random_unicode()
dbnames = [utils.random_unicode(), utils.random_unicode()]
ret = mgr._create_body(nm, password=pw, database_names=dbnames)
expected = {"users": [
{"name": nm,
"password": pw,
"databases": [{"name": dbnames[0]}, {"name": dbnames[1]}]}]}
self.assertEqual(ret, expected)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_user_host(self):
inst = self.instance
mgr = inst._user_manager
nm = utils.random_unicode()
host = utils.random_unicode()
pw = utils.random_unicode()
dbnames = [utils.random_unicode(), utils.random_unicode()]
ret = mgr._create_body(nm, host=host, password=pw,
database_names=dbnames)
expected = {"users": [
{"name": nm,
"password": pw,
"host": host,
"databases": [{"name": dbnames[0]}, {"name": dbnames[1]}]}]}
self.assertEqual(ret, expected)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_flavor(self):
clt = self.client
nm = utils.random_unicode()
clt._get_flavor_ref = Mock(return_value=example_uri)
ret = clt._manager._create_body(nm)
expected = {"instance": {
"name": nm,
"flavorRef": example_uri,
"volume": {"size": 1},
"databases": [],
"users": []}}
self.assertEqual(ret, expected)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_missing_db_parameters(self):
clt = self.client
nm = utils.random_unicode()
clt._get_flavor_ref = Mock(return_value=example_uri)
self.assertRaises(exc.MissingCloudDatabaseParameter,
clt._manager._create_body,nm, version="10")
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_datastore(self):
clt = self.client
nm = utils.random_unicode()
clt._get_flavor_ref = Mock(return_value=example_uri)
ret = clt._manager._create_body(nm, version="10", type="MariaDB")
expected = {"instance": {
"name": nm,
"flavorRef": example_uri,
"volume": {"size": 1},
"databases": [],
"users": [],
"datastore": {"type": "MariaDB", "version": "10"}}}
self.assertEqual(ret, expected)
@patch("pyrax.manager.BaseManager", new=fakes.FakeManager)
def test_create_body_schedule(self):
exp_id = utils.random_unicode()
expected = {
"schedule": {
"action": 'backup',
"day_of_week": 3,
"hour": 12,
"minute": 0,
"source_id": exp_id,
"source_type": "instance",
"full_backup_retention": True
}
}
ret = self.client._schedule_manager._create_body(None, exp_id,
day_of_week=3, hour=12, full_backup_retention=True)
self.assertEqual(expected, ret)
    def test_update_schedule(self):
        """update_schedule() PUTs only the supplied fields to the schedule URI."""
        schedule = utils.random_unicode()
        run_now = True
        minute = 5
        with patch.object(self.client._schedule_manager, "_update") as mupdate:
            self.client.update_schedule(schedule, run_now=run_now, minute=minute)
            body = {
                "schedule": {
                    "run_now": run_now,
                    "minute": minute
                }
            }
            uri = "/schedules/%s" % schedule
            mupdate.assert_called_once_with(uri, body)
    def test_ha_create_body(self):
        """The HA manager assembles the full "ha" creation body.

        Verifies datastore, replica_source, replicas, networks and acls are
        all placed under the "ha" key exactly as provided.
        """
        self.maxDiff = None
        expected = {
            "ha": {
                "datastore": {
                    "version": "5.6",
                    "type": "MYSQL"
                },
                "replicas": [{
                    "volume": {
                        "size": 1
                    },
                    "flavorRef": "2",
                    "name": "source_replica1"
                }],
                "name":"ha-1",
                "networks": [
                    "servicenet",
                    "publicnet"
                ],
                "acls": [{
                    "address": "10.0.0.0/0"
                }, {
                    "address": "1.2.3.4/5"
                }],
                "replica_source": [{
                    "volume": {
                        "size": 1
                    },
                    "flavorRef": "2",
                    "name": "source"
                }]}}
        exp_ds = {
            "version": "5.6",
            "type": "MYSQL"
        }
        exp_rs = [{
            "volume": {
                "size": 1
            },
            "flavorRef": "2",
            "name": "source"
        }]
        exp_replicas = [{
            "volume": {
                "size": 1,
            },
            "flavorRef": "2",
            "name": "source_replica1"
        }]
        exp_nw = ["servicenet", "publicnet"]
        exp_acls = [{
            "address": "10.0.0.0/0"
        }, {
            "address": "1.2.3.4/5"
        }]
        self.assertEqual(expected, self.client._ha_manager._create_body(
                "ha-1", exp_ds, exp_rs, exp_replicas, networks=exp_nw,
                acls=exp_acls))
def test_ha_add_acl(self):
with patch.object(self.client, "method_post") as mpost:
self.client.create_ha_acl("1234", "1.2.3.4/5")
mpost.assert_called_once_with("/ha/1234/acls",
body={'address': '1.2.3.4/5'})
    def test_ha_del_acl(self):
        """delete_ha_acl() URL-quotes the CIDR (slash becomes %2F)."""
        with patch.object(self.client, "method_delete") as mdel:
            self.client.delete_ha_acl("1234", "1.2.3.4/5")
            mdel.assert_called_once_with("/ha/1234/acls/1.2.3.4%2F5")
    def test_ha_add_replica(self):
        """create_ha_replica() POSTs an add_replica action body."""
        expbody = {
            "add_replica": {
                "replica_details": {
                    "volume": {
                        "size": 4
                    },
                    "flavorRef": "2",
                    "name": "test"
                }
            }
        }
        with patch.object(self.client, "method_post") as mpost:
            self.client.create_ha_replica("1234", 'test', 4, "2")
            mpost.assert_called_once_with("/ha/1234/action", body=expbody)
    def test_ha_del_replica(self):
        """delete_ha_replica() POSTs a remove_replica action body."""
        expbody = {
            "remove_replica": "567890"
        }
        with patch.object(self.client, "method_post") as mpost:
            self.client.delete_ha_replica("1234", "567890")
            mpost.assert_called_once_with("/ha/1234/action", body=expbody)
    def test_ha_resize_vol(self):
        """resize_ha_volume() POSTs a resize_volumes action when growing."""
        mha = Mock()
        mha.id = '1234'
        mha.volume = {'size': 1}
        expbody = {
            "resize_volumes": {
                "size": 2
            }
        }
        with patch.object(self.client._ha_manager, "get") as mget:
            with patch.object(self.client, "method_post") as mpost:
                mget.return_value = mha
                self.client.resize_ha_volume('1234', 2)
                mpost.assert_called_once_with("/ha/1234/action", body=expbody)
    def test_ha_resize_vol_error(self):
        """Shrinking an HA volume raises and performs no POST."""
        mha = Mock()
        mha.id = "1234"
        mha.volume = { 'size': 3 }
        with patch.object(self.client._ha_manager, "get") as mget:
            with patch.object(self.client, "method_post") as mpost:
                mget.return_value = mha
                with self.assertRaises(exc.ClientException) as ex:
                    self.client.resize_ha_volume('1234', 2)
                ermsg = ("New volume size must be greater than existing "
                        "volume size (3)")
                self.assertIn(ermsg, str(ex.exception))
                self.assertEqual(0, mpost.call_count)
def test_ha_resize_flavor(self):
expbody = {
"resize_flavor": "3"
}
with patch.object(self.client, "method_post") as mpost:
self.client.resize_ha_flavor("1234", "3")
mpost.assert_called_once_with("/ha/1234/action", body=expbody)
if __name__ == "__main__":
unittest.main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for DRAC power interface
"""
from unittest import mock
from dracclient import constants as drac_constants
from dracclient import exceptions as drac_exceptions
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import power as drac_power
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = test_utils.INFO_DICT
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
                   autospec=True)
class DracPowerTestCase(test_utils.BaseDracTest):
    """Unit tests for the DRAC power interface (drac_power.DracPower).

    The class-level patch replaces drac_common.get_drac_client, so every
    test method receives the patched factory as ``mock_get_drac_client``
    and drives the driver through a fully mocked dracclient.
    """

    def setUp(self):
        super(DracPowerTestCase, self).setUp()
        # A bare idrac node; INFO_DICT supplies the DRAC driver_info fields.
        self.node = obj_utils.create_test_node(self.context,
                                               driver='idrac',
                                               driver_info=INFO_DICT)

    def test_get_properties(self, mock_get_drac_client):
        """The driver exposes the common DRAC driver properties."""
        expected = drac_common.COMMON_PROPERTIES
        driver = drac_power.DracPower()
        self.assertEqual(expected, driver.get_properties())

    def test_get_power_state(self, mock_get_drac_client):
        """DRAC power constants are translated to ironic power states."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.return_value = drac_constants.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            power_state = task.driver.power.get_power_state(task)
        self.assertEqual(states.POWER_ON, power_state)
        mock_client.get_power_state.assert_called_once_with()

    def test_get_power_state_fail(self, mock_get_drac_client):
        """A dracclient error surfaces as DracOperationError."""
        mock_client = mock_get_drac_client.return_value
        exc = drac_exceptions.BaseClientException('boom')
        mock_client.get_power_state.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.DracOperationError,
                              task.driver.power.get_power_state, task)
        mock_client.get_power_state.assert_called_once_with()

    @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
    def test_set_power_state(self, mock_log, mock_get_drac_client):
        """set_power_state() maps the target state and does not warn."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.side_effect = [drac_constants.POWER_ON,
                                                   drac_constants.POWER_OFF]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_OFF)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)
        self.assertFalse(mock_log.called)

    def test_set_power_state_fail(self, mock_get_drac_client):
        """A dracclient error during set surfaces as DracOperationError."""
        mock_client = mock_get_drac_client.return_value
        exc = drac_exceptions.BaseClientException('boom')
        mock_client.set_power_state.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.DracOperationError,
                              task.driver.power.set_power_state, task,
                              states.POWER_OFF)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)

    @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
    def test_set_power_state_timeout(self, mock_log, mock_get_drac_client):
        """An explicit timeout is accepted without logging a warning."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.side_effect = [drac_constants.POWER_ON,
                                                   drac_constants.POWER_OFF]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_OFF,
                                              timeout=11)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)
        self.assertFalse(mock_log.called)

    @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
    def test_reboot_while_powered_on(self, mock_log, mock_get_drac_client):
        """Rebooting a powered-on node issues the REBOOT state."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.return_value = drac_constants.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.REBOOT]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)
        self.assertFalse(mock_log.called)

    @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
    def test_reboot_while_powered_on_timeout(self, mock_log,
                                             mock_get_drac_client):
        """A reboot timeout is accepted but logs a warning."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.return_value = drac_constants.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task, timeout=42)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.REBOOT]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)
        self.assertTrue(mock_log.called)

    def test_reboot_while_powered_off(self, mock_get_drac_client):
        """Rebooting a powered-off node simply powers it on."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF,
                                                   drac_constants.POWER_ON]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_ON]
        mock_client.set_power_state.assert_called_once_with(drac_power_state)

    @mock.patch('time.sleep', autospec=True)
    def test_reboot_retries_success(self, mock_sleep, mock_get_drac_client):
        """A failed RequestedState change is retried and then succeeds."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF,
                                                   drac_constants.POWER_OFF,
                                                   drac_constants.POWER_ON]
        exc = drac_exceptions.DRACOperationFailed(
            drac_messages=['The command failed to set RequestedState'])
        mock_client.set_power_state.side_effect = [exc, None]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
        drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_ON]
        self.assertEqual(2, mock_client.set_power_state.call_count)
        mock_client.set_power_state.assert_has_calls(
            [mock.call(drac_power_state),
             mock.call(drac_power_state)])

    @mock.patch('time.sleep', autospec=True)
    def test_reboot_retries_fail(self, mock_sleep, mock_get_drac_client):
        """Exhausting POWER_STATE_TRIES raises DracOperationError."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.return_value = drac_constants.POWER_OFF
        exc = drac_exceptions.DRACOperationFailed(
            drac_messages=['The command failed to set RequestedState'])
        mock_client.set_power_state.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.DracOperationError,
                              task.driver.power.reboot, task)
        self.assertEqual(drac_power.POWER_STATE_TRIES,
                         mock_client.set_power_state.call_count)

    @mock.patch('time.sleep', autospec=True)
    def test_reboot_retries_power_change_success(self, mock_sleep,
                                                 mock_get_drac_client):
        """If power flips on mid-retry, the retry issues REBOOT instead."""
        mock_client = mock_get_drac_client.return_value
        mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF,
                                                   drac_constants.POWER_ON]
        exc = drac_exceptions.DRACOperationFailed(
            drac_messages=['The command failed to set RequestedState'])
        mock_client.set_power_state.side_effect = [exc, None]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
        self.assertEqual(2, mock_client.set_power_state.call_count)
        drac_power_state1 = drac_power.REVERSE_POWER_STATES[states.POWER_ON]
        drac_power_state2 = drac_power.REVERSE_POWER_STATES[states.REBOOT]
        mock_client.set_power_state.assert_has_calls(
            [mock.call(drac_power_state1),
             mock.call(drac_power_state2)])
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for BigQuery sources and sinks."""
import datetime
import json
import logging
import time
import unittest
import hamcrest as hc
import mock
import apache_beam as beam
from apache_beam.io.google_cloud_platform.bigquery import RowAsDictJsonCoder
from apache_beam.io.google_cloud_platform.bigquery import TableRowJsonCoder
from apache_beam.io.google_cloud_platform.bigquery import parse_table_schema_from_json
from apache_beam.io.google_cloud_platform.internal.clients import bigquery
from apache_beam.internal.google_cloud_platform.json_value import to_json_value
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
from apache_beam.utils.pipeline_options import PipelineOptions
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestRowAsDictJsonCoder(unittest.TestCase):
  """Tests for RowAsDictJsonCoder round-tripping and JSON compliance."""

  def test_row_as_dict(self):
    """Encoding then decoding a plain dict row is lossless."""
    coder = RowAsDictJsonCoder()
    test_value = {'s': 'abc', 'i': 123, 'f': 123.456, 'b': True}
    self.assertEqual(test_value, coder.decode(coder.encode(test_value)))

  def json_compliance_exception(self, value):
    """Assert that encoding a row containing *value* raises the
    JSON-compliance ValueError."""
    with self.assertRaises(ValueError) as exn:
      coder = RowAsDictJsonCoder()
      test_value = {'s': value}
      self.assertEqual(test_value, coder.decode(coder.encode(test_value)))
    # BUG FIX: BaseException has no ``.message`` attribute on Python 3;
    # str(exception) works on both Python 2 and 3. assertIn also gives a
    # clearer failure message than assertTrue(x in y).
    self.assertIn(bigquery.JSON_COMPLIANCE_ERROR, str(exn.exception))

  def test_invalid_json_nan(self):
    self.json_compliance_exception(float('nan'))

  def test_invalid_json_inf(self):
    self.json_compliance_exception(float('inf'))

  def test_invalid_json_neg_inf(self):
    self.json_compliance_exception(float('-inf'))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestTableRowJsonCoder(unittest.TestCase):
  """Tests for TableRowJsonCoder encoding/decoding against a schema."""

  def test_row_as_table_row(self):
    """A schema-aware coder serializes typed cells to ordered JSON."""
    schema_definition = [
        ('s', 'STRING'),
        ('i', 'INTEGER'),
        ('f', 'FLOAT'),
        ('b', 'BOOLEAN'),
        ('r', 'RECORD')]
    # Typo fix: was "data_defination".
    data_definition = [
        'abc',
        123,
        123.456,
        True,
        {'a': 'b'}]
    str_def = '{"s": "abc", "i": 123, "f": 123.456, "b": true, "r": {"a": "b"}}'
    schema = bigquery.TableSchema(
        fields=[bigquery.TableFieldSchema(name=k, type=v)
                for k, v in schema_definition])
    coder = TableRowJsonCoder(table_schema=schema)
    test_row = bigquery.TableRow(
        f=[bigquery.TableCell(v=to_json_value(e)) for e in data_definition])

    self.assertEqual(str_def, coder.encode(test_row))
    self.assertEqual(test_row, coder.decode(coder.encode(test_row)))
    # A coder without schema can still decode.
    self.assertEqual(
        test_row, TableRowJsonCoder().decode(coder.encode(test_row)))

  def test_row_and_no_schema(self):
    """Encoding without a table schema must fail with a clear error."""
    coder = TableRowJsonCoder()
    test_row = bigquery.TableRow(
        f=[bigquery.TableCell(v=to_json_value(e))
           for e in ['abc', 123, 123.456, True]])
    with self.assertRaises(AttributeError) as ctx:
      coder.encode(test_row)
    # BUG FIX: exceptions have no ``.message`` attribute on Python 3;
    # str(exception) is portable across Python 2 and 3.
    self.assertTrue(
        str(ctx.exception).startswith('The TableRowJsonCoder requires'))

  def json_compliance_exception(self, value):
    """Assert that encoding a FLOAT cell holding *value* raises the
    JSON-compliance ValueError."""
    with self.assertRaises(ValueError) as exn:
      schema_definition = [('f', 'FLOAT')]
      schema = bigquery.TableSchema(
          fields=[bigquery.TableFieldSchema(name=k, type=v)
                  for k, v in schema_definition])
      coder = TableRowJsonCoder(table_schema=schema)
      test_row = bigquery.TableRow(
          f=[bigquery.TableCell(v=to_json_value(value))])
      coder.encode(test_row)
    # BUG FIX: same ``.message`` py2-ism as above.
    self.assertIn(bigquery.JSON_COMPLIANCE_ERROR, str(exn.exception))

  def test_invalid_json_nan(self):
    self.json_compliance_exception(float('nan'))

  def test_invalid_json_inf(self):
    self.json_compliance_exception(float('inf'))

  def test_invalid_json_neg_inf(self):
    self.json_compliance_exception(float('-inf'))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestTableSchemaParser(unittest.TestCase):
    """Tests for parse_table_schema_from_json."""

    def test_parse_table_schema_from_json(self):
        """A nested RECORD schema parses into the matching TableSchema."""
        leaf_string = bigquery.TableFieldSchema(
            name='s', type='STRING', mode='NULLABLE', description='s description')
        leaf_number = bigquery.TableFieldSchema(
            name='n', type='INTEGER', mode='REQUIRED', description='n description')
        record = bigquery.TableFieldSchema(
            name='r', type='RECORD', mode='REQUIRED', description='r description',
            fields=[leaf_string, leaf_number])
        expected_schema = bigquery.TableSchema(fields=[record])
        # Build the JSON input from a dict literal mirroring the schema above.
        schema_dict = {'fields': [
            {'name': 'r', 'type': 'RECORD', 'mode': 'REQUIRED',
             'description': 'r description', 'fields': [
                 {'name': 's', 'type': 'STRING', 'mode': 'NULLABLE',
                  'description': 's description'},
                 {'name': 'n', 'type': 'INTEGER', 'mode': 'REQUIRED',
                  'description': 'n description'}]}]}
        self.assertEqual(parse_table_schema_from_json(json.dumps(schema_dict)),
                         expected_schema)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQuerySource(unittest.TestCase):
    """Tests for BigQuerySource: display data and table-spec/query parsing."""

    def test_display_data_item_on_validate_true(self):
        """Display data reports validation=True when validate=True is passed."""
        source = beam.io.BigQuerySource('dataset.table', validate=True)
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation', True),
            DisplayDataItemMatcher('table', 'dataset.table')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

    def test_table_reference_display_data(self):
        """Display data shows the table spec for each supported spec form."""
        # Form 1: dataset.table
        source = beam.io.BigQuerySource('dataset.table')
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation', False),
            DisplayDataItemMatcher('table', 'dataset.table')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
        # Form 2: project:dataset.table
        source = beam.io.BigQuerySource('project:dataset.table')
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation', False),
            DisplayDataItemMatcher('table', 'project:dataset.table')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
        # Form 3: domain-qualified project (xyz.com:project)
        source = beam.io.BigQuerySource('xyz.com:project:dataset.table')
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation',
                                   False),
            DisplayDataItemMatcher('table',
                                   'xyz.com:project:dataset.table')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

    def test_parse_table_reference(self):
        """Table specs decompose into projectId/datasetId/tableId correctly."""
        source = beam.io.BigQuerySource('dataset.table')
        self.assertEqual(source.table_reference.datasetId, 'dataset')
        self.assertEqual(source.table_reference.tableId, 'table')
        source = beam.io.BigQuerySource('project:dataset.table')
        self.assertEqual(source.table_reference.projectId, 'project')
        self.assertEqual(source.table_reference.datasetId, 'dataset')
        self.assertEqual(source.table_reference.tableId, 'table')
        source = beam.io.BigQuerySource('xyz.com:project:dataset.table')
        self.assertEqual(source.table_reference.projectId, 'xyz.com:project')
        self.assertEqual(source.table_reference.datasetId, 'dataset')
        self.assertEqual(source.table_reference.tableId, 'table')
        # A query-only source has no table reference and defaults to legacy SQL.
        source = beam.io.BigQuerySource(query='my_query')
        self.assertEqual(source.query, 'my_query')
        self.assertIsNone(source.table_reference)
        self.assertTrue(source.use_legacy_sql)

    def test_query_only_display_data(self):
        """Display data for a query-only source shows the query string."""
        source = beam.io.BigQuerySource(query='my_query')
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation', False),
            DisplayDataItemMatcher('query', 'my_query')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

    def test_specify_query_sql_format(self):
        """use_standard_sql=True disables legacy SQL."""
        source = beam.io.BigQuerySource(query='my_query', use_standard_sql=True)
        self.assertEqual(source.query, 'my_query')
        self.assertFalse(source.use_legacy_sql)

    def test_specify_query_flattened_records(self):
        """flatten_results=False is honored."""
        source = beam.io.BigQuerySource(query='my_query', flatten_results=False)
        self.assertFalse(source.flatten_results)

    def test_specify_query_unflattened_records(self):
        """flatten_results=True is honored."""
        source = beam.io.BigQuerySource(query='my_query', flatten_results=True)
        self.assertTrue(source.flatten_results)

    def test_specify_query_without_table(self):
        """A query alone (no table) is a valid source configuration."""
        source = beam.io.BigQuerySource(query='my_query')
        self.assertEqual(source.query, 'my_query')
        self.assertIsNone(source.table_reference)

    def test_date_partitioned_table_name(self):
        """A $-suffixed partition decorator in the table name is accepted."""
        source = beam.io.BigQuerySource('dataset.table$20030102', validate=True)
        dd = DisplayData.create_from(source)
        expected_items = [
            DisplayDataItemMatcher('validation', True),
            DisplayDataItemMatcher('table', 'dataset.table$20030102')]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQuerySink(unittest.TestCase):
    """Tests for BigQuerySink: display data, schema parsing and JSON output."""

    def test_table_spec_display_data(self):
        """Display data shows the table spec and validation flag."""
        sink = beam.io.BigQuerySink('dataset.table')
        dd = DisplayData.create_from(sink)
        expected_items = [
            DisplayDataItemMatcher('table', 'dataset.table'),
            DisplayDataItemMatcher('validation', False)]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

    def test_parse_schema_descriptor(self):
        """A 'name:TYPE, ...' schema string parses into a TableSchema."""
        sink = beam.io.BigQuerySink(
            'dataset.table', schema='s:STRING, n:INTEGER')
        self.assertEqual(sink.table_reference.datasetId, 'dataset')
        self.assertEqual(sink.table_reference.tableId, 'table')
        result_schema = {
            field.name: field.type for field in sink.table_schema.fields}
        self.assertEqual({'n': 'INTEGER', 's': 'STRING'}, result_schema)

    def test_project_table_display_data(self):
        """Display data keeps the project-qualified table spec verbatim."""
        sinkq = beam.io.BigQuerySink('PROJECT:dataset.table')
        dd = DisplayData.create_from(sinkq)
        expected_items = [
            DisplayDataItemMatcher('table', 'PROJECT:dataset.table'),
            DisplayDataItemMatcher('validation', False)]
        hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

    def test_simple_schema_as_json(self):
        """schema_as_json serializes a flat schema; fields default to NULLABLE."""
        sink = beam.io.BigQuerySink(
            'PROJECT:dataset.table', schema='s:STRING, n:INTEGER')
        self.assertEqual(
            json.dumps({'fields': [
                {'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'},
                {'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'}]}),
            sink.schema_as_json())

    def test_nested_schema_as_json(self):
        """schema_as_json serializes nested RECORD fields with descriptions."""
        string_field = bigquery.TableFieldSchema(
            name='s', type='STRING', mode='NULLABLE', description='s description')
        number_field = bigquery.TableFieldSchema(
            name='n', type='INTEGER', mode='REQUIRED', description='n description')
        record_field = bigquery.TableFieldSchema(
            name='r', type='RECORD', mode='REQUIRED', description='r description',
            fields=[string_field, number_field])
        schema = bigquery.TableSchema(fields=[record_field])
        sink = beam.io.BigQuerySink('dataset.table', schema=schema)
        # Compare parsed JSON (dict) to avoid depending on key ordering.
        self.assertEqual(
            {'fields': [
                {'name': 'r', 'type': 'RECORD', 'mode': 'REQUIRED',
                 'description': 'r description', 'fields': [
                     {'name': 's', 'type': 'STRING', 'mode': 'NULLABLE',
                      'description': 's description'},
                     {'name': 'n', 'type': 'INTEGER', 'mode': 'REQUIRED',
                      'description': 'n description'}]}]},
            json.loads(sink.schema_as_json()))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQueryReader(unittest.TestCase):
    """Tests for the reader produced by BigQuerySource.reader().

    Every test drives the reader against a mocked BigQuery API client whose
    GetQueryResults responses are canned; no network access is performed.
    """

    def get_test_rows(self):
        """Return a fixture tuple (table_rows, schema, expected_rows).

        The schema exercises scalar types, NULLABLE/REQUIRED/REPEATED modes
        and nested RECORD fields; ``expected_rows`` is what the reader should
        yield for the API-shaped ``table_rows``.
        """
        now = time.time()
        dt = datetime.datetime.utcfromtimestamp(float(now))
        ts = dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
        expected_rows = [
            {
                'i': 1,
                's': 'abc',
                'f': 2.3,
                'b': True,
                't': ts,
                'dt': '2016-10-31',
                'ts': '22:39:12.627498',
                'dt_ts': '2008-12-25T07:30:00',
                'r': {'s2': 'b'},
                'rpr': [{'s3': 'c', 'rpr2': [{'rs': ['d', 'e'], 's4': None}]}]
            },
            {
                'i': 10,
                's': 'xyz',
                'f': -3.14,
                'b': False,
                'rpr': [],
                't': None,
                'dt': None,
                'ts': None,
                'dt_ts': None,
                'r': None,
            }]
        nested_schema = [
            bigquery.TableFieldSchema(
                name='s2', type='STRING', mode='NULLABLE')]
        nested_schema_2 = [
            bigquery.TableFieldSchema(
                name='s3', type='STRING', mode='NULLABLE'),
            bigquery.TableFieldSchema(
                name='rpr2', type='RECORD', mode='REPEATED', fields=[
                    bigquery.TableFieldSchema(
                        name='rs', type='STRING', mode='REPEATED'),
                    bigquery.TableFieldSchema(
                        name='s4', type='STRING', mode='NULLABLE')])]
        schema = bigquery.TableSchema(
            fields=[
                bigquery.TableFieldSchema(
                    name='b', type='BOOLEAN', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='f', type='FLOAT', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='i', type='INTEGER', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='s', type='STRING', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='t', type='TIMESTAMP', mode='NULLABLE'),
                bigquery.TableFieldSchema(
                    name='dt', type='DATE', mode='NULLABLE'),
                bigquery.TableFieldSchema(
                    name='ts', type='TIME', mode='NULLABLE'),
                bigquery.TableFieldSchema(
                    name='dt_ts', type='DATETIME', mode='NULLABLE'),
                bigquery.TableFieldSchema(
                    name='r', type='RECORD', mode='NULLABLE',
                    fields=nested_schema),
                bigquery.TableFieldSchema(
                    name='rpr', type='RECORD', mode='REPEATED',
                    fields=nested_schema_2)])
        table_rows = [
            bigquery.TableRow(f=[
                bigquery.TableCell(v=to_json_value('true')),
                bigquery.TableCell(v=to_json_value(str(2.3))),
                bigquery.TableCell(v=to_json_value(str(1))),
                bigquery.TableCell(v=to_json_value('abc')),
                # For timestamps cannot use str() because it will truncate the
                # number representing the timestamp.
                bigquery.TableCell(v=to_json_value('%f' % now)),
                bigquery.TableCell(v=to_json_value('2016-10-31')),
                bigquery.TableCell(v=to_json_value('22:39:12.627498')),
                bigquery.TableCell(v=to_json_value('2008-12-25T07:30:00')),
                # For record we cannot use dict because it doesn't create nested
                # schemas correctly so we have to use this f,v based format
                bigquery.TableCell(v=to_json_value({'f': [{'v': 'b'}]})),
                bigquery.TableCell(v=to_json_value([{'v':{'f':[{'v': 'c'}, {'v':[
                    {'v':{'f':[{'v':[{'v':'d'}, {'v':'e'}]}, {'v':None}]}}]}]}}]))
            ]),
            bigquery.TableRow(f=[
                bigquery.TableCell(v=to_json_value('false')),
                bigquery.TableCell(v=to_json_value(str(-3.14))),
                bigquery.TableCell(v=to_json_value(str(10))),
                bigquery.TableCell(v=to_json_value('xyz')),
                bigquery.TableCell(v=None),
                bigquery.TableCell(v=None),
                bigquery.TableCell(v=None),
                bigquery.TableCell(v=None),
                bigquery.TableCell(v=None),
                bigquery.TableCell(v=to_json_value([]))])]
        return table_rows, schema, expected_rows

    def test_read_from_table(self):
        """Reading a table yields the expected dict rows and exposes the schema."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        client.jobs.GetQueryResults.return_value = bigquery.GetQueryResultsResponse(
            jobComplete=True, rows=table_rows, schema=schema)
        actual_rows = []
        with beam.io.BigQuerySource('dataset.table').reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, expected_rows)
        self.assertEqual(schema, reader.schema)

    def test_read_from_query(self):
        """A query source defaults to legacy SQL with flattened results."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        client.jobs.GetQueryResults.return_value = bigquery.GetQueryResultsResponse(
            jobComplete=True, rows=table_rows, schema=schema)
        actual_rows = []
        with beam.io.BigQuerySource(query='query').reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, expected_rows)
        self.assertEqual(schema, reader.schema)
        self.assertTrue(reader.use_legacy_sql)
        self.assertTrue(reader.flatten_results)

    def test_read_from_query_sql_format(self):
        """use_standard_sql=True propagates to the reader."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        client.jobs.GetQueryResults.return_value = bigquery.GetQueryResultsResponse(
            jobComplete=True, rows=table_rows, schema=schema)
        actual_rows = []
        with beam.io.BigQuerySource(
                query='query', use_standard_sql=True).reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, expected_rows)
        self.assertEqual(schema, reader.schema)
        self.assertFalse(reader.use_legacy_sql)
        self.assertTrue(reader.flatten_results)

    def test_read_from_query_unflatten_records(self):
        """flatten_results=False propagates to the reader."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        client.jobs.GetQueryResults.return_value = bigquery.GetQueryResultsResponse(
            jobComplete=True, rows=table_rows, schema=schema)
        actual_rows = []
        with beam.io.BigQuerySource(
                query='query', flatten_results=False).reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, expected_rows)
        self.assertEqual(schema, reader.schema)
        self.assertTrue(reader.use_legacy_sql)
        self.assertFalse(reader.flatten_results)

    def test_using_both_query_and_table_fails(self):
        """Specifying both a table and a query is rejected."""
        with self.assertRaises(ValueError) as exn:
            beam.io.BigQuerySource(table='dataset.table', query='query')
            self.assertEqual(exn.exception.message, 'Both a BigQuery table and a'
                             ' query were specified. Please specify only one of '
                             'these.')

    def test_using_neither_query_nor_table_fails(self):
        """Specifying neither a table nor a query is rejected."""
        with self.assertRaises(ValueError) as exn:
            beam.io.BigQuerySource()
            self.assertEqual(exn.exception.message, 'A BigQuery table or a query'
                             ' must be specified')

    def test_read_from_table_as_tablerows(self):
        """With coder=TableRowJsonCoder the reader yields raw TableRows."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, _ = self.get_test_rows()
        client.jobs.GetQueryResults.return_value = bigquery.GetQueryResultsResponse(
            jobComplete=True, rows=table_rows, schema=schema)
        actual_rows = []
        # We set the coder to TableRowJsonCoder, which is a signal that
        # the caller wants to see the rows as TableRows.
        with beam.io.BigQuerySource(
                'dataset.table', coder=TableRowJsonCoder).reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, table_rows)
        self.assertEqual(schema, reader.schema)

    @mock.patch('time.sleep', return_value=None)
    def test_read_from_table_and_job_complete_retry(self, patched_time_sleep):
        """The reader polls again when the job is not yet complete."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        # Return jobComplete=False on first call to trigger the code path where
        # query needs to handle waiting a bit.
        client.jobs.GetQueryResults.side_effect = [
            bigquery.GetQueryResultsResponse(
                jobComplete=False),
            bigquery.GetQueryResultsResponse(
                jobComplete=True, rows=table_rows, schema=schema)]
        actual_rows = []
        with beam.io.BigQuerySource('dataset.table').reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        self.assertEqual(actual_rows, expected_rows)

    def test_read_from_table_and_multiple_pages(self):
        """The reader follows pageToken across multiple result pages."""
        client = mock.Mock()
        client.jobs.Insert.return_value = bigquery.Job(
            jobReference=bigquery.JobReference(
                jobId='somejob'))
        table_rows, schema, expected_rows = self.get_test_rows()
        # Return a pageToken on first call to trigger the code path where
        # query needs to handle multiple pages of results.
        client.jobs.GetQueryResults.side_effect = [
            bigquery.GetQueryResultsResponse(
                jobComplete=True, rows=table_rows, schema=schema,
                pageToken='token'),
            bigquery.GetQueryResultsResponse(
                jobComplete=True, rows=table_rows, schema=schema)]
        actual_rows = []
        with beam.io.BigQuerySource('dataset.table').reader(client) as reader:
            for row in reader:
                actual_rows.append(row)
        # We return expected rows for each of the two pages of results so we
        # adjust our expectation below accordingly.
        self.assertEqual(actual_rows, expected_rows * 2)

    def test_table_schema_without_project(self):
        # Reader should pick executing project by default.
        source = beam.io.BigQuerySource(table='mydataset.mytable')
        options = PipelineOptions(flags=['--project', 'myproject'])
        source.pipeline_options = options
        reader = source.reader()
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual('SELECT * FROM [myproject:mydataset.mytable];',
                         reader.query)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQueryWriter(unittest.TestCase):
    """Tests for the writer produced by BigQuerySink.writer().

    Each test uses a mock API client to exercise the create/write
    disposition logic without network access.
    """

    @mock.patch('time.sleep', return_value=None)
    def test_no_table_and_create_never(self, patched_time_sleep):
        """Missing table with CREATE_NEVER raises a RuntimeError."""
        client = mock.Mock()
        client.tables.Get.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        create_disposition = beam.io.BigQueryDisposition.CREATE_NEVER
        with self.assertRaises(RuntimeError) as exn:
            with beam.io.BigQuerySink(
                    'project:dataset.table',
                    create_disposition=create_disposition).writer(client):
                pass
        self.assertEqual(
            exn.exception.message,
            'Table project:dataset.table not found but create disposition is '
            'CREATE_NEVER.')

    def test_no_table_and_create_if_needed(self):
        """Missing table with CREATE_IF_NEEDED and a schema creates the table."""
        client = mock.Mock()
        table = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tables.Get.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        client.tables.Insert.return_value = table
        create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED
        with beam.io.BigQuerySink(
                'project:dataset.table',
                schema='somefield:INTEGER',
                create_disposition=create_disposition).writer(client):
            pass
        self.assertTrue(client.tables.Get.called)
        self.assertTrue(client.tables.Insert.called)

    @mock.patch('time.sleep', return_value=None)
    def test_no_table_and_create_if_needed_and_no_schema(
            self, patched_time_sleep):
        """CREATE_IF_NEEDED without a schema raises: nothing to create from."""
        client = mock.Mock()
        client.tables.Get.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED
        with self.assertRaises(RuntimeError) as exn:
            with beam.io.BigQuerySink(
                    'project:dataset.table',
                    create_disposition=create_disposition).writer(client):
                pass
        self.assertEqual(
            exn.exception.message,
            'Table project:dataset.table requires a schema. None can be inferred '
            'because the table does not exist.')

    @mock.patch('time.sleep', return_value=None)
    def test_table_not_empty_and_write_disposition_empty(
            self, patched_time_sleep):
        """WRITE_EMPTY on a non-empty table raises a RuntimeError."""
        client = mock.Mock()
        client.tables.Get.return_value = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tabledata.List.return_value = bigquery.TableDataList(totalRows=1)
        write_disposition = beam.io.BigQueryDisposition.WRITE_EMPTY
        with self.assertRaises(RuntimeError) as exn:
            with beam.io.BigQuerySink(
                    'project:dataset.table',
                    write_disposition=write_disposition).writer(client):
                pass
        self.assertEqual(
            exn.exception.message,
            'Table project:dataset.table is not empty but write disposition is '
            'WRITE_EMPTY.')

    def test_table_empty_and_write_disposition_empty(self):
        """WRITE_EMPTY on an empty table neither deletes nor recreates it."""
        client = mock.Mock()
        table = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tables.Get.return_value = table
        client.tabledata.List.return_value = bigquery.TableDataList(totalRows=0)
        client.tables.Insert.return_value = table
        write_disposition = beam.io.BigQueryDisposition.WRITE_EMPTY
        with beam.io.BigQuerySink(
                'project:dataset.table',
                write_disposition=write_disposition).writer(client):
            pass
        self.assertTrue(client.tables.Get.called)
        self.assertTrue(client.tabledata.List.called)
        self.assertFalse(client.tables.Delete.called)
        self.assertFalse(client.tables.Insert.called)

    def test_table_with_write_disposition_truncate(self):
        """WRITE_TRUNCATE deletes and recreates the table."""
        client = mock.Mock()
        table = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tables.Get.return_value = table
        client.tables.Insert.return_value = table
        write_disposition = beam.io.BigQueryDisposition.WRITE_TRUNCATE
        with beam.io.BigQuerySink(
                'project:dataset.table',
                write_disposition=write_disposition).writer(client):
            pass
        self.assertTrue(client.tables.Get.called)
        self.assertTrue(client.tables.Delete.called)
        self.assertTrue(client.tables.Insert.called)

    def test_table_with_write_disposition_append(self):
        """WRITE_APPEND leaves the existing table untouched."""
        client = mock.Mock()
        table = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tables.Get.return_value = table
        client.tables.Insert.return_value = table
        write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
        with beam.io.BigQuerySink(
                'project:dataset.table',
                write_disposition=write_disposition).writer(client):
            pass
        self.assertTrue(client.tables.Get.called)
        self.assertFalse(client.tables.Delete.called)
        self.assertFalse(client.tables.Insert.called)

    def test_rows_are_written(self):
        """Write() issues an InsertAll request with the expected row payload."""
        client = mock.Mock()
        table = bigquery.Table(
            tableReference=bigquery.TableReference(
                projectId='project', datasetId='dataset', tableId='table'),
            schema=bigquery.TableSchema())
        client.tables.Get.return_value = table
        write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
        insert_response = mock.Mock()
        insert_response.insertErrors = []
        client.tabledata.InsertAll.return_value = insert_response
        with beam.io.BigQuerySink(
                'project:dataset.table',
                write_disposition=write_disposition).writer(client) as writer:
            writer.Write({'i': 1, 'b': True, 's': 'abc', 'f': 3.14})
        sample_row = {'i': 1, 'b': True, 's': 'abc', 'f': 3.14}
        expected_rows = []
        json_object = bigquery.JsonObject()
        # NOTE(review): iteritems() is Python-2-only, consistent with the rest
        # of this file (e.g. exception .message usage).
        for k, v in sample_row.iteritems():
            json_object.additionalProperties.append(
                bigquery.JsonObject.AdditionalProperty(
                    key=k, value=to_json_value(v)))
        expected_rows.append(
            bigquery.TableDataInsertAllRequest.RowsValueListEntry(
                insertId='_1',  # First row ID generated with prefix ''
                json=json_object))
        client.tabledata.InsertAll.assert_called_with(
            bigquery.BigqueryTabledataInsertAllRequest(
                projectId='project', datasetId='dataset', tableId='table',
                tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
                    rows=expected_rows)))

    def test_table_schema_without_project(self):
        # Writer should pick executing project by default.
        sink = beam.io.BigQuerySink(table='mydataset.mytable')
        options = PipelineOptions(flags=['--project', 'myproject'])
        sink.pipeline_options = options
        writer = sink.writer()
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual('myproject', writer.project_id)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQueryWrapper(unittest.TestCase):
    """Tests for BigQueryWrapper's delete/create helpers and retry behavior."""

    def test_delete_non_existing_dataset(self):
        """Deleting a missing dataset (404) is swallowed, not raised."""
        client = mock.Mock()
        client.datasets.Delete.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        wrapper._delete_dataset('', '')
        self.assertTrue(client.datasets.Delete.called)

    @mock.patch('time.sleep', return_value=None)
    def test_delete_dataset_retries_fail(self, patched_time_sleep):
        """A persistently failing delete retries MAX_RETRIES times, then raises."""
        client = mock.Mock()
        client.datasets.Delete.side_effect = ValueError("Cannot delete")
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        with self.assertRaises(ValueError) as _:
            wrapper._delete_dataset('', '')
        # One initial attempt plus MAX_RETRIES retries.
        self.assertEqual(
            beam.io.google_cloud_platform.bigquery.MAX_RETRIES + 1,
            client.datasets.Delete.call_count)
        self.assertTrue(client.datasets.Delete.called)

    def test_delete_non_existing_table(self):
        """Deleting a missing table (404) is swallowed, not raised."""
        client = mock.Mock()
        client.tables.Delete.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        wrapper._delete_table('', '', '')
        self.assertTrue(client.tables.Delete.called)

    @mock.patch('time.sleep', return_value=None)
    def test_delete_table_retries_fail(self, patched_time_sleep):
        """A persistently failing table delete eventually raises."""
        client = mock.Mock()
        client.tables.Delete.side_effect = ValueError("Cannot delete")
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        with self.assertRaises(ValueError) as _:
            wrapper._delete_table('', '', '')
        self.assertTrue(client.tables.Delete.called)

    @mock.patch('time.sleep', return_value=None)
    def test_delete_dataset_retries_for_timeouts(self, patched_time_sleep):
        """A 408 timeout is retried; the second attempt succeeds."""
        client = mock.Mock()
        client.datasets.Delete.side_effect = [
            HttpError(
                response={'status': '408'}, url='', content=''),
            bigquery.BigqueryDatasetsDeleteResponse()
        ]
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        wrapper._delete_dataset('', '')
        self.assertTrue(client.datasets.Delete.called)

    @mock.patch('time.sleep', return_value=None)
    def test_delete_table_retries_for_timeouts(self, patched_time_sleep):
        """A 408 timeout on table delete is retried until it succeeds."""
        client = mock.Mock()
        client.tables.Delete.side_effect = [
            HttpError(
                response={'status': '408'}, url='', content=''),
            bigquery.BigqueryTablesDeleteResponse()
        ]
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        wrapper._delete_table('', '', '')
        self.assertTrue(client.tables.Delete.called)

    @mock.patch('time.sleep', return_value=None)
    def test_temporary_dataset_is_unique(self, patched_time_sleep):
        """Creating a temporary dataset that already exists raises."""
        client = mock.Mock()
        client.datasets.Get.return_value = bigquery.Dataset(
            datasetReference=bigquery.DatasetReference(
                projectId='project_id', datasetId='dataset_id'))
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        with self.assertRaises(RuntimeError) as _:
            wrapper.create_temporary_dataset('project_id')
        self.assertTrue(client.datasets.Get.called)

    def test_get_or_create_dataset_created(self):
        """get_or_create_dataset inserts the dataset when Get returns 404."""
        client = mock.Mock()
        client.datasets.Get.side_effect = HttpError(
            response={'status': '404'}, url='', content='')
        client.datasets.Insert.return_value = bigquery.Dataset(
            datasetReference=bigquery.DatasetReference(
                projectId='project_id', datasetId='dataset_id'))
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        new_dataset = wrapper.get_or_create_dataset('project_id', 'dataset_id')
        self.assertEqual(new_dataset.datasetReference.datasetId, 'dataset_id')

    def test_get_or_create_dataset_fetched(self):
        """get_or_create_dataset returns the existing dataset when found."""
        client = mock.Mock()
        client.datasets.Get.return_value = bigquery.Dataset(
            datasetReference=bigquery.DatasetReference(
                projectId='project_id', datasetId='dataset_id'))
        wrapper = beam.io.google_cloud_platform.bigquery.BigQueryWrapper(client)
        new_dataset = wrapper.get_or_create_dataset('project_id', 'dataset_id')
        self.assertEqual(new_dataset.datasetReference.datasetId, 'dataset_id')
# Script entry point: raise log verbosity, then run unittest discovery.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
| |
import six
import mock
if six.PY3:
from io import StringIO
else:
from StringIO import StringIO
from ..config import is_number
from ..base import BaseSmartCSVTestCase
import smartcsv
from smartcsv.exceptions import InvalidCSVException
class ValidatorColumnTestCase(BaseSmartCSVTestCase):
    """Tests for per-column 'validator' callables in smartcsv readers."""

    def test_valid_value_is_provided(self):
        """Should be validated ok"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': '799'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                'validator': is_number
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: list(next(reader)))
        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, iphone_data)
        self.assertModelsEquals(ipad, ipad_data)

    def test_invalid_value_is_passed_and_exception_is_raised(self):
        """Should not validate and raise an exception (fail_fast=True)"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': 'INVALID'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                'validator': is_number
            },
        ])
        try:
            next(reader)
        except InvalidCSVException as e:
            self.assertTrue(e.errors is not None)
            self.assertTrue('price' in e.errors)
        else:
            # Fixed: the test previously passed silently when no exception
            # was raised; the missing exception is now an explicit failure.
            self.fail('InvalidCSVException was not raised')

    def test_invalid_value_is_passed_and_no_exception_is_raised(self):
        """Should not validate and the error be reported on the reader"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': 'INVALID'
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': '699'
        }
        iphone_row = "{title},{price}".format(**iphone_data)
        csv_data = """
title,price
{iphone_row}
{ipad_row}""".format(
            iphone_row=iphone_row,
            ipad_row="{title},{price}".format(**ipad_data)
        )
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': True,
                'validator': is_number
            },
        ], fail_fast=False)
        # With fail_fast=False the invalid iPhone row is skipped and only the
        # valid iPad row is yielded.
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: list(next(reader)))
        self.assertTrue(isinstance(ipad, dict))
        self.assertModelsEquals(ipad, ipad_data)
        self.assertTrue(reader.errors is not None)
        self.assertTrue('rows' in reader.errors)
        self.assertEqual(len(reader.errors['rows']), 1)  # 1 row failing
        self.assertRowError(
            reader.errors, iphone_row, 0, 'price')

    def test_not_required_value_empty_is_not_validated(self):
        """Should not try to validate an empty value"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': ''
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': ''
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        mocked_validator = mock.MagicMock(return_value=True)
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': False,
                'validator': mocked_validator
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: list(next(reader)))
        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, iphone_data)
        self.assertModelsEquals(ipad, ipad_data)
        self.assertEqual(mocked_validator.call_count, 0)

    def test_default_value_is_not_validated(self):
        """Should not try to validate the default value of an empty column"""
        iphone_data = {
            'title': 'iPhone 5C',
            'price': ''
        }
        ipad_data = {
            'title': 'iPad mini',
            'price': ''
        }
        csv_data = """
title,price
{iphone_row}
{ipad_row}""".format(
            iphone_row="{title},{price}".format(**iphone_data),
            ipad_row="{title},{price}".format(**ipad_data)
        )
        mocked_validator = mock.MagicMock(return_value=True)
        reader = smartcsv.reader(StringIO(csv_data), columns=[
            {'name': 'title', 'required': True},
            {
                'name': 'price',
                'required': False,
                'default': 999,
                'validator': mocked_validator
            },
        ])
        iphone = next(reader)
        ipad = next(reader)
        self.assertRaises(StopIteration, lambda: list(next(reader)))
        self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
        self.assertModelsEquals(iphone, {
            'title': 'iPhone 5C',
            'price': 999
        })
        self.assertModelsEquals(ipad, {
            'title': 'iPad mini',
            'price': 999
        })
        # Just the model definition
        self.assertEqual(mocked_validator.call_count, 1)
| |
import boto3
import botocore
import time
import logging
import os
from datetime import datetime
import tarfile
from spark_controler.ec2_instance_data_dict import ec2_data_dict
# Module-level logger wired to a stderr stream handler at DEBUG level.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# NOTE(review): adding a StreamHandler at import time can duplicate log lines
# if this module is re-imported or the application also configures logging —
# confirm this is intentional before changing.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class EMRController(object):
def __init__(self, profile_name = 'default', aws_access_key = False, aws_secret_access_key = False, region_name = 'us-east-1',
cluster_name = 'Spark-Cluster', master_instance_count = 1,worker_instance_count = 3, master_instance_type = 'm3.xlarge', slave_instance_type = 'm3.xlarge',
key_name = 'EMR_Key', subnet_id = 'subnet-50c2a327', software_version = 'emr-5.5.0', s3_bucket = 'emr-related-files', path_script =os.path.dirname( __file__ ),
additional_job_args=['--packages', 'ai.h2o:sparkling-water-core_2.11:2.1.7', '--conf', 'spark.dynamicAllocation.enabled=false'], set_maxmimum_allocation=True, number_of_executors_per_node=1 ):
self.init_datetime_string = self.get_datetime_str() # Used to create a s3 directory so multiple scripts don't overwrite the same files
self.aws_access_key = aws_access_key # If you don't wan to use a credential from the AWS CLI on your machine set this
self.aws_secret_access_key = aws_secret_access_key # If you don't wan to use a credential from the AWS CLI on your machine set this
self.region_name = region_name # AWS region to run the cluster in i.e. 'us-east-1'
self.cluster_name = cluster_name+'_'+self.init_datetime_string # Application Name on EMR
self.master_instance_count = master_instance_count # Number of master nodes to deploy
self.worker_instance_count = worker_instance_count # Total number of worker instances
self.master_instance_type = master_instance_type # EC2 intance type for the master node(s)
self.slave_instance_type = slave_instance_type # EC2 instance type for the worker nodes
self.key_name = key_name # Your ssh key used to ssh into the master node. i.e. 'My_KEY'
self.subnet_id = subnet_id # The Subnet on AWS for the cluster
self.software_version = software_version # Elastic Map Reduce Version
self.profile_name = profile_name # Define IAM profile name (see: http://boto3.readthedocs.io/en/latest/guide/configuration.html)(config file located at user folder .aws directory)
self.s3_bucket = s3_bucket # S3 Bucket to use for storage
self.path_script = path_script # The path to your python script. If you are running /user/me/script.py set this to '/user/me'. If you are importing this from the same dir leave it default
self.file_to_run = 'test.py' # The file you want to run from the compressed files
self.job_flow_id = None # AWS's unique ID for an EMR Cluster exameple: 'j-17LA5TIOEEEU3'
self.additional_job_args = additional_job_args # Additional args for submitting an application to cluster
self.set_maxmimum_allocation = set_maxmimum_allocation # Calculates the maximum allocation in the cluster to use for the job then sets spark config properties boolean value: True or False
self.number_of_executors_per_node = number_of_executors_per_node # The number of executors per node (only used if set_maxmimum_alocation=True)
def boto_client(self, service):
"""
This will return a boto_client set the service i.e. 'emr' or 's3'.
:return: boto3.client
"""
if self.aws_access_key and self.aws_secret_access_key:
client = boto3.client(service,
aws_access_key_id=self.aws_access_key,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.region_name)
return client
else:
session = boto3.Session(profile_name=self.profile_name)
return session.client(service, region_name=self.region_name)
def load_cluster(self, _spark_properties=False):
"""
Spins up a cluster on AWS EMR.
:param dict _spark_properties: A dict of any default spark properties to set on cluster
:return: the response object from boto
"""
spark_properties = {}
if _spark_properties:
spark_properties = _spark_properties
response = self.boto_client("emr").run_job_flow(
Name=self.cluster_name,
LogUri='s3://'+self.s3_bucket+'/logs',
ReleaseLabel=self.software_version,
Instances={
# 'MasterInstanceType': self.master_instance_type,
# 'SlaveInstanceType': self.slave_instance_type,
# 'InstanceCount': self.instance_count,
'InstanceGroups': [
{
'Name': 'master(s)',
'Market': 'ON_DEMAND',#|'SPOT'
'InstanceRole': 'MASTER',#|'CORE'|'TASK'
# 'BidPrice': 'string',
'InstanceType': self.master_instance_type,
'InstanceCount': self.master_instance_count,
'Configurations': [
{
"Classification": "hadoop-env", #set user environment varaibles in here
"Properties": {
},
"Configurations": [
{
"Classification": "export",
"Properties": {
"JAVA_OPTS" : "-Xms128m -Xmx"+str(int(ec2_data_dict[self.master_instance_type]["memory"])*1024)+"m"
},
"Configurations": [
]
}
]
},
],
# 'EbsConfiguration': {
# 'EbsBlockDeviceConfigs': [
# {
# 'VolumeSpecification': {
# 'VolumeType': 'standard',#gp2, io1, standard
# # 'Iops': 123,
# 'SizeInGB': 100
# },
# 'VolumesPerInstance': 1
# },
# ],
# 'EbsOptimized': True#|False
# },
# 'AutoScalingPolicy': {
# 'Constraints': {
# 'MinCapacity': 123,
# 'MaxCapacity': 123
# },
# # 'Rules': [
# # {
# # 'Name': 'string',
# # 'Description': 'string',
# # 'Action': {
# # 'Market': 'ON_DEMAND'|'SPOT',
# # 'SimpleScalingPolicyConfiguration': {
# # 'AdjustmentType': 'CHANGE_IN_CAPACITY'|'PERCENT_CHANGE_IN_CAPACITY'|'EXACT_CAPACITY',
# # 'ScalingAdjustment': 123,
# # 'CoolDown': 123
# # }
# # },
# #
# # # 'Trigger': {
# # # 'CloudWatchAlarmDefinition': {
# # # 'ComparisonOperator': 'GREATER_THAN_OR_EQUAL'|'GREATER_THAN'|'LESS_THAN'|'LESS_THAN_OR_EQUAL',
# # # 'EvaluationPeriods': 123,
# # # 'MetricName': 'string',
# # # 'Namespace': 'string',
# # # 'Period': 123,
# # # 'Statistic': 'SAMPLE_COUNT'|'AVERAGE'|'SUM'|'MINIMUM'|'MAXIMUM',
# # # 'Threshold': 123.0,
# # # 'Unit': 'NONE'|'SECONDS'|'MICRO_SECONDS'|'MILLI_SECONDS'|'BYTES'|'KILO_BYTES'|'MEGA_BYTES'|'GIGA_BYTES'|'TERA_BYTES'|'BITS'|'KILO_BITS'|'MEGA_BITS'|'GIGA_BITS'|'TERA_BITS'|'PERCENT'|'COUNT'|'BYTES_PER_SECOND'|'KILO_BYTES_PER_SECOND'|'MEGA_BYTES_PER_SECOND'|'GIGA_BYTES_PER_SECOND'|'TERA_BYTES_PER_SECOND'|'BITS_PER_SECOND'|'KILO_BITS_PER_SECOND'|'MEGA_BITS_PER_SECOND'|'GIGA_BITS_PER_SECOND'|'TERA_BITS_PER_SECOND'|'COUNT_PER_SECOND',
# # # 'Dimensions': [
# # # {
# # # 'Key': 'string',
# # # 'Value': 'string'
# # # },
# # # ]
# # # }
# # # }
# #
# # },
# # ]
# }
},
{
'Name': 'slaves',
'Market': 'ON_DEMAND',#|'SPOT'
'InstanceRole': 'CORE',#|'MASTER'|'TASK'
# 'BidPrice': 'string',
'InstanceType': self.slave_instance_type,
'InstanceCount': self.worker_instance_count,
'Configurations': [
{
"Classification": "hadoop-env", #set user environment varaibles in here
"Properties": {
},
"Configurations": [
{
"Classification": "export",
"Properties": {
"JAVA_OPTS" : "-Xms128m -Xmx"+str(int(ec2_data_dict[self.slave_instance_type]["memory"])*1024)+"m"
},
"Configurations": [
]
}
]
},
],
# 'EbsConfiguration': {
# 'EbsBlockDeviceConfigs': [
# {
# 'VolumeSpecification': {
# 'VolumeType': 'standard',#gp2, io1, standard
# # 'Iops': 123,
# 'SizeInGB': 100
# },
# 'VolumesPerInstance': 1
# },
# ],
# 'EbsOptimized': True#|False
# },
# 'AutoScalingPolicy': {
# 'Constraints': {
# 'MinCapacity': 123,
# 'MaxCapacity': 123
# },
# # 'Rules': [
# # {
# # 'Name': 'string',
# # 'Description': 'string',
# # 'Action': {
# # 'Market': 'ON_DEMAND'|'SPOT',
# # 'SimpleScalingPolicyConfiguration': {
# # 'AdjustmentType': 'CHANGE_IN_CAPACITY'|'PERCENT_CHANGE_IN_CAPACITY'|'EXACT_CAPACITY',
# # 'ScalingAdjustment': 123,
# # 'CoolDown': 123
# # }
# # },
# #
# # # 'Trigger': {
# # # 'CloudWatchAlarmDefinition': {
# # # 'ComparisonOperator': 'GREATER_THAN_OR_EQUAL'|'GREATER_THAN'|'LESS_THAN'|'LESS_THAN_OR_EQUAL',
# # # 'EvaluationPeriods': 123,
# # # 'MetricName': 'string',
# # # 'Namespace': 'string',
# # # 'Period': 123,
# # # 'Statistic': 'SAMPLE_COUNT'|'AVERAGE'|'SUM'|'MINIMUM'|'MAXIMUM',
# # # 'Threshold': 123.0,
# # # 'Unit': 'NONE'|'SECONDS'|'MICRO_SECONDS'|'MILLI_SECONDS'|'BYTES'|'KILO_BYTES'|'MEGA_BYTES'|'GIGA_BYTES'|'TERA_BYTES'|'BITS'|'KILO_BITS'|'MEGA_BITS'|'GIGA_BITS'|'TERA_BITS'|'PERCENT'|'COUNT'|'BYTES_PER_SECOND'|'KILO_BYTES_PER_SECOND'|'MEGA_BYTES_PER_SECOND'|'GIGA_BYTES_PER_SECOND'|'TERA_BYTES_PER_SECOND'|'BITS_PER_SECOND'|'KILO_BITS_PER_SECOND'|'MEGA_BITS_PER_SECOND'|'GIGA_BITS_PER_SECOND'|'TERA_BITS_PER_SECOND'|'COUNT_PER_SECOND',
# # # 'Dimensions': [
# # # {
# # # 'Key': 'string',
# # # 'Value': 'string'
# # # },
# # # ]
# # # }
# # # }
# #
# # },
# # ]
# }
},
],
'KeepJobFlowAliveWhenNoSteps': True,
'TerminationProtected': False,
'Ec2KeyName': self.key_name,
'Ec2SubnetId': self.subnet_id
},
Applications=[
{
'Name': 'Spark'
},
{
'Name': 'Hadoop'
}
],
BootstrapActions=[
{
'Name': 'Install Conda',
'ScriptBootstrapAction': {
'Path': 's3://{s3_bucket}/temp/{init_datetime_string}/bootstrap_actions.sh'.format(
s3_bucket=self.s3_bucket,init_datetime_string=self.init_datetime_string),
}
},
# UNCOMMENT FOR AUTOTERMINATE BEHAVIOR
# {
# 'Name': 'idle timeout',
# 'ScriptBootstrapAction': {
# 'Path':'s3n://{}/{}/terminate_idle_cluster.sh'.format(self.s3_bucket + '/' + self.s3_path_temp_files, self.job_name),
# 'Args': ['3600', '300']
# }
# },
],
Configurations=[
# {
# 'Classification': 'spark-env',
# 'Configurations': [
# {
# "Classification": "export",
# "Properties": {
# "PYSPARK_PYTHON": "python34",
# "PYSPARK_PYTHON": "/home/hadoop/conda/bin/python",
# "PYSPARK_DRIVER_PYTHON":"/home/hadoop/conda/bin/python"
# },
# "Configurations": []
# }
# ],
# 'Properties': {
# }
# },
{
"Classification": "hadoop-env",
"Properties": {
},
"Configurations": [
{
"Classification": "export",
"Properties": {
"PYTHONHASHSEED": "123", #This is required for pyspark so all nodes have the same seed
# "HADOOP_DATANODE_HEAPSIZE": "2048",
# "HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19"
},
"Configurations": [
]
}
]
},
# {
# "Classification": "spark",
# "Properties": {
# "maximizeResourceAllocation": "true", #AWS has problems with some instance types with this set (generates wrong spark settings, wtf AWS)
#
# }
# },
{
"Classification": "spark-defaults", #Change values in Spark's spark-defaults.conf file
"Properties": spark_properties,
},
{
"Classification": "yarn-site", #Change values in YARN's yarn-site.xml file
"Properties": {
"yarn.scheduler.maximum-allocation-mb": str(int(ec2_data_dict[self.slave_instance_type]["memory"])*1024 - 1024), #So yarn can use almost the entire amount of RAM -1GB for OS
"yarn.nodemanager.resource.memory-mb": str(int(ec2_data_dict[self.slave_instance_type]["memory"])*1024 - 1024), #
},
},
],
VisibleToAllUsers=True,
JobFlowRole='EMR_EC2_DefaultRole',
ServiceRole='EMR_DefaultRole'
)
logger.info(response)
return response
def add_create_step(self, job_flow_id, master_dns):
"""
This step has to be run directly after the bootstrapping to ensure that
conda has been properly linked to the spark environment.
:param string job_flow_id: The clusters id example: j-17LA5TIOEEEU3
:param string master_dns: the dns address of the master node
:return: the response object from boto3
"""
response = self.boto_client("emr").add_job_flow_steps(
JobFlowId=job_flow_id,
Steps=[
{
'Name': 'setup - copy files',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['aws', 's3', 'cp',
's3://{s3_bucket}/temp/{init_datetime_string}/pyspark_quick_setup.sh'.format(
s3_bucket=self.s3_bucket,init_datetime_string=self.init_datetime_string),
'/home/hadoop/']
}
},
{
'Name': 'setup pyspark with conda',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['sudo', 'bash', '/home/hadoop/pyspark_quick_setup.sh', master_dns]
}
}
]
)
logger.info(response)
return response
def add_spark_submit_step(self, job_flow_id,name_of_script_directory):
"""
Steps for EMR to upload the python files and run them as a spark-submit
on the cluster.
First it uploads the .tar file, then decompresses it, then spark-submits
it.
:param string job_flow_id: The clusters id example: j-17LA5TIOEEEU3
:param string name_of_script_directory: the name of the directory to hold scripts on s3 and master node. The file/directory holding the file should be a unique id to prevent overwritting
:return: the response object from boto
"""
args = []
args.append('spark-submit')
if self.additional_job_args:
for arg in self.additional_job_args:
args.append(arg)
args.append("/home/hadoop/scripts/" + name_of_script_directory + '/' + self.file_to_run)
response = self.boto_client("emr").add_job_flow_steps(
JobFlowId=job_flow_id,
Steps=[
{
'Name': 'Copy_Tar',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['aws', 's3', 'cp',
's3://{s3_bucket}/temp/{name_of_script_directory}/script.tar.gz'.format(
s3_bucket=self.s3_bucket,name_of_script_directory=name_of_script_directory),
'/home/hadoop/scripts/' + name_of_script_directory + '/']
}
},
{
'Name': 'Decompress script.tar.gz',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['tar', 'zxvf', '/home/hadoop/scripts/' + name_of_script_directory + '/script.tar.gz','-C','/home/hadoop/scripts/'+ name_of_script_directory]
}
},
{
'Name': 'Spark Application',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': args
}
}
]
)
logger.info(response)
time.sleep(1)
return response
def create_bucket_on_s3(self, bucket_name):
"""
Checks to see if the bucket exists if not it will create one by that
name.
:param string bucket_name: name of the s3 bucket to store all data from cluster
"""
s3 = self.boto_client("s3")
try:
logger.info("Bucket already exists.")
s3.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e:
logger.info("Bucket does not exist: {error}. I will create it!".format(error=e))
s3.create_bucket(Bucket=bucket_name)
def upload_to_s3(self, path_to_file, bucket_name, path_on_s3):
"""
Uploads a file to s3.
:param string path_to_file: The path of the file on local to upload.
:param string bucket_name: The name of the s3 bucket
:param string path_on_s3: The path and file it should be called on s3.
"""
logger.info(
"Upload file '{file_name}' to bucket '{bucket_name}'".format(file_name=path_on_s3, bucket_name=bucket_name))
s3 = None
if self.aws_access_key and self.aws_secret_access_key:
s3 = self.boto_client("s3")
s3.upload_file(path_to_file, bucket_name, path_on_s3)
else:
s3 = boto3.Session(profile_name=self.profile_name).resource('s3')
s3.Object(bucket_name, path_on_s3)\
.put(Body=open(path_to_file, 'rb'), ContentType='text/x-sh')
def get_maximum_resource_allocation_properties(self,_master_memory,_master_cores,_memory_per_workder_node_gb,_cores_per_worker_node,_number_of_worker_nodes,_executors_per_node = 1):
"""
Will calculate spark configuration settings that maximize resource
allocation within the cluster. Useful when you know you are only going
to run one job at a time or are setting dynamicAllocation to false.
:return: a dictonary of the properties to pass to boto3/AWS/spark
"""
import math
#Set by user
master_memory = int(_master_memory)
master_cores = int(_master_cores)
number_of_worker_nodes = int(_number_of_worker_nodes)
memory_per_workder_node_gb = int(_memory_per_workder_node_gb)
cores_per_worker_node = int(_cores_per_worker_node)
executors_per_node = int(_executors_per_node)
#Change with caution
memory_overhead_coefficient = 0.1
executor_memory_upper_bound_gb = memory_per_workder_node_gb
executor_core_upper_bound = 5
os_reserved_cores = 1
os_reserved_memory_gb = 1
parallelism_per_core = 2
#Calculations from previous variables
availible_master_memory = master_memory - os_reserved_memory_gb
availible_master_cores = master_cores - os_reserved_cores
availible_worker_memory = memory_per_workder_node_gb - os_reserved_memory_gb
availible_worker_cores = cores_per_worker_node - os_reserved_cores
total_memory_per_executor = math.floor(availible_worker_memory/executors_per_node)
overhead_memory_per_executor = math.ceil(total_memory_per_executor*memory_overhead_coefficient)
memory_per_executor = total_memory_per_executor - overhead_memory_per_executor
cores_per_executor = math.floor(availible_worker_cores/executors_per_node)
unused_memory_per_node = availible_worker_memory -(executors_per_node*total_memory_per_executor)
unused_cores_per_node = availible_worker_cores - (executors_per_node*cores_per_executor)
spark_executor_instances = number_of_worker_nodes*executors_per_node
spark_yarn_driver_memoryOverhead = math.ceil(availible_master_memory*memory_overhead_coefficient)*1024
return {
"spark.executor.instances": str(spark_executor_instances),
"spark.yarn.executor.memoryOverhead":str(overhead_memory_per_executor*1024),
"spark.executor.memory": str(int(memory_per_executor*1024))+'m',
"spark.yarn.driver.memoryOverhead":str(spark_yarn_driver_memoryOverhead),
"spark.driver.memory":str(int(min(availible_master_memory-(spark_yarn_driver_memoryOverhead/1024),executor_memory_upper_bound_gb-(executor_memory_upper_bound_gb*memory_overhead_coefficient) )*1024))+'m',
"spark.executor.cores": str(cores_per_executor),
"spark.driver.cores": str(min(availible_master_cores,executor_core_upper_bound)),
"spark.default.parallelism":str(spark_executor_instances*cores_per_executor*parallelism_per_core)
}
def get_datetime_str(self):
"""
Gets a formated datetime string for naming purposes.
"""
return datetime.now().strftime("%Y%m%d.%H:%M:%S.%f")
def generate_job_name(self):
"""
Generates a Job name Key for referencing the EMR cluster on the AWS
Console and through logs.
"""
self.job_name = "{}.{}.{}".format(self.app_name,
self.user,
self.get_datetime_str())
def tar_python_script(self):
"""
Compresses a tar file and saves it.
:return:
"""
# Create tar.gz file
t_file = tarfile.open(os.path.dirname( __file__ )+"/files/script.tar.gz", 'w:gz')
# Add Spark script path to tar.gz file
files = os.listdir(self.path_script)
for f in files:
t_file.add(self.path_script + '/' + f, arcname=f)
# List all files in tar.gz
for f in t_file.getnames():
logger.info("Added %s to tar-file" % f)
t_file.close()
    def remove_temp_files(self, s3):
        """
        Remove Spark files from temporary bucket. NOT FINISHED TODO
        :param s3: a boto3 s3 *resource* (must expose .Bucket())
        :return:
        """
        # NOTE(review): depends on self.job_name, which is only assigned by
        # generate_job_name() -- confirm it has been called before this.
        bucket = s3.Bucket(self.s3_bucket)
        for key in bucket.objects.all():
            if key.key.startswith(self.job_name) is True:
                key.delete()
                logger.info("Removed '{}' from bucket for temporary files".format(key.key))
    def run(self,execute_type='create'):
        """
        This will run the execution of the program. Call this after vars are set.

        'create': uploads the bootstrap scripts, spins up the cluster, waits
        for it to reach a ready state, then runs the conda setup steps.
        'run_job': tars the script directory, uploads it, and submits it as a
        spark-submit step to the already-created cluster (self.job_flow_id).

        :param string execute_type: Used to either create a cluster or submit a job. Accepted: 'create' or 'run_job'
        """
        if execute_type == 'create':
            logger.info(
                "*******************************************+**********************************************************")
            logger.info("Load config and set up client.")
            logger.info(
                "*******************************************+**********************************************************")
            logger.info("Check if bucket exists otherwise create it and upload files to S3.")
            self.create_bucket_on_s3(bucket_name=self.s3_bucket)
            # Stage the three shell scripts under a per-run S3 prefix.
            self.upload_to_s3(os.path.dirname( __file__ )+"/scripts/bootstrap_actions.sh", bucket_name=self.s3_bucket,
                              path_on_s3="temp/"+self.init_datetime_string+"/bootstrap_actions.sh")
            self.upload_to_s3(os.path.dirname( __file__ )+"/scripts/pyspark_quick_setup.sh", bucket_name=self.s3_bucket,
                              path_on_s3="temp/"+self.init_datetime_string+"/pyspark_quick_setup.sh")
            self.upload_to_s3(os.path.dirname( __file__ )+"/scripts/terminate_idle_cluster.sh", bucket_name=self.s3_bucket,
                              path_on_s3="temp/"+self.init_datetime_string+"/terminate_idle_cluster.sh")
            logger.info(
                "*******************************************+**********************************************************")
            logger.info("Create cluster and run boostrap.")
            spark_properties = {}
            if self.set_maxmimum_allocation:
                #Get the cores/RAM of worker/master
                master_memory = ec2_data_dict[self.master_instance_type]['memory']
                master_cores = ec2_data_dict[self.master_instance_type]['cores']
                worker_memory = ec2_data_dict[self.slave_instance_type]['memory']
                worker_cores = ec2_data_dict[self.slave_instance_type]['cores']
                spark_properties = self.get_maximum_resource_allocation_properties(_master_memory=master_memory,_master_cores=master_cores,_memory_per_workder_node_gb=worker_memory,_cores_per_worker_node=worker_cores,_number_of_worker_nodes=self.worker_instance_count,_executors_per_node=self.number_of_executors_per_node)
                print('spark_properties:')
                print(spark_properties)
            #Spin up the cluster
            emr_response = self.load_cluster(_spark_properties = spark_properties)
            emr_client = self.boto_client("emr")
            self.job_flow_id = emr_response.get("JobFlowId")
            #wait until cluster is in a ready state
            # NOTE(review): 'master_dns' (and in pathological cases 'step')
            # is only bound once MasterPublicDnsName appears; if the cluster
            # reaches WAITING/RUNNING before a DNS name is reported this
            # raises NameError below -- confirm against the EMR API.
            while True:
                job_response = emr_client.describe_cluster(
                    ClusterId=emr_response.get("JobFlowId")
                )
                time.sleep(10)
                if job_response.get("Cluster").get("MasterPublicDnsName") is not None:
                    master_dns = job_response.get("Cluster").get("MasterPublicDnsName")
                    step = True
                job_state = job_response.get("Cluster").get("Status").get("State")
                job_state_reason = job_response.get("Cluster").get("Status").get("StateChangeReason").get("Message")
                if job_state in ["TERMINATING","TERMINATED","TERMINATED_WITH_ERRORS"]:
                    step = False
                    logger.info(
                        "Script stops with state: {job_state} "
                        "and reason: {job_state_reason}".format(job_state=job_state, job_state_reason=job_state_reason))
                    break
                elif job_state in ["WAITING","RUNNING"]:
                    step = True
                    break
                else: # BOOTSTRAPPING,STARTING
                    logger.info(job_response)
            if step:
                logger.info(
                    "*******************************************+**********************************************************")
                logger.info("Run steps.")
                add_step_response = self.add_create_step(emr_response.get("JobFlowId"), master_dns)
                # Poll until every submitted setup step reports COMPLETED.
                while True:
                    list_steps_response = emr_client.list_steps(ClusterId=emr_response.get("JobFlowId"),
                                                                StepStates=["COMPLETED"])
                    time.sleep(10)
                    if len(list_steps_response.get("Steps")) == len(
                            add_step_response.get("StepIds")): # make sure that all steps are completed
                        break
                    else:
                        logger.info(emr_client.list_steps(ClusterId=emr_response.get("JobFlowId")))
                return True
            else:
                logger.info("Cannot run steps.")
                return False
        elif execute_type == 'run_job':
            # NOTE(review): upload prefix is hard-coded to 'test' (the
            # timestamp call is commented out) -- successive jobs overwrite
            # each other; confirm whether this is intentional.
            date_time_of_execute = 'test'#self.get_datetime_str()
            self.tar_python_script()
            self.upload_to_s3(os.path.dirname( __file__ )+'/files/script.tar.gz', bucket_name=self.s3_bucket,
                              path_on_s3="temp/"+date_time_of_execute+"/script.tar.gz")
            self.add_spark_submit_step(self.job_flow_id,date_time_of_execute)
            return True
def step_copy_data_between_s3_and_hdfs(self, c, src, dest):
"""
Copy data between S3 and HDFS (not used for now)
:param c: the boto_client
:param src: source location of files
:param dest: the destination on hdfs
:return:
"""
response = c.add_job_flow_steps(
JobFlowId=self.job_flow_id,
Steps=[{
'Name': 'Copy data from S3 to HDFS',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
"s3-dist-cp",
"--s3Endpoint=s3-eu-west-1.amazonaws.com",
"--src={}".format(src),
"--dest={}".format(dest)
]
}
}]
)
logger.info("Added step 'Copy data from {} to {}'".format(src, dest))
| |
#!/usr/bin/env python3
import pprint
import argparse
import inspect
import json
import os
import requests
import argh
import subprocess
import urllib.parse
# Base URL for all DigitalOcean v2 API calls.
API_ENDPOINT = 'https://api.digitalocean.com/v2'
# Flipped to True by print_debug() to trace outgoing HTTP requests.
DEBUG = False
# Credentials / defaults pulled from the environment.
DO_API_TOKEN = os.environ.get('DO_API_TOKEN')
DO_KEYPAIR_ID = os.environ.get('DO_KEYPAIR_ID')  # default ssh key id(s) for new droplets
DO_KEY = os.environ.get('DO_KEY')  # presumably a path to the private key -- not used in this chunk, verify
# Region slug prefixes used by get_regions_string() to group regions.
REGS = ['ams', 'fra', 'lon', 'nyc', 'sfo', 'sgp', 'tor']
class C:
    # ANSI escape sequences used to colorize terminal output.
    blue = '\033[94m'
    green = '\033[92m'
    red = '\033[91m'
    end = '\033[0m'  # reset attributes
def R(msg):
    """Wrap *msg* in red ANSI color codes."""
    return "".join([C.red, msg, C.end])
def G(msg):
    """Wrap *msg* in green ANSI color codes."""
    return "".join([C.green, msg, C.end])
def B(msg):
    """Wrap *msg* in blue ANSI color codes."""
    return "".join([C.blue, msg, C.end])
def mergedicts(dict1, dict2):
    """Merge *dict2* into *dict1* in place (returns None).

    Keys only in dict2 are copied over. For keys present in both: when
    dict2's value is a list it is appended to dict1's (assumed) list;
    otherwise both values are collected into a two-element list.

    Bug fix: the scalar-collision branch called ``list(dict1[k], dict2[k])``,
    which always raises TypeError (list() takes at most one argument); it now
    builds ``[dict1[k], dict2[k]]``.
    """
    for k in dict2.keys():
        if k in dict1:
            if type(dict2[k]) is list:
                # dict1[k] is most likely list
                dict1[k].extend(dict2[k])
            else:
                dict1[k] = [dict1[k], dict2[k]]
        else:
            dict1[k] = dict2[k]
class DoError(RuntimeError):
    """Raised for DigitalOcean API lookup/usage errors."""
    pass
def callCheck(command, env=None, stdin=None):
    """Echo *command*, run it split on whitespace, and raise when its exit
    status is non-zero."""
    print("about to run\n%s" % command)
    returncode = subprocess.call(command.split(), env=env, stdin=stdin)
    if returncode:
        raise Exception("%s failed." % command)
def print_debug():
    """Enable tracing of every HTTP request sent.

    Monkey-patches http.client.HTTPConnection.send so each outgoing request
    body is printed (URL-unquoted), then forwarded to the original send.
    Also sets the module-level DEBUG flag.

    Fix: the descriptive string used to sit *after* the import statement, so
    it was a dead expression rather than the function's docstring; it is now
    a real docstring.
    """
    # http://stackoverflow.com/questions/20658572/
    # python-requests-print-entire-http-request-raw
    import http.client
    global DEBUG
    DEBUG = True
    old_send = http.client.HTTPConnection.send

    def new_send(self, data):
        print("REQUEST:")
        print(urllib.parse.unquote(data.decode()))
        return old_send(self, data)
    http.client.HTTPConnection.send = new_send
class MyAction(argparse.Action):
    """argparse action that turns on HTTP request debugging when its flag
    (e.g. -d/--debug) is present on the command line.

    Fix: the ``metavar`` parameter was accepted but silently dropped (never
    forwarded to argparse.Action.__init__); it is now passed through.
    """
    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        # nargs=0: the option consumes no arguments, it is a pure switch.
        super(MyAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help,
            metavar=metavar)

    # this gets called when -d/--debug is passed
    def __call__(self, parser, namespace, values, option_string=None):
        print_debug()
def get_id_by_attr(res_pattern, res_list, attr='name'):
    """Return the single 'id' from *res_list* whose *attr* equals *res_pattern*.

    Raises DoError when zero or more than one resource matches.
    """
    matches = [item['id'] for item in res_list if item[attr] == res_pattern]
    if len(matches) > 1:
        raise DoError("name %s is valid for more ids: %s " %
                      (res_pattern, matches))
    if not matches:
        raise DoError("no resources found for %s, whole list: %s " %
                      (res_pattern, res_list))
    return matches[0]
def get_regions_string(s):
    """Compact a list of region slugs by REGS prefix, e.g. ['ams2','ams3']
    contributes "ams2,3" to the space-separated result."""
    chunks = []
    for prefix in REGS:
        digits = [slug[-1] for slug in s if slug.startswith(prefix)]
        chunks.append(prefix + ",".join(digits))
    return " ".join(chunks)
class DoManager(object):
    def __init__(self, api_key):
        """Store the API endpoint and the bearer *api_key* for later requests."""
        self.api_endpoint = API_ENDPOINT
        self.api_key = api_key
def all_active_droplets(self, fetch_all=True):
json_out = self.request(path='/droplets/', fetch_all=fetch_all)
return json_out['droplets']
    def get_key_id(self, key_name):
        """Resolve an ssh key name to its numeric id."""
        return get_id_by_attr(key_name, self.all_ssh_keys())
def get_droplet_id_or_name(self, id_or_name):
if not id_or_name.isdigit():
tmp = get_id_by_attr(id_or_name, self.all_active_droplets())
id = tmp
else:
id = id_or_name
return id
@argh.aliases('nf','newfloatingip')
def new_floating_ip(self, droplet_id):
params = {'droplet_id': droplet_id}
json_out = self.request('/floating_ips', params=params, method='POST')
return json_out['floating_ip']
@argh.aliases('c','create')
def create_droplet(self, name, ssh_keys=DO_KEYPAIR_ID,
image='coreos-stable', region='ams2', size='512mb',
private_networking=False, backups_enabled=False,
user_data=None, ipv6=None):
"Creates droplet. see help for defualts"
ud = None
if user_data:
with open(user_data, 'r') as f:
ud = f.read()
keys = ssh_keys.split(",")
params = {
'name': name,
'size': size,
'image': image,
'region': region,
'private_networking': private_networking,
'user_data': ud,
'ipv6': ipv6,
'backups': backups_enabled,
}
params['ssh_keys'] = keys
json_out = self.request('/droplets', params=params, method='POST')
return json_out['droplet']
def show_droplet(self, did):
json_out = self.request('/droplets/%s' % did)
#self.get_droplet_id_or_name(did))
return json_out['droplet']
@argh.aliases('show')
def show_droplet_readable(self, id):
pprint.pprint(self.show_droplet(id))
def droplet_v2_action(self, id, type, params={}):
params['type'] = type
json_out = self.request('/droplets/%s/actions' % id, params=params, method='POST')
return json_out
@argh.aliases('reboot')
def reboot_droplet(self, did):
json_out = self.droplet_v2_action(did, 'reboot')
json_out.pop('status', None)
return json_out
def power_cycle_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_cycle')
json_out.pop('status', None)
return json_out
def shutdown_droplet(self, id):
json_out = self.droplet_v2_action(id, 'shutdown')
json_out.pop('status', None)
return json_out
def power_off_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_off')
json_out.pop('status', None)
return json_out
def power_on_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_on')
json_out.pop('status', None)
return json_out
def password_reset_droplet(self, id):
json_out = self.droplet_v2_action(id, 'password_reset')
json_out.pop('status', None)
return json_out
def resize_droplet(self, id, size_id):
params = {'size': size_id}
json_out = self.droplet_v2_action(id, 'resize', params)
json_out.pop('status', None)
return json_out
def snapshot_droplet(self, id, name):
params = {'name': name}
json_out = self.droplet_v2_action(id, 'snapshot', params)
json_out.pop('status', None)
return json_out
def restore_droplet(self, id, image_id):
params = {'image': image_id}
json_out = self.droplet_v2_action(id, 'restore', params)
json_out.pop('status', None)
return json_out
@argh.aliases('reb','rebuild')
def rebuild_droplet(self, id, image_id):
params = {'image': image_id}
json_out = self.droplet_v2_action(id, 'rebuild', params)
json_out.pop('status', None)
return json_out
def enable_backups_droplet(self, id):
json_out = self.droplet_v2_action(id, 'enable_backups')
json_out.pop('status', None)
return json_out
def disable_backups_droplet(self, id):
json_out = self.droplet_v2_action(id, 'disable_backups')
json_out.pop('status', None)
return json_out
def rename_droplet(self, id, name):
params = {'name': name}
json_out = self.droplet_v2_action(id, 'rename', params)
json_out.pop('status', None)
return json_out
@argh.aliases('d','destroy')
def destroy_droplet(self, id, force=False):
_id = self.get_droplet_id_or_name(id)
answer = "y"
if not force:
answer = input("Do you really want to remove the droplet[y/n]: ")
if answer == "y":
json_out = self.request('/droplets/%s' % _id, method='DELETE')
json_out.pop('status', None)
return json_out
#regions==========================================
def all_regions(self):
json_out = self.request('/regions/')
return json_out['regions']
#images==========================================
def image_v2_action(self, id, type, params={}):
params = {
'type': type
}
json_out = self.request('/images/%s/actions' % id, params=params, method='POST')
return json_out
def show_image(self, image_id):
params= {'image_id': image_id}
json_out = self.request('/images/%s' % image_id)
return json_out['image']
def destroy_image(self, image_id):
self.request('/images/%s' % id, method='DELETE')
return True
def transfer_image(self, image_id, region_id):
params = {'region': region_id}
json_out = self.image_v2_action(id, 'transfer', params)
json_out.pop('status', None)
return json_out
#ssh_keys=========================================
def all_ssh_keys(self):
json_out = self.request('/account/keys')
return json_out['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'public_key': pub_key}
json_out = self.request('/account/keys', params, method='POST')
return json_out['ssh_key']
def show_ssh_key(self, key_id):
json_out = self.request('/account/keys/%s/' % key_id)
return json_out['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name} # v2 API doesn't allow to change key body now
json_out = self.request('/account/keys/%s/' % key_id, params, method='PUT')
return json_out['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/account/keys/%s' % key_id, method='DELETE')
return True
#domains==========================================
def all_domains(self):
json_out = self.request('/domains/')
return json_out['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json_out = self.request('/domains', params=params, method='POST')
return json_out['domain']
def show_domain(self, domain_id):
json_out = self.request('/domains/%s/' % domain_id)
return json_out['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s' % domain_id, method='DELETE')
return True
def all_domain_records(self, domain_id):
json_out = self.request('/domains/%s/records/' % domain_id)
return json_out['domain_records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {'data': data}
params['type'] = record_type
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json_out = self.request('/domains/%s/records/' % domain_id, params, method='POST')
return json_out['record']
def show_domain_record(self, domain_id, record_id):
json_out = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json_out['domain_record']
def destroy_domain_record(self, domain_id, record_id):
self.request('/domains/%s/records/%s' % (domain_id, record_id), method='DELETE')
return True
@argh.aliases('f','floatips')
def list_floatips(self):
json_out = self.request('/floating_ips')
for fip in json_out['floating_ips']:
form = "%s in %s on %s"
dn = "None"
if fip['droplet']:
dn = fip['droplet']['name']
fields = (B(fip['ip']), G(fip['region']['slug']), R(dn))
print(form % fields)
#events(actions in v2 API)========================
@argh.aliases('a','actions')
def show_actions(self, type="all"):
actions = self.show_all_actions()
for a in actions:
if type == "all" or a['type'] == type:
form = "%s on %s, id %s, finished on %s"
sanit = lambda s: str(s) if s else ""
ttype = sanit(a['type'])
rtype = sanit(a['resource_type'])
rid = sanit(a['resource_id'])
compl = sanit(a['completed_at'])
#print(ttype,rtype,rid,compl)
fields = (B(ttype), G(rtype), R(rid), B(compl))
print(form % fields)
def show_all_actions(self):
json_out = self.request('/actions')
return json_out['actions']
def show_action(self, action_id):
json_out = self.request('/actions/%s' % action_id)
return json_out['action']
def show_event(self, event_id):
return self.show_action(event_id)
#low_level========================================
def request(self, path, params={}, method='GET', fetch_all=False):
if not path.startswith('/'):
path = '/'+path
url = self.api_endpoint+path
headers = { 'Authorization': "Bearer %s" % self.api_key }
headers['Content-Type'] = 'application/json'
resp = {}
while True:
tmp = self.request_v2(url, params=params, headers=headers,
method=method)
has_next = False
if 'links' in tmp:
has_next = 'pages' in tmp['links']
if has_next:
has_next = 'next' in tmp['links']['pages']
if fetch_all and has_next:
u = urllib.parse.urlparse(tmp['links']['pages']['next'])
next_page = urllib.parse.parse_qs(u.query)['page'][0]
params['page'] = next_page
del(tmp['links'])
del(tmp['meta'])
#resp.update(tmp)
mergedicts(resp, tmp)
else:
mergedicts(resp, tmp)
break
return resp
    def request_v2(self, url, headers={}, params={}, method='GET'):
        # Perform one HTTP call against the v2 API and return the decoded
        # JSON body.  DELETE has no body, so it returns {'status': code}.
        # NOTE(review): the mutable default dicts are shared across calls;
        # they are not mutated here, but callers should not rely on them.
        try:
            if method == 'POST':
                # POST sends params as a JSON body ...
                resp = requests.post(url, data=json.dumps(params), headers=headers, timeout=60)
                json_out = resp.json()
            elif method == 'DELETE':
                resp = requests.delete(url, headers=headers, timeout=60)
                json_out = {'status': resp.status_code}
            elif method == 'PUT':
                # ... while PUT and GET send them as a query string.
                resp = requests.put(url, headers=headers, params=params, timeout=60)
                json_out = resp.json()
            elif method == 'GET':
                resp = requests.get(url, headers=headers, params=params, timeout=60)
                json_out = resp.json()
            else:
                raise DoError('Unsupported method %s' % method)
        except ValueError:  # requests.models.json.JSONDecodeError
            raise ValueError("The API server doesn't respond with a valid json_out")
        except requests.RequestException as e:  # errors from requests
            raise RuntimeError(e)
        if DEBUG:
            print("RESPONSE:")
            print(resp.status_code)
            print(json.dumps(json_out, sort_keys=True, indent=4))
        # Surface API error payloads as DoError; fall back to the HTTP
        # status when the body carries no recognizable error message.
        if resp.status_code != requests.codes.ok:  #pylint: disable=no-member
            if json_out:
                if 'error_message' in json_out:
                    raise DoError(json_out['error_message'])
                elif 'message' in json_out:
                    raise DoError(json_out['message'])
            # The JSON response is bad, so raise an exception with the HTTP status
            resp.raise_for_status()
        if json_out.get('id') == 'not_found':
            raise DoError(json_out['message'])
        return json_out
@argh.aliases('s')
def ssh(self, fuzzy_name, user='core', key=DO_KEY, port='22'):
chosen = [d for d in self.all_active_droplets()
if fuzzy_name == d['name']]
if len(chosen) > 2 :
raise DoError("name too ambiguous")
if len(chosen) == 0 :
raise DoError("no droplet by that name")
ip = self.get_public_ip(chosen[0])
cmd = "ssh -o IdentitiesOnly=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s -p %s %s@%s" % (DO_KEY, port, user, ip)
callCheck(cmd)
def get_private_ip(self, d):
for n in d['networks']['v4']:
if n['type'] == 'private':
return n['ip_address']
def get_public_ip(self, d):
for n in d['networks']['v4']:
if n['type'] == 'public':
return n['ip_address']
def status(self, s):
if s == "new":
return G(s)
if s == "active":
return B(s)
if s in ["off","archive"]:
return R(s)
def droplets(self, fetch_all=False):
for d in self.all_active_droplets(fetch_all):
form = "%s %s[%s] %s - %s, %s"
fields = (G(str(d['id'])), B(d['name']), G(d['region']['slug']),
self.status(d['status']), self.get_public_ip(d),
self.get_private_ip(d))
print(form % fields)
def avail(self, s):
if s:
return G("available")
else:
return R("not avail")
@argh.aliases('s')
def sizes(self):
json_out = self.request('/sizes/')
for s in json_out['sizes']:
form = "%s: %s vcpus, at %s"
print(form % (R(s['slug']), G(str(s['vcpus'])), B(get_regions_string(s['regions']))))
@argh.aliases('r')
def regions(self):
for r in self.all_regions():
form = "%s: %s, features: %s"
fields = (B(r['slug']),
self.avail(r['available']), ",".join(r['features']))
print(form % fields)
@argh.aliases('i')
@argh.arg('--type', '-t', choices=['application', 'distribution'])
@argh.arg('--private', '-p', default=False)
def images(self, type='', private=False, fetch_all=False):
params = {}
if type:
params = {'type': type}
if private:
params = {'private': 'true'}
for i in self.request('/images/', params=params, fetch_all=fetch_all)['images']:
form = "%s %s at %s"
name = i['slug']
if not name:
name = i['name']
print(form % (R(name), G(str(i['id'])), B(",".join( i['regions'] ) )))
@argh.aliases('k')
def keypairs(self):
for k in self.all_ssh_keys():
form = "%s: id %s, \'%s\'"
fields = (R(k['name']), B(str(k['id'])), k['public_key'])
print(form % fields)
class Proxy(object):
    """Singleton facade: constructing a Proxy always yields one shared DoManager."""
    _manager = None

    def __new__(cls, *args, **kwargs):
        # First instantiation builds the real manager; later ones reuse it.
        if not cls._manager:
            cls._manager = DoManager(DO_API_TOKEN)
        return cls._manager
#def init():
# """ checks if credentials are present and initalizes the module """
# manager = Proxy()
#
# current_module = __import__(__name__)
#
# for name, method in inspect.getmembers(manager, inspect.ismethod):
# if name != "__init__":
# setattr(current_module, name, method)
if __name__ == "__main__":
    # Build the CLI: each exposed bound method becomes an argh subcommand;
    # short aliases come from the @argh.aliases decorators on the methods.
    do = DoManager(DO_API_TOKEN)
    parser = argh.ArghParser()
    exposed = [do.create_droplet, do.ssh, do.droplets, do.regions, do.keypairs,
            do.destroy_droplet, do.show_droplet_readable, do.images,
            do.show_actions, do.rebuild_droplet, do.list_floatips,
            do.sizes,
            do.new_floating_ip, do.reboot_droplet]
    # Running with no subcommand lists the droplets.
    argh.assembling.set_default_command(parser, do.droplets)
    parser.add_commands(exposed)
    # -d/--debug is handled by a custom argparse action (MyAction,
    # defined earlier in the file).
    parser.add_argument('-d', '--debug', const=False, action=MyAction)
    parser.dispatch()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova import test
from nova.tests.objects import test_objects
class _TestInstanceGroupObjects(test.TestCase):
    """Exercises InstanceGroup / InstanceGroupList against the DB layer.

    Subclasses mix in a local or remote object backend (see the concrete
    Test*InstanceGroupObject classes below).
    """

    def setUp(self):
        super(_TestInstanceGroupObjects, self).setUp()
        self.user_id = 'fake_user'
        self.project_id = 'fake_project'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def _get_default_values(self):
        # Minimal valid instance-group row.
        return {'name': 'fake_name',
                'user_id': self.user_id,
                'project_id': self.project_id}

    def _create_instance_group(self, context, values, policies=None,
                               metadata=None, members=None):
        return db.instance_group_create(context, values, policies=policies,
                                        metadata=metadata, members=members)

    def test_get_by_uuid(self):
        values = self._get_default_values()
        metadata = {'key11': 'value1',
                    'key12': 'value2'}
        policies = ['policy1', 'policy2']
        members = ['instance_id1', 'instance_id2']
        db_result = self._create_instance_group(self.context, values,
                                                metadata=metadata,
                                                policies=policies,
                                                members=members)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.metadetails, metadata)
        self.assertEqual(obj_result.members, members)
        self.assertEqual(obj_result.policies, policies)

    def test_refresh(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.name, 'fake_name')
        values = {'name': 'new_name', 'user_id': 'new_user',
                  'project_id': 'new_project'}
        db.instance_group_update(self.context, db_result['uuid'],
                                 values)
        obj_result.refresh()
        self.assertEqual(obj_result.name, 'new_name')
        # refresh() must leave the object pristine (no pending changes).
        self.assertEqual(set([]), obj_result.obj_what_changed())

    def test_save_simple(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.name, 'fake_name')
        obj_result.name = 'new_name'
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['name'], 'new_name')

    def test_save_policies(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        policies = ['policy1', 'policy2']
        obj_result.policies = policies
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['policies'], policies)

    def test_save_members(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        members = ['instance1', 'instance2']
        obj_result.members = members
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['members'], members)

    def test_save_metadata(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        metadata = {'foo': 'bar'}
        obj_result.metadetails = metadata
        obj_result.save()
        metadata1 = db.instance_group_metadata_get(self.context,
                                                   db_result['uuid'])
        # Compare against the metadata re-read from the DB.  (The original
        # compared metadata with itself, so the save was never verified.)
        for key, value in metadata.iteritems():
            self.assertEqual(value, metadata1[key])

    def test_create(self):
        group1 = instance_group.InstanceGroup()
        group1.uuid = 'fake-uuid'
        group1.name = 'fake-name'
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.uuid, group2.uuid)
        self.assertEqual(group1.name, group2.name)
        result = db.instance_group_get(self.context, group1.uuid)
        self.assertEqual(group1.id, result.id)
        self.assertEqual(group1.uuid, result.uuid)
        self.assertEqual(group1.name, result.name)

    def test_create_with_policies(self):
        group1 = instance_group.InstanceGroup()
        group1.policies = ['policy1', 'policy2']
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.policies, group2.policies)

    def test_create_with_members(self):
        group1 = instance_group.InstanceGroup()
        group1.members = ['instance1', 'instance2']
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.members, group2.members)

    def test_create_with_metadata(self):
        group1 = instance_group.InstanceGroup()
        metadata = {'foo': 'bar'}
        group1.metadetails = metadata
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        for key, value in metadata.iteritems():
            self.assertEqual(value, group2.metadetails[key])

    def test_recreate_fails(self):
        group = instance_group.InstanceGroup()
        group.create(self.context)
        self.assertRaises(exception.ObjectActionError, group.create,
                          self.context)

    def test_destroy(self):
        values = self._get_default_values()
        result = self._create_instance_group(self.context, values)
        group = instance_group.InstanceGroup()
        group.id = result.id
        group.uuid = result.uuid
        group.destroy(self.context)
        self.assertRaises(exception.InstanceGroupNotFound,
                          db.instance_group_get, self.context, result['uuid'])

    def _populate_instances(self):
        # Four groups: two in project p1, two in project p2.
        instances = [('f1', 'p1'), ('f2', 'p1'),
                     ('f3', 'p2'), ('f4', 'p2')]
        for instance in instances:
            values = self._get_default_values()
            values['uuid'] = instance[0]
            values['project_id'] = instance[1]
            self._create_instance_group(self.context, values)

    def test_list_all(self):
        self._populate_instances()
        inst_list = instance_group.InstanceGroupList.get_all(self.context)
        groups = db.instance_group_get_all(self.context)
        self.assertEqual(len(groups), len(inst_list.objects))
        self.assertEqual(len(groups), 4)
        for i in range(0, len(groups)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance_group.InstanceGroup))
            self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])

    def test_list_by_project_id(self):
        self._populate_instances()
        project_ids = ['p1', 'p2']
        for id in project_ids:
            il = instance_group.InstanceGroupList.get_by_project_id(
                self.context, id)
            groups = db.instance_group_get_all_by_project_id(self.context, id)
            self.assertEqual(len(groups), len(il.objects))
            self.assertEqual(len(groups), 2)
            for i in range(0, len(groups)):
                self.assertTrue(isinstance(il.objects[i],
                                           instance_group.InstanceGroup))
                self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
                self.assertEqual(il.objects[i].project_id, id)
class TestInstanceGroupObject(test_objects._LocalTest,
                              _TestInstanceGroupObjects):
    # Runs the shared tests with objects backed by the local (in-process)
    # object backend.
    pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
                                    _TestInstanceGroupObjects):
    # Same tests, but exercised through the remote (RPC-backed) object
    # backend.
    pass
| |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from BattleProps import *
from BattleSounds import *
from toontown.toon.ToonDNA import *
from toontown.suit.SuitDNA import *
from direct.directnotify import DirectNotifyGlobal
import random
import MovieCamera
import MovieUtil
from MovieUtil import calcAvgSuitPos
notify = DirectNotifyGlobal.directNotify.newCategory('MovieThrow')
# Splat sound per gag level (indexed by throw level in __getSoundTrack).
hitSoundFiles = ('AA_tart_only.ogg', 'AA_slice_only.ogg', 'AA_slice_only.ogg', 'AA_slice_only.ogg', 'AA_slice_only.ogg', 'AA_wholepie_only.ogg', 'AA_wholepie_only.ogg')
# Timeline constants, in seconds from the start of the throw animation.
tPieLeavesHand = 2.7
tPieHitsSuit = 3.0
tSuitDodges = 2.45
# A missed pie overshoots the target by this factor of the hit distance.
ratioMissToHit = 1.5
# Fraction of the miss flight after which the pie starts shrinking away.
tPieShrink = 0.7
pieFlyTaskName = 'MovieThrow-pieFly'
def addHit(dict, suitId, hitCount):
    """Accumulate hitCount into dict[suitId], creating the entry if new.

    Uses dict.get instead of the removed-in-py3 has_key; behavior is
    identical on Python 2.  (The parameter name shadows the builtin
    'dict'; kept for interface compatibility with existing callers.)
    """
    dict[suitId] = dict.get(suitId, 0) + hitCount
def doThrows(throws):
    """Build the battle movie for all throw-gag attacks this round.

    Returns (mainTrack, cameraTrack), or (None, None) when there is
    nothing to show.
    """
    if len(throws) == 0:
        return (None, None)
    # Bucket single-target throws by the suit they aim at; group gags
    # (attackAffectsGroup) are collected and played separately below.
    suitThrowsDict = {}
    for throw in throws:
        if attackAffectsGroup(throw['track'], throw['level']):
            pass
        else:
            suitId = throw['target']['suit'].doId
            if suitThrowsDict.has_key(suitId):
                suitThrowsDict[suitId].append(throw)
            else:
                suitThrowsDict[suitId] = [throw]
    suitThrows = suitThrowsDict.values()
    # Python-2 cmp-style comparator: sorts batches by size, ascending.
    def compFunc(a, b):
        if len(a) > len(b):
            return 1
        elif len(a) < len(b):
            return -1
        return 0
    suitThrows.sort(compFunc)
    # Tally hits (1) and misses (0) per suit so the reaction code can
    # tell a lone hit (stun) from multiple hits.
    totalHitDict = {}
    singleHitDict = {}
    groupHitDict = {}
    for throw in throws:
        if attackAffectsGroup(throw['track'], throw['level']):
            for i in xrange(len(throw['target'])):
                target = throw['target'][i]
                suitId = target['suit'].doId
                if target['hp'] > 0:
                    addHit(groupHitDict, suitId, 1)
                    addHit(totalHitDict, suitId, 1)
                else:
                    addHit(groupHitDict, suitId, 0)
                    addHit(totalHitDict, suitId, 0)
        else:
            suitId = throw['target']['suit'].doId
            if throw['target']['hp'] > 0:
                addHit(singleHitDict, suitId, 1)
                addHit(totalHitDict, suitId, 1)
            else:
                addHit(singleHitDict, suitId, 0)
                addHit(totalHitDict, suitId, 0)
    notify.debug('singleHitDict = %s' % singleHitDict)
    notify.debug('groupHitDict = %s' % groupHitDict)
    notify.debug('totalHitDict = %s' % totalHitDict)
    delay = 0.0
    mtrack = Parallel()
    # Stagger each suit's batch of throws by TOON_THROW_SUIT_DELAY.
    for st in suitThrows:
        if len(st) > 0:
            ival = __doSuitThrows(st)
            if ival:
                mtrack.append(Sequence(Wait(delay), ival))
            delay = delay + TOON_THROW_SUIT_DELAY
    retTrack = Sequence()
    retTrack.append(mtrack)
    # Group throws (e.g. wedding cake) play after the single-target ones.
    groupThrowIvals = Parallel()
    groupThrows = []
    for throw in throws:
        if attackAffectsGroup(throw['track'], throw['level']):
            groupThrows.append(throw)
    for throw in groupThrows:
        tracks = None
        tracks = __throwGroupPie(throw, 0, groupHitDict)
        if tracks:
            for track in tracks:
                groupThrowIvals.append(track)
    retTrack.append(groupThrowIvals)
    camDuration = retTrack.getDuration()
    camTrack = MovieCamera.chooseThrowShot(throws, suitThrowsDict, camDuration)
    return (retTrack, camTrack)
def __doSuitThrows(throws):
    """Build the parallel toon/pie/suit tracks for all throws at one suit."""
    toonTracks = Parallel()
    delay = 0.0
    hitCount = 0
    # Count only the leading run of hits; counting stops at the first
    # miss.  __throwPie plays a stun reaction only when hitCount == 1.
    for throw in throws:
        if throw['target']['hp'] > 0:
            hitCount += 1
        else:
            break
    # Each toon's throw starts TOON_THROW_DELAY after the previous one.
    for throw in throws:
        tracks = __throwPie(throw, delay, hitCount)
        if tracks:
            for track in tracks:
                toonTracks.append(track)
        delay = delay + TOON_THROW_DELAY
    return toonTracks
def __showProp(prop, parent, pos):
    """Attach the prop to parent and place it at pos (parent-relative)."""
    prop.reparentTo(parent)
    prop.setPos(pos)
def __animProp(props, propName, propType):
    """Play propName on actor props; model props need no animation."""
    if propType == 'actor':
        for prop in props:
            prop.play(propName)
    elif propType == 'model':
        pass
    else:
        notify.error('No such propType as: %s' % propType)
def __billboardProp(prop):
    """Make the prop face the camera while keeping its current scale."""
    originalScale = prop.getScale()
    prop.setBillboardPointWorld()
    prop.setScale(originalScale)
def __suitMissPoint(suit, other = render):
    """Return a point 1.3x head-height above the suit's feet, in other's space."""
    point = suit.getPos(other)
    point.setZ(point[2] + suit.getHeight() * 1.3)
    return point
def __propPreflight(props, suit, toon, battle):
    """Detach the flying pie from the toon and aim it at the suit's face."""
    prop = props[0]
    toon.update(0)
    prop.wrtReparentTo(battle)
    # The duplicate prop stays hidden during flight.
    props[1].reparentTo(hidden)
    for childIndex in xrange(prop.getNumChildren()):
        prop.getChild(childIndex).setHpr(0, -90, 0)
    prop.lookAt(MovieUtil.avatarFacePoint(suit, other=battle))
def __propPreflightGroup(props, suits, toon, battle):
    """Detach the pie and aim it at the average face point of all targets."""
    prop = props[0]
    toon.update(0)
    prop.wrtReparentTo(battle)
    # The duplicate prop stays hidden during flight.
    props[1].reparentTo(hidden)
    for childIndex in xrange(prop.getNumChildren()):
        prop.getChild(childIndex).setHpr(0, -90, 0)
    avgTargetPt = Point3(0, 0, 0)
    for suit in suits:
        avgTargetPt += MovieUtil.avatarFacePoint(suit, other=battle)
    avgTargetPt /= len(suits)
    prop.lookAt(avgTargetPt)
def __piePreMiss(missDict, pie, suitPoint, other = render):
    """Record start/end state for a single-pie miss flight in missDict."""
    missDict['pie'] = pie
    missDict['startScale'] = pie.getScale()
    missDict['startPos'] = pie.getPos(other)
    # Overshoot the target point by ratioMissToHit along the flight vector.
    toTarget = Vec3(suitPoint - missDict['startPos'])
    missDict['endPos'] = missDict['startPos'] + toTarget * ratioMissToHit
def __pieMissLerpCallback(t, missDict):
    """Lerp the missed pie along its overshoot path, shrinking it late in flight."""
    pie = missDict['pie']
    newPos = missDict['startPos'] * (1.0 - t) + missDict['endPos'] * t
    if t < tPieShrink:
        tScale = 0.0001
    else:
        tScale = (t - tPieShrink) / (1.0 - tPieShrink)
    pie.setPos(newPos)
    pie.setScale(missDict['startScale'] * max(1.0 - tScale, 0.01))
def __piePreMissGroup(missDict, pies, suitPoint, other = render):
    """Record start/end state for a group (cake-part) miss flight."""
    missDict['pies'] = pies
    missDict['startScale'] = pies[0].getScale()
    missDict['startPos'] = pies[0].getPos(other)
    # Overshoot the target point by ratioMissToHit along the flight vector.
    toTarget = Vec3(suitPoint - missDict['startPos'])
    missDict['endPos'] = missDict['startPos'] + toTarget * ratioMissToHit
    notify.debug('startPos=%s' % missDict['startPos'])
    notify.debug('v=%s' % toTarget)
    notify.debug('endPos=%s' % missDict['endPos'])
def __pieMissGroupLerpCallback(t, missDict):
    """Lerp every missed cake part along the overshoot path, shrinking late in flight."""
    newPos = missDict['startPos'] * (1.0 - t) + missDict['endPos'] * t
    if t < tPieShrink:
        tScale = 0.0001
    else:
        tScale = (t - tPieShrink) / (1.0 - tPieShrink)
    newScale = missDict['startScale'] * max(1.0 - tScale, 0.01)
    for pie in missDict['pies']:
        pie.setPos(newPos)
        pie.setScale(newScale)
def __getWeddingCakeSoundTrack(level, hitSuit, node = None):
    """Build the sound track for the top-level (wedding cake) throw."""
    throwTrack = Sequence()
    if hitSuit:
        # Throw song and cog splat run in parallel, each on its own delay.
        songTrack = Sequence(
            Wait(1.0),
            SoundInterval(globalBattleSoundCache.getSound('AA_throw_wedding_cake.ogg'), node=node))
        splatTrack = Sequence(
            Wait(tPieHitsSuit),
            SoundInterval(globalBattleSoundCache.getSound('AA_throw_wedding_cake_cog.ogg'), node=node))
        throwTrack.append(Parallel(songTrack, splatTrack))
    else:
        throwTrack.append(Wait(tSuitDodges))
        throwTrack.append(SoundInterval(globalBattleSoundCache.getSound('AA_throw_wedding_cake_miss.ogg'), node=node))
    return throwTrack
def __getSoundTrack(level, hitSuit, node = None):
    """Return the throw (and, on a hit, splat) sound track for this gag level."""
    if level == UBER_GAG_LEVEL_INDEX:
        # The wedding cake has its own dedicated sound layout.
        return __getWeddingCakeSoundTrack(level, hitSuit, node)
    throwSound = globalBattleSoundCache.getSound('AA_pie_throw_only.ogg')
    throwTrack = Sequence(Wait(2.6), SoundInterval(throwSound, node=node))
    if not hitSuit:
        return throwTrack
    hitSound = globalBattleSoundCache.getSound(hitSoundFiles[level])
    hitTrack = Sequence(Wait(tPieLeavesHand), SoundInterval(hitSound, node=node))
    return Parallel(throwTrack, hitTrack)
def __throwPie(throw, delay, hitCount):
    """Build the interval tracks for one single-target pie throw.

    Returns a list of tracks: toon animation, sound, pie flight, and
    (unless this is a delayed miss) the suit's response.
    """
    toon = throw['toon']
    hpbonus = throw['hpbonus']
    target = throw['target']
    suit = target['suit']
    hp = target['hp']
    kbbonus = target['kbbonus']
    sidestep = throw['sidestep']
    died = target['died']
    revived = target['revived']
    leftSuits = target['leftSuits']
    rightSuits = target['rightSuits']
    level = throw['level']
    battle = throw['battle']
    suitPos = suit.getPos(battle)
    origHpr = toon.getHpr(battle)
    notify.debug('toon: %s throws tart at suit: %d for hp: %d died: %d' % (toon.getName(),
     suit.doId,
     hp,
     died))
    pieName = pieNames[level]
    # A positive hp means the throw connects.
    hitSuit = hp > 0
    pie = globalPropPool.getProp(pieName)
    pieType = globalPropPool.getPropType(pieName)
    pie2 = MovieUtil.copyProp(pie)
    pies = [pie, pie2]
    hands = toon.getRightHands()
    splatName = 'splat-' + pieName
    # The wedding cake reuses the birthday cake's splat effect.
    if pieName == 'wedding-cake':
        splatName = 'splat-birthday-cake'
    splat = globalPropPool.getProp(splatName)
    splatType = globalPropPool.getPropType(splatName)
    # Toon turns to face the suit, throws, then restores its heading.
    toonTrack = Sequence()
    toonFace = Func(toon.headsUp, battle, suitPos)
    toonTrack.append(Wait(delay))
    toonTrack.append(toonFace)
    toonTrack.append(ActorInterval(toon, 'throw'))
    toonTrack.append(Func(toon.loop, 'neutral'))
    toonTrack.append(Func(toon.setHpr, battle, origHpr))
    # Pie grows in the toon's hand, then detaches just before release.
    pieShow = Func(MovieUtil.showProps, pies, hands)
    pieAnim = Func(__animProp, pies, pieName, pieType)
    pieScale1 = LerpScaleInterval(pie, 1.0, pie.getScale(), startScale=MovieUtil.PNT3_NEARZERO)
    pieScale2 = LerpScaleInterval(pie2, 1.0, pie2.getScale(), startScale=MovieUtil.PNT3_NEARZERO)
    pieScale = Parallel(pieScale1, pieScale2)
    piePreflight = Func(__propPreflight, pies, suit, toon, battle)
    pieTrack = Sequence(Wait(delay), pieShow, pieAnim, pieScale, Func(battle.movie.needRestoreRenderProp, pies[0]), Wait(tPieLeavesHand - 1.0), piePreflight)
    soundTrack = __getSoundTrack(level, hitSuit, toon)
    if hitSuit:
        # Fly the pie to the suit's face, then swap it for the splat.
        pieFly = LerpPosInterval(pie, tPieHitsSuit - tPieLeavesHand, pos=MovieUtil.avatarFacePoint(suit, other=battle), name=pieFlyTaskName, other=battle)
        pieHide = Func(MovieUtil.removeProps, pies)
        splatShow = Func(__showProp, splat, suit, Point3(0, 0, suit.getHeight()))
        splatBillboard = Func(__billboardProp, splat)
        splatAnim = ActorInterval(splat, splatName)
        splatHide = Func(MovieUtil.removeProp, splat)
        pieTrack.append(pieFly)
        pieTrack.append(pieHide)
        pieTrack.append(Func(battle.movie.clearRenderProp, pies[0]))
        pieTrack.append(splatShow)
        pieTrack.append(splatBillboard)
        pieTrack.append(splatAnim)
        pieTrack.append(splatHide)
    else:
        # Miss: overshoot past the suit (or past its face point when the
        # suit sidesteps) while the pie shrinks away.
        missDict = {}
        if sidestep:
            suitPoint = MovieUtil.avatarFacePoint(suit, other=battle)
        else:
            suitPoint = __suitMissPoint(suit, other=battle)
        piePreMiss = Func(__piePreMiss, missDict, pie, suitPoint, battle)
        pieMiss = LerpFunctionInterval(__pieMissLerpCallback, extraArgs=[missDict], duration=(tPieHitsSuit - tPieLeavesHand) * ratioMissToHit)
        pieHide = Func(MovieUtil.removeProps, pies)
        pieTrack.append(piePreMiss)
        pieTrack.append(pieMiss)
        pieTrack.append(pieHide)
        pieTrack.append(Func(battle.movie.clearRenderProp, pies[0]))
    if hitSuit:
        suitResponseTrack = Sequence()
        showDamage = Func(suit.showHpText, -hp, openEnded=0, attackTrack=THROW_TRACK)
        updateHealthBar = Func(suit.updateHealthBar, hp)
        sival = []
        if kbbonus > 0:
            # Knockback bonus: the suit slips forward and is un-lured.
            # Per-body-type start times skip into the slip animation.
            suitPos, suitHpr = battle.getActorPosHpr(suit)
            suitType = getSuitBodyType(suit.getStyleName())
            animTrack = Sequence()
            animTrack.append(ActorInterval(suit, 'pie-small-react', duration=0.2))
            if suitType == 'a':
                animTrack.append(ActorInterval(suit, 'slip-forward', startTime=2.43))
            elif suitType == 'b':
                animTrack.append(ActorInterval(suit, 'slip-forward', startTime=1.94))
            elif suitType == 'c':
                animTrack.append(ActorInterval(suit, 'slip-forward', startTime=2.58))
            animTrack.append(Func(battle.unlureSuit, suit))
            moveTrack = Sequence(Wait(0.2), LerpPosInterval(suit, 0.6, pos=suitPos, other=battle))
            sival = Parallel(animTrack, moveTrack)
        elif hitCount == 1:
            # A lone hit stuns; multiple hits just chain small reacts.
            sival = Parallel(ActorInterval(suit, 'pie-small-react'), MovieUtil.createSuitStunInterval(suit, 0.3, 1.3))
        else:
            sival = ActorInterval(suit, 'pie-small-react')
        suitResponseTrack.append(Wait(delay + tPieHitsSuit))
        suitResponseTrack.append(showDamage)
        suitResponseTrack.append(updateHealthBar)
        suitResponseTrack.append(sival)
        # Bonus damage numbers pop up shortly after the main hit text.
        bonusTrack = Sequence(Wait(delay + tPieHitsSuit))
        if kbbonus > 0:
            bonusTrack.append(Wait(0.75))
            bonusTrack.append(Func(suit.showHpText, -kbbonus, 2, openEnded=0, attackTrack=THROW_TRACK))
        if hpbonus > 0:
            bonusTrack.append(Wait(0.75))
            bonusTrack.append(Func(suit.showHpText, -hpbonus, 1, openEnded=0, attackTrack=THROW_TRACK))
        if revived != 0:
            suitResponseTrack.append(MovieUtil.createSuitReviveTrack(suit, toon, battle))
        elif died != 0:
            suitResponseTrack.append(MovieUtil.createSuitDeathTrack(suit, toon, battle))
        else:
            suitResponseTrack.append(Func(suit.loop, 'neutral'))
        suitResponseTrack = Parallel(suitResponseTrack, bonusTrack)
    else:
        suitResponseTrack = MovieUtil.createSuitDodgeMultitrack(delay + tSuitDodges, suit, leftSuits, rightSuits)
    # For a delayed miss the dodge was already played by an earlier throw
    # in the batch, so the suit response track is omitted.
    if not hitSuit and delay > 0:
        return [toonTrack, soundTrack, pieTrack]
    else:
        return [toonTrack,
         soundTrack,
         pieTrack,
         suitResponseTrack]
def __createWeddingCakeFlight(throw, groupHitDict, pie, pies):
    """Build the parallel flight tracks for the wedding cake's four parts.

    The cake model splits into cake1..cake3 + caketop; the parts are
    divided among the 1-4 targets and each share flies (or misses) on
    its own track.
    """
    toon = throw['toon']
    battle = throw['battle']
    level = throw['level']
    sidestep = throw['sidestep']
    hpbonus = throw['hpbonus']
    numTargets = len(throw['target'])
    pieName = pieNames[level]
    splatName = 'splat-' + pieName
    # The wedding cake reuses the birthday cake's splat effect.
    if pieName == 'wedding-cake':
        splatName = 'splat-birthday-cake'
    splat = globalPropPool.getProp(splatName)
    # One splat per target (the first is the pooled prop, rest are copies).
    splats = [splat]
    for i in xrange(numTargets - 1):
        splats.append(MovieUtil.copyProp(splat))
    splatType = globalPropPool.getPropType(splatName)
    cakePartStrs = ['cake1',
     'cake2',
     'cake3',
     'caketop']
    cakeParts = []
    for part in cakePartStrs:
        cakeParts.append(pie.find('**/%s' % part))
    # How the four cake parts are split among 1..4 targets.
    cakePartDivisions = {}
    cakePartDivisions[1] = [[cakeParts[0],
      cakeParts[1],
      cakeParts[2],
      cakeParts[3]]]
    cakePartDivisions[2] = [[cakeParts[0], cakeParts[1]], [cakeParts[2], cakeParts[3]]]
    cakePartDivisions[3] = [[cakeParts[0], cakeParts[1]], [cakeParts[2]], [cakeParts[3]]]
    cakePartDivisions[4] = [[cakeParts[0]],
     [cakeParts[1]],
     [cakeParts[2]],
     [cakeParts[3]]]
    cakePartDivToUse = cakePartDivisions[len(throw['target'])]
    groupPieTracks = Parallel()
    for i in xrange(numTargets):
        target = throw['target'][i]
        suit = target['suit']
        hitSuit = target['hp'] > 0
        singlePieTrack = Sequence()
        if hitSuit:
            # Hit: this target's cake parts fly to its face, then splat.
            piePartReparent = Func(reparentCakePart, pie, cakePartDivToUse[i])
            singlePieTrack.append(piePartReparent)
            cakePartTrack = Parallel()
            for cakePart in cakePartDivToUse[i]:
                pieFly = LerpPosInterval(cakePart, tPieHitsSuit - tPieLeavesHand, pos=MovieUtil.avatarFacePoint(suit, other=battle), name=pieFlyTaskName, other=battle)
                cakePartTrack.append(pieFly)
            singlePieTrack.append(cakePartTrack)
            pieRemoveCakeParts = Func(MovieUtil.removeProps, cakePartDivToUse[i])
            pieHide = Func(MovieUtil.removeProps, pies)
            splatShow = Func(__showProp, splats[i], suit, Point3(0, 0, suit.getHeight()))
            splatBillboard = Func(__billboardProp, splats[i])
            splatAnim = ActorInterval(splats[i], splatName)
            splatHide = Func(MovieUtil.removeProp, splats[i])
            singlePieTrack.append(pieRemoveCakeParts)
            singlePieTrack.append(pieHide)
            singlePieTrack.append(Func(battle.movie.clearRenderProp, pies[0]))
            singlePieTrack.append(splatShow)
            singlePieTrack.append(splatBillboard)
            singlePieTrack.append(splatAnim)
            singlePieTrack.append(splatHide)
        else:
            # Miss: parts overshoot past the suit and shrink away.
            missDict = {}
            if sidestep:
                suitPoint = MovieUtil.avatarFacePoint(suit, other=battle)
            else:
                suitPoint = __suitMissPoint(suit, other=battle)
            piePartReparent = Func(reparentCakePart, pie, cakePartDivToUse[i])
            piePreMiss = Func(__piePreMissGroup, missDict, cakePartDivToUse[i], suitPoint, battle)
            pieMiss = LerpFunctionInterval(__pieMissGroupLerpCallback, extraArgs=[missDict], duration=(tPieHitsSuit - tPieLeavesHand) * ratioMissToHit)
            pieHide = Func(MovieUtil.removeProps, pies)
            pieRemoveCakeParts = Func(MovieUtil.removeProps, cakePartDivToUse[i])
            singlePieTrack.append(piePartReparent)
            singlePieTrack.append(piePreMiss)
            singlePieTrack.append(pieMiss)
            singlePieTrack.append(pieRemoveCakeParts)
            singlePieTrack.append(pieHide)
            singlePieTrack.append(Func(battle.movie.clearRenderProp, pies[0]))
        groupPieTracks.append(singlePieTrack)
    return groupPieTracks
def __throwGroupPie(throw, delay, groupHitDict):
    """Build the interval tracks for one group throw (wedding cake).

    Returns [toonTrack, pieTrack, soundTrack, groupSuitResponseTrack].
    """
    toon = throw['toon']
    battle = throw['battle']
    level = throw['level']
    sidestep = throw['sidestep']
    hpbonus = throw['hpbonus']
    numTargets = len(throw['target'])
    avgSuitPos = calcAvgSuitPos(throw)
    origHpr = toon.getHpr(battle)
    # Toon faces the average suit position, throws, restores heading.
    toonTrack = Sequence()
    toonFace = Func(toon.headsUp, battle, avgSuitPos)
    toonTrack.append(Wait(delay))
    toonTrack.append(toonFace)
    toonTrack.append(ActorInterval(toon, 'throw'))
    toonTrack.append(Func(toon.loop, 'neutral'))
    toonTrack.append(Func(toon.setHpr, battle, origHpr))
    suits = []
    for i in xrange(numTargets):
        suits.append(throw['target'][i]['suit'])
    pieName = pieNames[level]
    pie = globalPropPool.getProp(pieName)
    pieType = globalPropPool.getPropType(pieName)
    pie2 = MovieUtil.copyProp(pie)
    pies = [pie, pie2]
    hands = toon.getRightHands()
    pieShow = Func(MovieUtil.showProps, pies, hands)
    pieAnim = Func(__animProp, pies, pieName, pieType)
    # Group pies grow to 1.5x their normal scale in the toon's hand.
    pieScale1 = LerpScaleInterval(pie, 1.0, pie.getScale() * 1.5, startScale=MovieUtil.PNT3_NEARZERO)
    pieScale2 = LerpScaleInterval(pie2, 1.0, pie2.getScale() * 1.5, startScale=MovieUtil.PNT3_NEARZERO)
    pieScale = Parallel(pieScale1, pieScale2)
    piePreflight = Func(__propPreflightGroup, pies, suits, toon, battle)
    pieTrack = Sequence(Wait(delay), pieShow, pieAnim, pieScale, Func(battle.movie.needRestoreRenderProp, pies[0]), Wait(tPieLeavesHand - 1.0), piePreflight)
    # Only the uber gag (wedding cake) is a group throw today.
    if level == UBER_GAG_LEVEL_INDEX:
        groupPieTracks = __createWeddingCakeFlight(throw, groupHitDict, pie, pies)
    else:
        notify.error('unhandled throw level %d' % level)
    pieTrack.append(groupPieTracks)
    # The throw sound plays the "hit" variant if anyone was hit.
    didThrowHitAnyone = False
    for i in xrange(numTargets):
        target = throw['target'][i]
        hitSuit = target['hp'] > 0
        if hitSuit:
            didThrowHitAnyone = True
    soundTrack = __getSoundTrack(level, didThrowHitAnyone, toon)
    groupSuitResponseTrack = Parallel()
    for i in xrange(numTargets):
        target = throw['target'][i]
        suit = target['suit']
        hitSuit = target['hp'] > 0
        leftSuits = target['leftSuits']
        rightSuits = target['rightSuits']
        hp = target['hp']
        kbbonus = target['kbbonus']
        died = target['died']
        revived = target['revived']
        if hitSuit:
            singleSuitResponseTrack = Sequence()
            showDamage = Func(suit.showHpText, -hp, openEnded=0, attackTrack=THROW_TRACK)
            updateHealthBar = Func(suit.updateHealthBar, hp)
            sival = []
            if kbbonus > 0:
                # Knockback bonus: the suit slips forward and is un-lured.
                suitPos, suitHpr = battle.getActorPosHpr(suit)
                suitType = getSuitBodyType(suit.getStyleName())
                animTrack = Sequence()
                animTrack.append(ActorInterval(suit, 'pie-small-react', duration=0.2))
                if suitType == 'a':
                    animTrack.append(ActorInterval(suit, 'slip-forward', startTime=2.43))
                elif suitType == 'b':
                    animTrack.append(ActorInterval(suit, 'slip-forward', startTime=1.94))
                elif suitType == 'c':
                    animTrack.append(ActorInterval(suit, 'slip-forward', startTime=2.58))
                animTrack.append(Func(battle.unlureSuit, suit))
                moveTrack = Sequence(Wait(0.2), LerpPosInterval(suit, 0.6, pos=suitPos, other=battle))
                sival = Parallel(animTrack, moveTrack)
            elif groupHitDict[suit.doId] == 1:
                # A lone hit on this suit stuns it.
                sival = Parallel(ActorInterval(suit, 'pie-small-react'), MovieUtil.createSuitStunInterval(suit, 0.3, 1.3))
            else:
                sival = ActorInterval(suit, 'pie-small-react')
            singleSuitResponseTrack.append(Wait(delay + tPieHitsSuit))
            singleSuitResponseTrack.append(showDamage)
            singleSuitResponseTrack.append(updateHealthBar)
            singleSuitResponseTrack.append(sival)
            # Bonus damage numbers pop up shortly after the main hit text.
            bonusTrack = Sequence(Wait(delay + tPieHitsSuit))
            if kbbonus > 0:
                bonusTrack.append(Wait(0.75))
                bonusTrack.append(Func(suit.showHpText, -kbbonus, 2, openEnded=0, attackTrack=THROW_TRACK))
            if hpbonus > 0:
                bonusTrack.append(Wait(0.75))
                bonusTrack.append(Func(suit.showHpText, -hpbonus, 1, openEnded=0, attackTrack=THROW_TRACK))
            if revived != 0:
                singleSuitResponseTrack.append(MovieUtil.createSuitReviveTrack(suit, toon, battle))
            elif died != 0:
                singleSuitResponseTrack.append(MovieUtil.createSuitDeathTrack(suit, toon, battle))
            else:
                singleSuitResponseTrack.append(Func(suit.loop, 'neutral'))
            singleSuitResponseTrack = Parallel(singleSuitResponseTrack, bonusTrack)
        else:
            # Everyone missed -> the suit dodges; otherwise it just shows
            # a MISSED indicator while others react to their hits.
            groupHitValues = groupHitDict.values()
            if groupHitValues.count(0) == len(groupHitValues):
                singleSuitResponseTrack = MovieUtil.createSuitDodgeMultitrack(delay + tSuitDodges, suit, leftSuits, rightSuits)
            else:
                singleSuitResponseTrack = Sequence(Wait(tPieHitsSuit - 0.1), Func(MovieUtil.indicateMissed, suit, 1.0))
        groupSuitResponseTrack.append(singleSuitResponseTrack)
    return [toonTrack,
     pieTrack,
     soundTrack,
     groupSuitResponseTrack]
def reparentCakePart(pie, cakeParts):
    """Reparent every cake part under the pie's current parent node.

    wrtReparentTo is used so each part keeps its world-space transform
    while changing parents.
    """
    parentNode = pie.getParent()
    notify.debug('pieParent = %s' % parentNode)
    for part in cakeParts:
        part.wrtReparentTo(parentNode)
| |
# Copyright (c) 2016, Genevolv LLC. All rights reserved.
''' Data Access Object contains data access functions and manages the
connection cursor.
'''
import logging
LOG = logging.getLogger(__name__)
import datetime
import pyodbc
import uuid
import sys
def get_value_string(value):
    '''Render *value* as an MS SQL literal suitable for inlining in a query.

    Booleans become 1/0 (SQL BIT), bytes become a 0x... hex literal,
    datetime/timedelta/UUID values are single-quoted, and strings are
    single-quoted with embedded quotes doubled.  The literal string
    'NULL' passes through unquoted.  Anything else falls back to str().
    '''
    rtn = str(value)
    # bool must be tested before other types (it is an int subclass).
    if isinstance(value, bool):
        rtn = str(1) if value else str(0)
    elif isinstance(value, bytes):
        # Hex literal form for VARBINARY columns.
        rtn = hex(int.from_bytes(value, 'big'))
    elif isinstance(value, (datetime.datetime, datetime.timedelta,
                            uuid.UUID)):
        rtn = "'" + str(value) + "'"
    elif isinstance(value, str):
        if value == 'NULL':
            rtn = 'NULL'
        else:
            # BUG FIX: double embedded single quotes so values like
            # "O'Brien" do not break out of the quoted literal (basic
            # SQL-injection/correctness hardening for T-SQL).
            rtn = "'" + value.replace("'", "''") + "'"
    return rtn
class Dao(object):
    ''' Data Access Object contains data access functions and manages the
    connection cursor.
    '''

    def update_connection_string(self):
        ''' This function fills in the parts of the connection string
        that are platform dependent.

        Items already present in the string (matched case-insensitively)
        are replaced with the platform defaults; missing items are
        appended.  Any trailing semicolon is stripped at the end.
        '''
        if self.connection_string[-1:] != ';':
            self.connection_string += ';'
        # Non-Windows platforms reach MS SQL through FreeTDS.
        connection_items = {
            "driver=": "DRIVER={FreeTDS};",
            "tds_version=": "TDS_VERSION=7.0;",
            "port=": "PORT=1433;",
        }
        if sys.platform == "win32":
            connection_items = {
                "driver=": "Driver={SQL Server};",
                "trusted_connection=": "Trusted_Connection=yes;",
                "integrated security=": "Integrated Security=SSPI;",
                "persist security info=": "Persist Security Info=True;"
            }
        for key, item in connection_items.items():
            temp_conn_str = self.connection_string.lower()
            begin_index = temp_conn_str.find(key)
            end_index = temp_conn_str.find(";", begin_index)
            if begin_index >= 0 and end_index > 0:
                # BUG FIX: str.replace() returns a new string; the original
                # code discarded that result, so replacements never took
                # effect.  Rebuild the string with the platform item
                # spliced in (end_index + 1 also consumes the old ';',
                # since the replacement item carries its own).
                self.connection_string = (
                    self.connection_string[:begin_index] + item
                    + self.connection_string[end_index + 1:])
            else:
                self.connection_string += item
        if self.connection_string[-1:] == ';':
            self.connection_string = self.connection_string[:-1]
        LOG.debug("connection_string: " + str(self.connection_string))

    def __init__(self, connection_string=None, timeout=0,
                 connection_attempts=3):
        '''Create the DAO; connect immediately when a connection string
        is given, retrying up to *connection_attempts* times.
        '''
        self.connection_string = connection_string
        self.connection = None
        self.cursor = None
        self.timeout = timeout
        self.connection_attempts = connection_attempts
        if connection_string is not None:
            # Local import: kerberos support is only needed when a
            # connection is actually being made.
            import freevolv.utils.kerberos as kerberos
            kerb = kerberos.Kerberos()
            if kerb.is_needed:
                kerb.authenticate()
            self.update_connection_string()
            for retry in range(self.connection_attempts):
                remaining = self.connection_attempts - retry - 1
                try:
                    self.connection = pyodbc.connect(
                        self.connection_string, autocommit=True,
                        timeout=self.timeout)
                except pyodbc.Error as err:
                    LOG.debug("pyodbc.connect() error: " + str(err))
                    if remaining > 0:
                        LOG.debug("will re-try " + str(remaining)
                                  + " times.")
                    continue
                if self.connection is None:
                    LOG.debug("pyodbc.connect() failed.")
                    if remaining > 0:
                        LOG.debug("will re-try " + str(remaining)
                                  + " times.")
                    continue
                self.cursor = self.connection.cursor()
                # BUG FIX: stop retrying once connected; the original
                # loop fell through and reconnected on every remaining
                # attempt even after a successful connect.
                break

    def __str__(self):
        return str(self.connection_string)

    def get_cursor(self):
        '''returns the current cursor'''
        return self.cursor

    def begin_transaction(self):
        '''turns off autocommit'''
        LOG.debug("begin transaction")
        self.connection.autocommit = False

    def end_transaction(self):
        '''commits the transaction log, turns autocommit back on'''
        self.connection.commit()
        self.connection.autocommit = True
        LOG.debug("end transaction")

    def select_one(self, query):
        '''returns top tuple, warns if more than one'''
        LOG.debug(query)
        self.cursor.execute(query)
        result = self.cursor.fetchall()
        rtn = None
        if result:
            if len(result) > 1:
                LOG.warning("Multiple rows returned. Using first")
            rtn = result[0]
        return rtn

    def select_many(self, query):
        '''returns a list of tuples'''
        LOG.debug(query)
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def insert_one(self, table_name, fields):
        '''convenience function for inserting one row'''
        return self.insert_many(table_name, [fields])

    def insert_many(self, table_name, fields_list):
        '''insert a list of rows

        NOTE(review): the column list is derived from the non-None keys
        of the FIRST row only; rows with a different None pattern would
        produce a column/value mismatch.  Callers appear to pass uniform
        rows -- confirm before relying on mixed-None input.
        '''
        first_row = fields_list[0]
        columns = ','.join(
            key for key, value in first_row.items() if value is not None)
        rows = ""
        for fields in fields_list:
            values = ','.join(
                get_value_string(value)
                for value in fields.values() if value is not None)
            rows += "\n (" + values + "),"
        rows = rows[:-1]
        query = ("INSERT INTO " + table_name + "(" + columns + ") VALUES"
                 + rows)
        return self.execute(query)

    def update(self, table_name, fields, condition):
        '''updates an object in the DB if needed'''
        lines = []
        for key, value in fields.items():
            if value is not None:
                lines.append(' ' + key + ' = ' + get_value_string(value)
                             + ',')
        if lines:
            # Strip the trailing comma from the last SET assignment.
            lines[-1] = lines[-1][:-1]
        query = ("UPDATE " + table_name + " SET\n" + '\n'.join(lines)
                 + condition)
        return self.execute(query)

    def execute(self, query):
        '''executes an SQL statement and returns the affected row count'''
        LOG.debug(query)
        self.cursor.execute(query)
        rtn = self.cursor.rowcount
        LOG.debug(str(rtn) + " row(s) affected")
        return rtn

    def get_utc_date(self):
        '''gets the UTC time of the server'''
        query = "SELECT GETUTCDATE() AS UTC_DATE"
        LOG.debug(query)
        self.cursor.execute(query)
        rows = self.cursor.fetchall()
        rtn = rows[0][0]
        # Some drivers hand back an epoch value instead of a datetime;
        # normalize to a datetime either way.
        if not isinstance(rtn, datetime.datetime):
            rtn = datetime.datetime.fromtimestamp(int(rows[0][0]))
        return rtn
| |
from __future__ import unicode_literals
import os
import subprocess
import sys
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
)
class FFmpegPostProcessorError(PostProcessingError):
    """Raised when an ffmpeg/avconv invocation fails or no binary is found."""
    pass
class FFmpegPostProcessor(PostProcessor):
    """Base class for post-processors that shell out to ffmpeg/avconv.

    Probes the system for ffmpeg/avconv/ffprobe/avprobe at construction
    time and exposes helpers for running the preferred binary.
    """
    def __init__(self, downloader=None, deletetempfiles=False):
        PostProcessor.__init__(self, downloader)
        # Map of program name -> detected version string (None if absent).
        self._versions = self.get_versions()
        self._deletetempfiles = deletetempfiles
    def check_version(self):
        """Raise FFmpegPostProcessorError when no converter binary exists;
        emit a warning when the installed one is outdated."""
        if not self._executable:
            raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
        # avconv uses date-like version strings (e.g. 10-0), ffmpeg numeric.
        required_version = '10-0' if self._uses_avconv() else '1.0'
        if is_outdated_version(
            self._versions[self._executable], required_version):
            warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
                self._executable, self._executable, required_version)
            if self._downloader:
                self._downloader.report_warning(warning)
    @staticmethod
    def get_versions():
        """Return {program: version-or-None} for the four candidate binaries."""
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((p, get_exe_version(p, args=['-version'])) for p in programs)
    @property
    def available(self):
        # True when at least one of ffmpeg/avconv was found on this system.
        return self._executable is not None
    @property
    def _executable(self):
        # Name of the preferred converter binary, honoring the
        # prefer_ffmpeg downloader option; None when neither is installed.
        if self._downloader.params.get('prefer_ffmpeg', False):
            prefs = ('ffmpeg', 'avconv')
        else:
            prefs = ('avconv', 'ffmpeg')
        for p in prefs:
            if self._versions[p]:
                return p
        return None
    @property
    def _probe_executable(self):
        # Name of the preferred probe binary (ffprobe/avprobe), or None.
        if self._downloader.params.get('prefer_ffmpeg', False):
            prefs = ('ffprobe', 'avprobe')
        else:
            prefs = ('avprobe', 'ffprobe')
        for p in prefs:
            if self._versions[p]:
                return p
        return None
    def _uses_avconv(self):
        # True when the selected converter is avconv rather than ffmpeg.
        return self._executable == 'avconv'
    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
        """Run the converter over *input_paths*, producing *out_path*.

        Raises FFmpegPostProcessorError when the process exits non-zero.
        On success the output file's mtime is set to the oldest input
        mtime, and the inputs are deleted if deletetempfiles was set.
        """
        self.check_version()
        oldest_mtime = min(
            os.stat(encodeFilename(path)).st_mtime for path in input_paths)
        files_cmd = []
        for path in input_paths:
            files_cmd.extend([encodeArgument('-i'), encodeFilename(path, True)])
        cmd = ([encodeFilename(self._executable, True), encodeArgument('-y')] +
               files_cmd +
               [encodeArgument(o) for o in opts] +
               [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
        if self._downloader.params.get('verbose', False):
            self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            stderr = stderr.decode('utf-8', 'replace')
            # Only the last stderr line carries ffmpeg's actual error.
            msg = stderr.strip().split('\n')[-1]
            raise FFmpegPostProcessorError(msg)
        os.utime(encodeFilename(out_path), (oldest_mtime, oldest_mtime))
        if self._deletetempfiles:
            for ipath in input_paths:
                os.remove(ipath)
    def run_ffmpeg(self, path, out_path, opts):
        """Single-input convenience wrapper around run_ffmpeg_multiple_files."""
        self.run_ffmpeg_multiple_files([path], out_path, opts)
    def _ffmpeg_filename_argument(self, fn):
        # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
        if fn.startswith('-'):
            return './' + fn
        return fn
class FFmpegExtractAudioPP(FFmpegPostProcessor):
    """Extract (or convert) the audio track of a downloaded file.

    The stream is copied losslessly whenever the source codec already
    matches the preferred codec/container; otherwise it is re-encoded
    at the requested quality.
    """
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites
    def get_audio_codec(self, path):
        """Return the first audio codec name reported by ffprobe/avprobe,
        or None when probing fails."""
        if not self._probe_executable:
            raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
        try:
            cmd = [
                encodeFilename(self._probe_executable, True),
                encodeArgument('-show_streams'),
                encodeFilename(self._ffmpeg_filename_argument(path), True)]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        # -show_streams prints codec_name= before codec_type=, so remember
        # the last codec seen and report it once an audio stream appears.
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None
    def run_ffmpeg(self, path, out_path, codec, more_opts):
        """Run the converter with -vn (drop video) plus codec options,
        wrapping converter failures in AudioConversionError."""
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.msg)
    def run(self, information):
        """Extract the audio from information['filepath'].

        Returns (keep_original_file, information) with 'filepath'
        updated to the extracted audio file.
        """
        path = information['filepath']
        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
        uses_avconv = self._uses_avconv()
        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                # Lossless, but in another container
                acodec = 'copy'
                extension = 'm4a'
                more_opts = ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    # Values below 10 are VBR quality levels; larger values
                    # are treated as a bitrate in kbit/s.
                    if int(self._preferredquality) < 10:
                        more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality]
                    else:
                        more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                # The opus codec doesn't support the -aq option
                if int(self._preferredquality) < 10 and extension != 'opus':
                    more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality]
                else:
                    more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']
        prefix, sep, ext = path.rpartition('.')  # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension
        # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
        if new_path == path:
            self._nopostoverwrites = True
        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen('[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen('[' + self._executable + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except Exception as e:
            # BUG FIX: was a bare "except:", which also caught
            # KeyboardInterrupt/SystemExit and re-raised them as a
            # PostProcessingError.
            if isinstance(e, AudioConversionError):
                msg = 'audio conversion failed: ' + e.msg
            else:
                msg = 'error running ' + self._executable
            raise PostProcessingError(msg)
        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except Exception:
                # BUG FIX: narrowed from a bare "except:"; a failed utime
                # is best-effort and only worth a warning.
                self._downloader.report_warning('Cannot update utime of audio file')
        information['filepath'] = new_path
        return self._nopostoverwrites, information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
    """Convert a downloaded video into the preferred container format."""

    def __init__(self, downloader=None, preferedformat=None):
        super(FFmpegVideoConvertorPP, self).__init__(downloader)
        self._preferedformat = preferedformat

    def run(self, information):
        source = information['filepath']
        # Nothing to do when the file is already in the target format.
        if information['ext'] == self._preferedformat:
            self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (source, self._preferedformat))
            return True, information
        stem, dot, _old_ext = source.rpartition('.')
        outpath = stem + dot + self._preferedformat
        self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
        self.run_ffmpeg(source, outpath, [])
        information['filepath'] = outpath
        information['format'] = self._preferedformat
        information['ext'] = self._preferedformat
        return False, information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
    """Embed downloaded subtitle files into an mp4 container as mov_text."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    # Maps ISO 639-1 (two-letter) codes to ISO 639-2/T (three-letter) codes,
    # the form mp4 metadata expects for the language tag.
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }
    def __init__(self, downloader=None, subtitlesformat='srt'):
        super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
        # Extension of the subtitle files downloaded alongside the video.
        self._subformat = subtitlesformat
    @classmethod
    def _conver_lang_code(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters are considered, so regional
        # variants like "en-US" map via "en".  Returns None if unknown.
        return cls._lang_map.get(code[:2])
    def run(self, information):
        """Mux each subtitle file into the mp4 as a mov_text stream.

        Returns (True, information): the original file is replaced in
        place via a temp file, so nothing else needs deleting.
        """
        if information['ext'] != 'mp4':
            self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4 files')
            return True, information
        if not information.get('subtitles'):
            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
            return True, information
        sub_langs = [key for key in information['subtitles']]
        filename = information['filepath']
        # Input 0 is the video; inputs 1..N are the subtitle files.
        input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs]
        opts = [
            '-map', '0',
            '-c', 'copy',
            # Don't copy the existing subtitles, we may be running the
            # postprocessor a second time
            '-map', '-0:s',
            '-c:s', 'mov_text',
        ]
        for (i, lang) in enumerate(sub_langs):
            opts.extend(['-map', '%d:0' % (i + 1)])
            lang_code = self._conver_lang_code(lang)
            if lang_code is not None:
                opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, information
class FFmpegMetadataPP(FFmpegPostProcessor):
    """Embed available info-dict fields as container metadata tags."""

    def run(self, info):
        metadata = {}

        def copy_field(src, dst):
            # Copy info[src] into metadata[dst] when the field is present.
            if info.get(src) is not None:
                metadata[dst] = info[src]

        copy_field('title', 'title')
        copy_field('upload_date', 'date')
        if info.get('uploader') is not None:
            metadata['artist'] = info['uploader']
        elif info.get('uploader_id') is not None:
            metadata['artist'] = info['uploader_id']
        if info.get('description') is not None:
            metadata['description'] = info['description']
            metadata['comment'] = info['description']
        copy_field('webpage_url', 'purl')

        if not metadata:
            self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
            return True, info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        # m4a needs the video stream dropped explicitly; everything else
        # can be stream-copied wholesale.
        if info['ext'] == 'm4a':
            options = ['-vn', '-acodec', 'copy']
        else:
            options = ['-c', 'copy']
        for name, value in metadata.items():
            options += ['-metadata', '%s=%s' % (name, value)]
        self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
        self.run_ffmpeg(filename, temp_filename, options)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
class FFmpegMergerPP(FFmpegPostProcessor):
    """Mux separately-downloaded video and audio streams into one file."""

    def run(self, info):
        target = info['filepath']
        self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % target)
        # Stream-copy the first input's video and the second input's audio.
        merge_opts = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
        self.run_ffmpeg_multiple_files(info['__files_to_merge'], target, merge_opts)
        return True, info
class FFmpegAudioFixPP(FFmpegPostProcessor):
    """Rewrite the audio stream in place to repair a broken audio file."""

    def run(self, info):
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Fixing audio file "%s"' % filename)
        # Drop any video stream and stream-copy the audio untouched.
        self.run_ffmpeg(filename, temp_filename, ['-vn', '-acodec', 'copy'])
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
    """Correct the display aspect ratio of videos flagged as stretched."""

    def run(self, info):
        ratio = info.get('stretched_ratio')
        # A missing or unit ratio means nothing to fix.
        if ratio is None or ratio == 1:
            return True, info
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
        # Stream-copy; only the container-level aspect flag is rewritten.
        self.run_ffmpeg(filename, temp_filename,
                        ['-c', 'copy', '-aspect', '%f' % ratio])
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
    """Remux DASH m4a downloads into a proper mp4 container."""

    def run(self, info):
        # Only DASH-delivered m4a needs the container corrected.
        if info.get('container') != 'm4a_dash':
            return True, info
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, ['-c', 'copy', '-f', 'mp4'])
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
| |
"""
plot_grid.py
Class instance used to make Display.
"""
# Load the needed packages
import numpy as np
import os
import pyart
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as \
NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.colors import Normalize as mlabNormalize
from matplotlib.colorbar import ColorbarBase as mlabColorbarBase
from matplotlib.pyplot import cm
from ..core import Variable, Component, common, VariableChoose, QtGui, QtCore
from ..core.points import Points
# Save image file type and DPI (resolution)
IMAGE_EXT = 'png'
DPI = 200
# ========================================================================
class GridDisplay(Component):
'''
Class to create a display plot, using a Grid structure.
'''
Vgrid = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
VlevelZ = None \
#: see :ref:`shared_variable`, only used if plot_type="gridZ"
VlevelY = None \
#: see :ref:`shared_variable`, only used if plot_type="gridY"
VlevelX = None \
#: see :ref:`shared_variable`, only used if plot_type="gridX"
Vcmap = None #: see :ref:`shared_variable`
VplotAxes = None #: see :ref:`shared_variable` (no internal use)
VpathInteriorFunc = None #: see :ref:`shared_variable` (no internal use)
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class'''
args = _DisplayStart().startDisplay()
args['parent'] = parent
return self(**args), True
    def __init__(self, Vgrid=None, Vfield=None, VlevelZ=None, VlevelY=None,
                 VlevelX=None, Vlims=None, Vcmap=None, plot_type="gridZ",
                 name="Display", parent=None):
        '''
        Initialize the class to create display.
        Parameters
        ----------
        [Optional]
        Vgrid : :py:class:`~artview.core.core.Variable` instance
            grid signal variable. If None start new one with None.
        Vfield : :py:class:`~artview.core.core.Variable` instance
            Field signal variable. If None start new one with empty string.
        VlevelZ : :py:class:`~artview.core.core.Variable` instance
            Signal variable for vertical level, only used if
            plot_type="gridZ". If None start with value zero.
        VlevelY : :py:class:`~artview.core.core.Variable` instance
            Signal variable for latitudinal level, only used if
            plot_type="gridY". If None start with value zero.
        VlevelX : :py:class:`~artview.core.core.Variable` instance
            Signal variable for longitudinal level, only used if
            plot_type="gridX". If None start with value zero.
        Vlims : :py:class:`~artview.core.core.Variable` instance
            Limits signal variable.
            A value of None will instantiate a limits variable.
        Vcmap : :py:class:`~artview.core.core.Variable` instance
            Colormap signal variable.
            A value of None will instantiate a colormap variable.
        plot_type : "gridZ", "gridY" or "gridX"
            Define plot type, "gridZ" will plot a Z level, that is a XY
            plane. Analog for "gridY" and "gridZ"
        name : string
            Display window name.
        parent : PyQt instance
            Parent instance to associate to Display window.
            If None, then Qt owns, otherwise associated with parent PyQt
            instance.
        Notes
        -----
        This class records the selected button and passes the
        change value back to variable.
        '''
        super(GridDisplay, self).__init__(name=name, parent=parent)
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.basemap = None
        # Set up signal, so that DISPLAY can react to
        # external (or internal) changes in grid, field,
        # lims and level (expected to be Core.Variable instances)
        # The capital V so people remember using ".value"
        if Vgrid is None:
            self.Vgrid = Variable(None)
        else:
            self.Vgrid = Vgrid
        if Vfield is None:
            self.Vfield = Variable('')
        else:
            self.Vfield = Vfield
        if VlevelZ is None:
            self.VlevelZ = Variable(0)
        else:
            self.VlevelZ = VlevelZ
        if VlevelY is None:
            self.VlevelY = Variable(0)
        else:
            self.VlevelY = VlevelY
        if VlevelX is None:
            self.VlevelX = Variable(0)
        else:
            self.VlevelX = VlevelX
        if Vlims is None:
            self.Vlims = Variable(None)
        else:
            self.Vlims = Vlims
        if Vcmap is None:
            self.Vcmap = Variable(None)
        else:
            self.Vcmap = Vcmap
        self.VpathInteriorFunc = Variable(self.getPathInteriorValues)
        self.VplotAxes = Variable(None)
        # None handlers: those variables are published for other
        # components but trigger no local reaction when changed.
        self.sharedVariables = {"Vgrid": self.Newgrid,
                                "Vfield": self.NewField,
                                "Vlims": self.NewLims,
                                "Vcmap": self.NewCmap,
                                "VpathInteriorFunc": None,
                                "VplotAxes": None}
        # Registers the matching Vlevel* variable for the chosen plane.
        self.change_plot_type(plot_type)
        # Connect the components
        self.connectAllVariables()
        # Set plot title and colorbar units to defaults
        self.title = self._get_default_title()
        self.units = self._get_default_units()
        # set default latlon lines
        self.lat_lines = np.linspace(-90, 90, num=181)
        self.lon_lines = np.linspace(-180, 180, num=361)
        # Find the PyArt colormap names
        self.cm_names = ["pyart_" + m for m in pyart.graph.cm.datad
                         if not m.endswith("_r")]
        self.cm_names.sort()
        # Create tool dictionary
        self.tools = {}
        # Set up Default limits and cmap
        # (strong=False: initialize without emitting change signals)
        if Vlims is None:
            self._set_default_limits(strong=False)
        if Vcmap is None:
            self._set_default_cmap(strong=False)
        # Create a figure for output
        self._set_fig_ax()
        # Launch the GUI interface
        self.LaunchGUI()
        # Initialize grid variable
        self.Newgrid(None, None, True)
        self._update_fig_ax()
        self.show()
def keyPressEvent(self, event):
'''Allow level adjustment via the Up-Down arrow keys.'''
if event.key() == QtCore.Qt.Key_Up:
self.LevelSelectCmd(self.Vlevel.value + 1)
elif event.key() == QtCore.Qt.Key_Down:
self.LevelSelectCmd(self.Vlevel.value - 1)
else:
super(GridDisplay, self).keyPressEvent(event)
####################
# GUI methods #
####################
    def LaunchGUI(self):
        '''Launches a GUI interface.

        Builds the grid layout, central widget, figure canvas, control
        buttons and status bar, in that order (the canvas must exist
        before the buttons are laid out around it).
        '''
        # Create layout
        self.layout = QtGui.QGridLayout()
        self.layout.setSpacing(8)
        # Create the widget
        self.central_widget = QtGui.QWidget()
        self.setCentralWidget(self.central_widget)
        self._set_figure_canvas()
        self.central_widget.setLayout(self.layout)
        # Add buttons along display for user control
        self.addButtons()
        self.setUILayout()
        # Set the status bar to display messages
        self.statusbar = self.statusBar()
##################################
# User display interface methods #
##################################
    def addButtons(self):
        '''Add a series of buttons for user control over display.

        Each helper constructs one widget; setUILayout() later places
        them in the layout.
        '''
        # Create the Display controls
        self._add_displayBoxUI()
        # Create the Level controls
        self._add_levelBoxUI()
        # Create the Field controls
        self._add_fieldBoxUI()
        # Create the Tools controls
        self._add_toolsBoxUI()
        # Create the Informational label at top
        self._add_infolabel()
def setUILayout(self):
'''Setup the button/display UI layout.'''
self.layout.addWidget(self.levelBox, 0, 0)
self.layout.addWidget(self.fieldBox, 0, 1)
self.layout.addWidget(self.dispButton, 0, 2)
self.layout.addWidget(self.toolsButton, 0, 3)
self.layout.addWidget(self.infolabel, 0, 4)
#############################
# Functionality methods #
#############################
    def _open_LimsDialog(self):
        '''Open a dialog box to change display limits.

        limits_dialog returns (limits, cmap, change); change == 1 means
        the user accepted the new values.
        '''
        from .limits import limits_dialog
        limits, cmap, change = limits_dialog(self.Vlims.value,
                                             self.Vcmap.value, self.name)
        if change == 1:
            # NOTE(review): second arg False presumably suppresses the
            # cmap change signal so the plot redraws only once, on the
            # Vlims change -- confirm against Variable.change.
            self.Vcmap.change(cmap, False)
            self.Vlims.change(limits)
def _fillLevelBox(self):
'''Fill in the Level Window Box with current levels.'''
self.levelBox.clear()
self.levelBox.addItem("Level Window")
# Loop through and create each level button
if self.plot_type == "gridZ":
levels = self.Vgrid.value.axes['z_disp']['data']
elif self.plot_type == "gridY":
levels = self.Vgrid.value.axes['y_disp']['data']
elif self.plot_type == "gridX":
levels = self.Vgrid.value.axes['x_disp']['data']
for nlevel in range(len(levels)):
btntxt = "%2.1f m (level %d)" % (levels[nlevel], nlevel+1)
self.levelBox.addItem(btntxt)
def _fillFieldBox(self):
'''Fill in the Field Window Box with current variable names.'''
self.fieldBox.clear()
self.fieldBox.addItem("Field Window")
# Loop through and create each field button
for field in self.fieldnames:
self.fieldBox.addItem(field)
def _levelAction(self, text):
'''Define action for Level Button selection.'''
if text == "Level Window":
self._open_levelbuttonwindow()
else:
nlevel = int(text.split("(level ")[1][:-1])-1
self.LevelSelectCmd(nlevel)
def _fieldAction(self, text):
'''Define action for Field Button selection.'''
if text == "Field Window":
self._open_fieldbuttonwindow()
else:
self.FieldSelectCmd(str(text))
def _title_input(self):
'''Retrieve new plot title.'''
val, entry = common.string_dialog_with_reset(
self.title, "Plot Title", "Title:", self._get_default_title())
if entry is True:
self.title = val
self._update_plot()
def _units_input(self):
'''Retrieve new plot units.'''
val, entry = common.string_dialog_with_reset(
self.units, "Plot Units", "Units:", self._get_default_units())
if entry is True:
self.units = val
self._update_plot()
def _open_levelbuttonwindow(self):
'''Open a LevelButtonWindow instance.'''
from .level import LevelButtonWindow
if self.plot_type == "gridZ":
self.levelbuttonwindow = LevelButtonWindow(
self.Vlevel, self.plot_type, Vcontainer=self.Vgrid,
controlType="radio", name=self.name+" Level Selection",
parent=self.parent)
else:
self.levelbuttonwindow = LevelButtonWindow(
self.Vlevel, self.plot_type, Vcontainer=self.Vgrid,
controlType="slider", name=self.name+" Level Selection",
parent=self.parent)
    def _open_fieldbuttonwindow(self):
        '''Open a FieldButtonWindow instance.

        The window shares Vgrid/Vfield, so selections made there
        propagate back to this display.
        '''
        from .field import FieldButtonWindow
        self.fieldbuttonwindow = FieldButtonWindow(
            self.Vgrid, self.Vfield,
            name=self.name+" Field Selection", parent=self.parent)
    def _add_cmaps_to_button(self):
        '''Add a menu to change colormap used for plot.'''
        for cm_name in self.cm_names:
            cmapAction = self.dispCmapmenu.addAction(cm_name)
            cmapAction.setStatusTip("Use the %s colormap" % cm_name)
            # cm_name is bound as a default argument so each lambda keeps
            # its own name (avoids the late-binding closure pitfall).
            cmapAction.triggered[()].connect(
                lambda cm_name=cm_name: self.cmapSelectCmd(cm_name))
        self.dispCmap.setMenu(self.dispCmapmenu)
def _add_displayBoxUI(self):
'''Create the Display Options Button menu.'''
self.dispButton = QtGui.QPushButton("Display Options")
self.dispButton.setToolTip("Adjust display properties")
self.dispButton.setFocusPolicy(QtCore.Qt.NoFocus)
dispmenu = QtGui.QMenu(self)
dispLimits = dispmenu.addAction("Adjust Display Limits")
dispLimits.setToolTip("Set data, X, and Y range limits")
dispTitle = dispmenu.addAction("Change Title")
dispTitle.setToolTip("Change plot title")
dispUnit = dispmenu.addAction("Change Units")
dispUnit.setToolTip("Change units string")
self.dispCmap = dispmenu.addAction("Change Colormap")
self.dispCmapmenu = QtGui.QMenu("Change Cmap")
self.dispCmapmenu.setFocusPolicy(QtCore.Qt.NoFocus)
dispQuickSave = dispmenu.addAction("Quick Save Image")
dispQuickSave.setShortcut("Ctrl+D")
dispQuickSave.setToolTip(
"Save Image to local directory with default name")
dispSaveFile = dispmenu.addAction("Save Image")
dispSaveFile.setShortcut("Ctrl+S")
dispSaveFile.setStatusTip("Save Image using dialog")
dispLimits.triggered[()].connect(self._open_LimsDialog)
dispTitle.triggered[()].connect(self._title_input)
dispUnit.triggered[()].connect(self._units_input)
dispQuickSave.triggered[()].connect(self._quick_savefile)
dispSaveFile.triggered[()].connect(self._savefile)
self._add_cmaps_to_button()
self.dispButton.setMenu(dispmenu)
def _add_levelBoxUI(self):
'''Create the Level Selection ComboBox.'''
self.levelBox = QtGui.QComboBox()
self.levelBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.levelBox.setToolTip(
"Select level slice to display.\n"
"'Level Window' will launch popup.\n"
"Up/Down arrow keys Increase/Decrease level.")
self.levelBox.activated[str].connect(self._levelAction)
def _add_fieldBoxUI(self):
'''Create the Field Selection ComboBox.'''
self.fieldBox = QtGui.QComboBox()
self.fieldBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.fieldBox.setToolTip("Select variable/field in data file.\n"
"'Field Window' will launch popup.\n")
self.fieldBox.activated[str].connect(self._fieldAction)
def _add_toolsBoxUI(self):
'''Create the Tools Button menu.'''
self.toolsButton = QtGui.QPushButton("Toolbox")
self.toolsButton.setFocusPolicy(QtCore.Qt.NoFocus)
self.toolsButton.setToolTip("Choose a tool to apply")
toolmenu = QtGui.QMenu(self)
toolZoomPan = toolmenu.addAction("Zoom/Pan")
toolValueClick = toolmenu.addAction("Click for Value")
toolSelectRegion = toolmenu.addAction("Select a Region of Interest")
toolReset = toolmenu.addAction("Reset Tools")
toolDefault = toolmenu.addAction("Reset File Defaults")
toolZoomPan.triggered[()].connect(self.toolZoomPanCmd)
toolValueClick.triggered[()].connect(self.toolValueClickCmd)
toolSelectRegion.triggered[()].connect(self.toolSelectRegionCmd)
toolReset.triggered[()].connect(self.toolResetCmd)
toolDefault.triggered[()].connect(self.toolDefaultCmd)
self.toolsButton.setMenu(toolmenu)
def _add_infolabel(self):
'''Create an information label about the display'''
self.infolabel = QtGui.QLabel("Grid: \n"
"Field: \n"
"Level: ", self)
self.infolabel.setStyleSheet('color: red; font: italic 10px')
self.infolabel.setToolTip("Filename not loaded")
def _update_infolabel(self):
if self.Vgrid.value is None:
return
self.infolabel.setText(
"Grid: %s\n"
"Field: %s\n"
"Level: %d" % (self.Vgrid.value.metadata['instrument_name'],
self.Vfield.value,
self.Vlevel.value+1))
if hasattr(self.Vgrid.value, 'filename'):
self.infolabel.setToolTip(self.Vgrid.value.filename)
########################
# Selectionion methods #
########################
    def Newgrid(self, variable, value, strong):
        '''
        Slot for 'ValueChanged' signal of
        :py:class:`Vgrid <artview.core.core.Variable>`.
        This will:
        * Update fields and levels lists and MenuBoxes
        * Check grid scan type and reset limits if needed
        * Reset units and title
        * If strong update: update plot
        '''
        # No grid loaded: empty both menus and bail out.
        if self.Vgrid.value is None:
            self.fieldBox.clear()
            self.levelBox.clear()
            return
        # Cache the field names of the new grid for the menu fillers.
        self.fieldnames = self.Vgrid.value.fields.keys()
        # Check the file type and initialize limits (currently a no-op stub).
        self._check_file_type()
        # Repopulate the level and field combo boxes.
        self._fillLevelBox()
        self._fillFieldBox()
        # Reset units and title to pyart defaults for the new grid.
        self.units = self._get_default_units()
        self.title = self._get_default_title()
        if strong:
            self._update_plot()
            self._update_infolabel()
    def NewField(self, variable, value, strong):
        '''
        Slot for 'ValueChanged' signal of
        :py:class:`Vfield <artview.core.core.Variable>`.
        This will:
        * Reset colormap
        * Reset units
        * Update fields MenuBox
        * If strong update: update plot
        '''
        # Reset the colormap weakly so it does not trigger its own redraw;
        # the single redraw (if any) happens below.
        self._set_default_cmap(strong=False)
        self.units = self._get_default_units()
        self.title = self._get_default_title()
        # Sync the field combo box with the newly selected field name.
        idx = self.fieldBox.findText(value)
        self.fieldBox.setCurrentIndex(idx)
        if strong:
            self._update_plot()
            self._update_infolabel()
def NewLims(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vlims <artview.core.core.Variable>`.
This will:
* If strong update: update axes
'''
if strong:
self._update_axes()
def NewCmap(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vcmap <artview.core.core.Variable>`.
This will:
* If strong update: update plot
'''
if strong:
self._update_plot()
def NewLevel(self, variable, value, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vlevel* <artview.core.core.Variable>`.
This will:
* Update level MenuBox
* If strong update: update plot
'''
# +1 since the first one is "Level Window"
self.levelBox.setCurrentIndex(value+1)
if strong:
self._update_plot()
self._update_infolabel()
def LevelSelectCmd(self, nlevel):
'''
Captures Level selection and update Level
:py:class:`~artview.core.core.Variable`.
'''
if nlevel < 0:
nlevel = len(self.levels)-1
elif nlevel >= len(self.levels):
nlevel = 0
self.Vlevel.change(nlevel)
    def FieldSelectCmd(self, name):
        '''
        Captures field selection and update field
        :py:class:`~artview.core.core.Variable`.

        Parameters
        ----------
        name : str
            Name of the field to display; propagated to all listeners
            of the shared variable.
        '''
        self.Vfield.change(name)
def cmapSelectCmd(self, cm_name):
'''Captures colormap selection and redraws.'''
CMAP = cm_name
self.Vcmap.value['cmap'] = cm_name
self.Vcmap.update()
def toolZoomPanCmd(self):
'''Creates and connects to a Zoom/Pan instance.'''
from .tools import ZoomPan
scale = 1.1
self.tools['zoompan'] = ZoomPan(
self.Vlims, self.ax,
base_scale=scale, parent=self.parent)
self.tools['zoompan'].connect()
def toolValueClickCmd(self):
'''Creates and connects to Point-and-click value retrieval'''
from .pick_value import ValueClick
self.tools['valueclick'] = ValueClick(
self, name=self.name + "ValueClick", parent=self)
def toolSelectRegionCmd(self):
'''Creates and connects to Region of Interest instance.'''
from .select_region_old import SelectRegion
self.tools['select_region'] = SelectRegion(
self.VplotAxes, self.VpathInteriorFunc, self.Vfield,
name=self.name + " SelectRegion", parent=self)
    def toolResetCmd(self):
        '''Reset tools via disconnect.

        Delegates to :py:func:`tools.reset_tools`, which returns the new
        (cleared) tools mapping that replaces ``self.tools``.
        '''
        from . import tools
        self.tools = tools.reset_tools(self.tools)
def toolDefaultCmd(self):
'''Restore the Display defaults.'''
for key in self.tools.keys():
if self.tools[key] is not None:
self.tools[key].disconnect()
self.tools[key] = None
self._set_default_limits()
self._set_default_cmap()
def getPathInteriorValues(self, paths):
'''
Return the bins values path.
Parameters
----------
paths : list of :py:class:`matplotlib.path.Path` instances
Returns
-------
points : :py:class`artview.core.points.Points`
Points object containing all bins of the current grid
and level inside path. Axes : 'x_disp', 'y_disp', 'x_disp',
'x_index', 'y_index', 'z_index'. Fields: just current field
Notes
-----
If Vgrid.value is None, returns None
'''
from .tools import interior_grid
grid = self.Vgrid.value
if grid is None:
return None
try:
iter(paths)
except:
paths = [paths]
xy = np.empty((0, 2))
idx = np.empty((0, 2), dtype=np.int)
for path in paths:
_xy, _idx = interior_grid(path, grid, self.basemap,
self.Vlevel.value, self.plot_type)
xy = np.concatenate((xy, _xy))
idx = np.concatenate((idx, _idx))
if self.plot_type == "gridZ":
x = xy[:, 0]
y = xy[:, 1]
z = np.ones_like(xy[:, 0]) * self.levels[self.VlevelZ.value]
x_idx = idx[:, 0]
y_idx = idx[:, 1]
z_idx = np.ones_like(idx[:, 0]) * self.VlevelZ.value
elif self.plot_type == "gridY":
x = xy[:, 0] * 1000.
z = xy[:, 1] * 1000.
y = np.ones_like(xy[:, 0]) * self.levels[self.VlevelY.value]
x_idx = idx[:, 0]
z_idx = idx[:, 1]
y_idx = np.ones_like(idx[:, 0]) * self.VlevelY.value
elif self.plot_type == "gridX":
z = xy[:, 0] * 1000.
y = xy[:, 1] * 1000.
x = np.ones_like(xy[:, 0]) * self.levels[self.VlevelX.value]
z_idx = idx[:, 0]
y_idx = idx[:, 1]
x_idx = np.ones_like(idx[:, 0]) * self.VlevelX.value
xaxis = {'data': x,
'long_name': 'X-coordinate in Cartesian system',
'axis': 'X',
'units': 'm'}
yaxis = {'data': y,
'long_name': 'Y-coordinate in Cartesian system',
'axis': 'Y',
'units': 'm'}
zaxis = {'data': z,
'long_name': 'Z-coordinate in Cartesian system',
'axis': 'Z',
'units': 'm'}
field = grid.fields[self.Vfield.value].copy()
field['data'] = grid.fields[self.Vfield.value]['data'][
z_idx, y_idx, x_idx]
x_idx = {'data': x_idx,
'long_name': 'index in nx dimension'}
y_idx = {'data': y_idx,
'long_name': 'index in ny dimension'}
z_idx = {'data': z_idx,
'long_name': 'index in nz dimension'}
axes = {'x_disp': xaxis,
'y_disp': yaxis,
'z_disp': zaxis,
'x_index': x_idx,
'y_index': y_idx,
'z_index': z_idx, }
fields = {self.Vfield.value: field}
points = Points(fields, axes, grid.metadata.copy(), xy.shape[0])
return points
def getNearestPoints(self, xdata, ydata):
'''
Return the bins values nearest to point.
Parameters
----------
xdata, ydata : float
Returns
-------
x, y, z, value, x_idx, y_idx, z_idx: ndarray
Truplet of 1arrays containing x,y,z coordinate, current field
value, x, y and z index.
Notes
-----
If Vgrid.value is None, returns None
'''
from .tools import nearest_point_grid
grid = self.Vgrid.value
# map center
lat0 = self.Vgrid.value.axes['lat']['data'][0]
lon0 = self.Vgrid.value.axes['lon']['data'][0]
if grid is None:
return (np.array([]),)*7
if self.plot_type == "gridZ":
idx = nearest_point_grid(
grid, self.basemap, self.levels[self.VlevelZ.value], ydata,
xdata)
elif self.plot_type == "gridY":
idx = nearest_point_grid(
grid, self.basemap, ydata * 1000.,
self.levels[self.VlevelY.value], xdata * 1000.)
elif self.plot_type == "gridX":
idx = nearest_point_grid(
grid, self.basemap, ydata * 1000., xdata * 1000.,
self.levels[self.VlevelX.value])
aux = (grid.axes['x_disp']['data'][idx[:, 2]],
grid.axes['y_disp']['data'][idx[:, 1]],
grid.axes['z_disp']['data'][idx[:, 0]],
grid.fields[self.Vfield.value]['data'][idx[:, 0], idx[:, 1],
idx[:, 2]],
idx[:, 2], idx[:, 1], idx[:, 0])
return aux
####################
# Plotting methods #
####################
def _set_fig_ax(self):
'''Set the figure and axis to plot.'''
self.XSIZE = 8
self.YSIZE = 8
self.fig = Figure(figsize=(self.XSIZE, self.YSIZE))
self.ax = self.fig.add_axes([0.2, 0.2, 0.7, 0.7])
self.cax = self.fig.add_axes([0.2, 0.10, 0.7, 0.02])
self.VplotAxes.change(self.ax)
# self._update_axes()
def _update_fig_ax(self):
'''Set the figure and axis to plot.'''
if self.plot_type in ("gridX", "gridY"):
self.YSIZE = 5
else:
self.YSIZE = 8
xwidth = 0.7
yheight = 0.7
self.ax.set_position([0.15, 0.15, xwidth, yheight])
self.cax.set_position([0.15+xwidth, 0.15, 0.02, yheight])
self._update_axes()
def _set_figure_canvas(self):
'''Set the figure canvas to draw in window area.'''
self.canvas = FigureCanvasQTAgg(self.fig)
# Add the widget to the canvas
self.layout.addWidget(self.canvas, 1, 0, 7, 6)
    def _update_plot(self):
        '''Draw/Redraw the plot.

        Clears both axes, plots the current field/level with pyart's
        GridMapDisplay, stores the resulting axis limits back into Vlims,
        and rebuilds the colorbar. No-op when no grid is loaded.
        '''
        if self.Vgrid.value is None:
            return
        # Create the plot with PyArt GridMapDisplay
        self.ax.cla()  # Clear the plot axes
        self.cax.cla()  # Clear the colorbar axes
        # Field missing from this grid: warn via a red status bar and bail.
        if self.Vfield.value not in self.Vgrid.value.fields.keys():
            self.canvas.draw()
            self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
                                         "background:rgba(255,0,0,255);" +
                                         "color:black;font-weight:bold;}")
            self.statusbar.showMessage("Field not Found in Radar", msecs=5000)
            return
        else:
            # Restore a transparent status bar and clear any old warning.
            self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
                                         "background:rgba(0,0,0,0);" +
                                         "color:black;font-weight:bold;}")
            self.statusbar.clearMessage()
        title = self.title
        limits = self.Vlims.value
        cmap = self.Vcmap.value
        self.display = pyart.graph.GridMapDisplay(self.Vgrid.value)
        # Create Plot: only the horizontal (Z) view has a basemap.
        if self.plot_type == "gridZ":
            self.display.plot_basemap(
                self.lat_lines, self.lon_lines, ax=self.ax)
            self.basemap = self.display.get_basemap()
            self.plot = self.display.plot_grid(
                self.Vfield.value, self.VlevelZ.value, vmin=cmap['vmin'],
                vmax=cmap['vmax'], cmap=cmap['cmap'], colorbar_flag=False,
                title=title, ax=self.ax, fig=self.fig)
        elif self.plot_type == "gridY":
            self.basemap = None
            self.plot = self.display.plot_latitudinal_level(
                self.Vfield.value, self.VlevelY.value, vmin=cmap['vmin'],
                vmax=cmap['vmax'], cmap=cmap['cmap'], colorbar_flag=False,
                title=title, ax=self.ax, fig=self.fig)
        elif self.plot_type == "gridX":
            self.basemap = None
            self.plot = self.display.plot_longitudinal_level(
                self.Vfield.value, self.VlevelX.value, vmin=cmap['vmin'],
                vmax=cmap['vmax'], cmap=cmap['cmap'], colorbar_flag=False,
                title=title, ax=self.ax, fig=self.fig)
        # Mirror whatever limits pyart chose back into the shared variable
        # (mutated in place), then apply them.
        limits = self.Vlims.value
        x = self.ax.get_xlim()
        y = self.ax.get_ylim()
        limits['xmin'] = x[0]
        limits['xmax'] = x[1]
        limits['ymin'] = y[0]
        limits['ymax'] = y[1]
        self._update_axes()
        # Colorbar is drawn manually (colorbar_flag=False above).
        norm = mlabNormalize(vmin=cmap['vmin'],
                             vmax=cmap['vmax'])
        self.cbar = mlabColorbarBase(self.cax, cmap=cmap['cmap'],
                                     norm=norm, orientation='vertical')
        self.cbar.set_label(self.units)
        if self.plot_type == "gridZ":
            print("Plotting %s field, Z level %d in %s" % (
                self.Vfield.value, self.VlevelZ.value+1, self.name))
        elif self.plot_type == "gridY":
            print("Plotting %s field, Y level %d in %s" % (
                self.Vfield.value, self.VlevelY.value+1, self.name))
        elif self.plot_type == "gridX":
            print("Plotting %s field, X level %d in %s" % (
                self.Vfield.value, self.VlevelX.value+1, self.name))
        self.canvas.draw()
def _update_axes(self):
'''Change the Plot Axes.'''
limits = self.Vlims.value
self.ax.set_xlim(limits['xmin'], limits['xmax'])
self.ax.set_ylim(limits['ymin'], limits['ymax'])
self.ax.figure.canvas.draw()
#########################
# Check methods #
#########################
    def _set_default_limits(self, strong=True):
        '''Set limits to pre-defined default.

        Parameters
        ----------
        strong : bool, optional
            Passed through to ``Vlims.change``; a strong change makes
            listeners redraw.
        '''
        limits = self.Vlims.value
        if limits is None:
            limits = {}
        # No grid: fall back to a unit box.
        if self.Vgrid.value is None:
            limits['xmin'] = 0
            limits['xmax'] = 1
            limits['ymin'] = 0
            limits['ymax'] = 1
        elif self.plot_type == "gridZ":
            # Horizontal view: use the basemap corners when available,
            # otherwise a generic +/-150 box (presumably km — TODO confirm).
            if self.basemap is not None:
                limits['xmin'] = self.basemap.llcrnrx
                limits['xmax'] = self.basemap.urcrnrx
                limits['ymin'] = self.basemap.llcrnry
                limits['ymax'] = self.basemap.urcrnry
            else:
                limits['xmin'] = -150
                limits['xmax'] = 150
                limits['ymin'] = -150
                limits['ymax'] = 150
        elif self.plot_type == "gridY":
            # Latitudinal slice: x spans the grid x axis, y spans height
            # (grid axes are in meters; display limits are in km).
            limits['xmin'] = (self.Vgrid.value.axes['x_disp']['data'][0] /
                              1000.)
            limits['xmax'] = (self.Vgrid.value.axes['x_disp']['data'][-1] /
                              1000.)
            limits['ymin'] = (self.Vgrid.value.axes['z_disp']['data'][0] /
                              1000.)
            limits['ymax'] = (self.Vgrid.value.axes['z_disp']['data'][-1] /
                              1000.)
        elif self.plot_type == "gridX":
            # Longitudinal slice: x spans the grid y axis, y spans height.
            limits['xmin'] = (self.Vgrid.value.axes['y_disp']['data'][0] /
                              1000.)
            limits['xmax'] = (self.Vgrid.value.axes['y_disp']['data'][-1] /
                              1000.)
            limits['ymin'] = (self.Vgrid.value.axes['z_disp']['data'][0] /
                              1000.)
            limits['ymax'] = (self.Vgrid.value.axes['z_disp']['data'][-1] /
                              1000.)
        self.Vlims.change(limits, strong)
def _set_default_cmap(self, strong=True):
'''Set colormap to pre-defined default.'''
cmap = pyart.config.get_field_colormap(self.Vfield.value)
d = {}
d['cmap'] = cmap
lims = pyart.config.get_field_limits(self.Vfield.value,
self.Vgrid.value)
if lims != (None, None):
d['vmin'] = lims[0]
d['vmax'] = lims[1]
else:
d['vmin'] = -10
d['vmax'] = 65
self.Vcmap.change(d, strong)
def _get_default_title(self):
'''Get default title from pyart.'''
if (self.Vgrid.value is None or
self.Vfield.value not in self.Vgrid.value.fields):
return ''
if self.plot_type == "gridZ":
return pyart.graph.common.generate_grid_title(self.Vgrid.value,
self.Vfield.value,
self.Vlevel.value)
elif self.plot_type == "gridY":
return pyart.graph.common.generate_latitudinal_level_title(
self.Vgrid.value, self.Vfield.value, self.Vlevel.value)
elif self.plot_type == "gridX":
return pyart.graph.common.generate_longitudinal_level_title(
self.Vgrid.value, self.Vfield.value, self.Vlevel.value)
def _get_default_units(self):
'''Get default units for current grid and field.'''
if self.Vgrid.value is not None:
try:
return self.Vgrid.value.fields[self.Vfield.value]['units']
except:
return ''
else:
return ''
    def _check_file_type(self):
        '''Check the loaded file's type and adjust the display if needed.

        Currently a deliberate no-op placeholder kept for API symmetry
        with other displays; grids need no per-type handling here.
        '''
        # self._update_fig_ax()
        return
def change_plot_type(self, plot_type):
'''Change plot type.'''
# remove shared variables
for key in ("VlevelZ", "VlevelY", "VlevelX"):
if key in self.sharedVariables.keys():
del self.sharedVariables[key]
if plot_type == "gridZ":
self.sharedVariables["VlevelZ"] = self.NewLevel
elif plot_type == "gridY":
self.sharedVariables["VlevelY"] = self.NewLevel
elif plot_type == "gridX":
self.sharedVariables["VlevelX"] = self.NewLevel
else:
import warnings
warnings.warn('Invalid Plot type %s, reseting to gridZ' %
plot_type)
self.sharedVariables["VlevelZ"] = self.NewLevel
plot_type = "gridZ"
self.plot_type = plot_type
########################
# Image save methods #
########################
def _quick_savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display via PyArt interface.'''
imagename = self.display.generate_filename(
self.Vfield.value, self.Vlevel.value, ext=IMAGE_EXT)
self.canvas.print_figure(os.path.join(os.getcwd(), imagename),
dpi=DPI)
self.statusbar.showMessage('Saved to %s' % os.path.join(os.getcwd(),
imagename))
def _savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display using PyQt dialog interface.'''
imagename = self.display.generate_filename(
self.Vfield.value, self.Vlevel.value, ext=IMAGE_EXT)
file_choices = "PNG (*.png)|*.png"
path = unicode(QtGui.QFileDialog.getSaveFileName(
self, 'Save file', imagename, file_choices))
if path:
self.canvas.print_figure(path, dpi=DPI)
self.statusbar.showMessage('Saved to %s' % path)
########################
# get methods #
########################
    def getPlotAxis(self):
        '''Return the :py:class:`matplotlib.axes.Axes` of the main plot.'''
        return self.ax
    def getStatusBar(self):
        '''Return the :py:class:`PyQt4.QtGui.QStatusBar` instance.'''
        return self.statusbar
    def getField(self):
        '''Return the name of the currently displayed field.'''
        return self.Vfield.value
    def getUnits(self):
        '''Return the current units string shown on the colorbar.'''
        return self.units
########################
# Properties #
########################
@property
def Vlevel(self):
'''Alias to VlevelZ, VlevelY or VlevelX depending on plot_type.'''
if self.plot_type == "gridZ":
return self.VlevelZ
elif self.plot_type == "gridY":
return self.VlevelY
elif self.plot_type == "gridX":
return self.VlevelX
else:
return None
@property
def levels(self):
'''Values from the axes of grid, depending on plot_type.'''
if self.plot_type == "gridZ":
return self.Vgrid.value.axes['z_disp']['data'][:]
elif self.plot_type == "gridY":
return self.Vgrid.value.axes['y_disp']['data'][:]
elif self.plot_type == "gridX":
return self.Vgrid.value.axes['x_disp']['data'][:]
else:
return None
class _DisplayStart(QtGui.QDialog):
    '''
    Dialog Class for graphical start of display, to be used in guiStart.
    '''

    def __init__(self):
        '''Initialize the class to create the interface.'''
        super(_DisplayStart, self).__init__()
        self.result = {}
        self.layout = QtGui.QGridLayout(self)
        # set window as modal
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.setupUi()

    def _choose_variable(self, result_key):
        '''Open a VariableChoose dialog and store the picked shared
        variable under *result_key*; do nothing if the user cancels.

        Consolidates the four previously copy-pasted choose* methods.
        '''
        item = VariableChoose().chooseVariable()
        if item is not None:
            self.result[result_key] = getattr(item[1], item[2])

    def chooseGrid(self):
        '''Pick the shared grid variable.'''
        self._choose_variable("Vgrid")

    def chooseField(self):
        '''Pick the shared field variable.'''
        self._choose_variable("Vfield")

    def chooseLevel(self):
        '''Pick the shared (Z) level variable.'''
        self._choose_variable("VlevelZ")

    def chooseLims(self):
        '''Pick the shared limits variable.'''
        self._choose_variable("Vlims")

    def setupUi(self):
        '''Lay out the dialog: one row per configurable option.'''
        self.gridButton = QtGui.QPushButton("Find Variable")
        self.gridButton.clicked.connect(self.chooseGrid)
        self.layout.addWidget(QtGui.QLabel("Vgrid"), 0, 0)
        self.layout.addWidget(self.gridButton, 0, 1, 1, 3)
        self.plot_type = QtGui.QLineEdit("gridZ")
        self.layout.addWidget(QtGui.QLabel("plot_type"), 1, 0)
        self.layout.addWidget(self.plot_type, 1, 1, 1, 3)
        self.fieldButton = QtGui.QPushButton("Find Variable")
        self.fieldButton.clicked.connect(self.chooseField)
        self.layout.addWidget(QtGui.QLabel("Vfield"), 2, 0)
        self.field = QtGui.QLineEdit("")
        self.layout.addWidget(self.field, 2, 1)
        self.layout.addWidget(QtGui.QLabel("or"), 2, 2)
        self.layout.addWidget(self.fieldButton, 2, 3)
        self.levelButton = QtGui.QPushButton("Find Variable")
        self.levelButton.clicked.connect(self.chooseLevel)
        self.layout.addWidget(QtGui.QLabel("Vlevel"), 3, 0)
        self.level = QtGui.QSpinBox()
        self.layout.addWidget(self.level, 3, 1)
        self.layout.addWidget(QtGui.QLabel("or"), 3, 2)
        self.layout.addWidget(self.levelButton, 3, 3)
        self.limsButton = QtGui.QPushButton("Find Variable")
        self.limsButton.clicked.connect(self.chooseLims)
        self.layout.addWidget(QtGui.QLabel("Vlims"), 4, 0)
        self.layout.addWidget(self.limsButton, 4, 1, 1, 3)
        self.name = QtGui.QLineEdit("GridDisplay")
        self.layout.addWidget(QtGui.QLabel("name"), 5, 0)
        self.layout.addWidget(self.name, 5, 1, 1, 3)
        self.closeButton = QtGui.QPushButton("Start")
        self.closeButton.clicked.connect(self.closeDialog)
        self.layout.addWidget(self.closeButton, 6, 0, 1, 5)

    def closeDialog(self):
        '''Accept the dialog (connected to the Start button).'''
        self.done(QtGui.QDialog.Accepted)

    def startDisplay(self):
        '''Run the dialog modally and return the collected kwargs dict.'''
        self.exec_()
        # if no Vgrid abort
        if 'Vgrid' not in self.result:
            self.result['Vgrid'] = Variable(None)
            # common.ShowWarning("Must select a variable for Vgrid.")
            # I'm allowing this to continue, but this will result in error
        # if Vfield, Vlevel, Vlims were not select create new
        field = str(self.field.text())
        level = self.level.value()
        if 'Vfield' not in self.result:
            self.result['Vfield'] = Variable(field)
        if 'VlevelZ' not in self.result:
            self.result['VlevelZ'] = Variable(level)
        self.result['name'] = str(self.name.text())
        self.result['plot_type'] = str(self.plot_type.text())
        return self.result
| |
#
# This file is part of DroneBridgeLib: https://github.com/seeul8er/DroneBridge
#
# Copyright 2018 Wolfgang Christl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import select
from enum import Enum
from socket import *
from subprocess import call
from DroneBridge.bpf import attach_filter
from DBCommProt import DBCommProt
from db_comm_messages import change_settings, new_settingsresponse_message, comm_message_extract_info, \
comm_crc_correct, create_sys_ident_response, new_error_response_message, \
new_ping_response_message, new_ack_message, change_cam_selection, init_cam_gpios, normalize_jscal_axis
from db_ip_checker import DBIPGetter
class DBPort(Enum):
    """DroneBridge destination ports (single-byte wire identifiers)."""
    DB_PORT_CONTROLLER = b'\x01'
    DB_PORT_TELEMETRY = b'\x02'
    DB_PORT_VIDEO = b'\x03'
    DB_PORT_COMMUNICATION = b'\x04'
    DB_PORT_STATUS = b'\x05'
    DB_PORT_PROXY = b'\x06'
    DB_PORT_RC = b'\x07'
class DBDir(Enum):
    """Direction of a DroneBridge packet: toward the UAV or the ground."""
    DB_TO_UAV = b'\x01'
    DB_TO_GND = b'\x03'
RADIOTAP_HEADER = b'\x00\x00\x0c\x00\x04\x80\x00\x00\x0c\x00\x18\x00' # 6Mbit transmission speed set with Ralink chips
ETH_TYPE = b"\x88\xAB"  # EtherType constant (0x88AB)
DB_V2_HEADER_LENGTH = 10  # bytes in a DroneBridge protocol v2 header
UDP_BUFFERSIZE = 2048  # max bytes read per UDP datagram
MONITOR_BUFFERSIZE = 2048  # raw-socket read size (telemetry path)
MONITOR_BUFFERSIZE_COMM = 2048  # raw-socket read size (communication path)
class DBProtocol:
ip_smartp = "192.168.42.129"
APP_PORT_TEL = 1604
APP_PORT_COMM = 1603
    def __init__(self, udp_port_rx, ip_rx, udp_port_smartphone, comm_direction, interface_drone_comm,
                 mode, communication_id, dronebridge_port, tag=''):
        """Set up sockets and state for one DroneBridge endpoint.

        Parameters
        ----------
        udp_port_rx : int
            UDP port of the receiver (wifi mode).
        ip_rx : str
            IP address of the receiving side (groundstation).
        udp_port_smartphone : int
            Local UDP port bound for smartphone traffic.
        comm_direction : DBDir
            DB_TO_UAV when running on the groundstation, DB_TO_GND on the UAV.
        interface_drone_comm : str
            Name of the long-range network interface.
        mode : str
            'wifi' for UDP transport, anything else for monitor/raw mode.
        communication_id : int or bytes
            Shared id; must match on drone and groundstation.
        dronebridge_port : DBPort or bytes
            DroneBridge port this endpoint serves.
        tag : str, optional
            Prefix used for log/print output.
        """
        if type(communication_id) is int:
            self.comm_id = bytes([communication_id])  # must be the same on drone and groundstation
        else:
            self.comm_id = communication_id  # must be the same on drone and groundstation
        assert type(self.comm_id) is bytes
        self.udp_port_rx = udp_port_rx  # 1604
        self.ip_rx = ip_rx
        self.udp_port_smartphone = udp_port_smartphone  # we bind to that locally
        # direction is stored as DBDir
        self.comm_direction = comm_direction  # set to 0x01 if program runs on groundst. and to 0x03 if runs on drone
        assert type(self.comm_direction) is DBDir
        self.interface = interface_drone_comm  # the long range interface
        self.mode = mode
        self.tag = tag
        if self.mode == 'wifi':
            self.short_mode = 'w'
        else:
            self.short_mode = 'm'
        self.fcf = b'\xb4\x00\x00\x00'  # RTS frames
        # port is stored as byte value
        if type(dronebridge_port) is DBPort:
            self.db_port = dronebridge_port.value
        else:
            self.db_port = dronebridge_port
        assert type(self.db_port) is bytes
        self.comm_sock = self._open_comm_sock()
        # dirty fix till we do some proper code cleanup!
        if self.comm_direction == DBDir.DB_TO_UAV and (self.db_port == DBPort.DB_PORT_TELEMETRY.value or
                                                       self.db_port == DBPort.DB_PORT_COMMUNICATION.value):
            self.android_sock = self._open_android_udpsocket()
            self.ipgetter = DBIPGetter()
        self.changed = False  # True once the GoPro iptables rule was inserted
        self.signal = 0  # signal quality that is measured [dBm]
        self.first_run = True  # used to flush the raw socket buffer once
        self.seq_num = 0
        init_cam_gpios()
    def receive_from_db(self, custom_timeout=1.5):
        """Check if new data from the drone arrived and return packet payload. Default timeout is 1.5s.

        Returns the payload bytes on success, False on error; implicitly
        returns None when the monitor-mode select() times out with no data.
        """
        if self.mode == 'wifi':
            try:
                data, addr = self.comm_sock.recvfrom(UDP_BUFFERSIZE)
                return data
            except Exception as e:
                print(
                    self.tag + str(e) + ": Drone is not ready or has wrong IP address of groundstation. Sending hello")
                return False
        else:
            try:
                # Wait up to custom_timeout for the raw socket to be readable.
                readable, writable, exceptional = select.select([self.comm_sock], [], [], custom_timeout)
                if readable:
                    data = self.parse_packet(bytearray(self.comm_sock.recv(MONITOR_BUFFERSIZE_COMM)))
                    if data != False:
                        return data
            except timeout as t:
                # socket.timeout (star-imported from socket above)
                print(self.tag + str(t) + "Socket timed out. No response received from drone (monitor mode)")
                return False
            except Exception as e:
                print(self.tag + str(e) + ": Error receiving data form drone (monitor mode)")
                return False
    def receive_process_datafromgroundstation(self):
        """Check if new data from the groundstation arrived and process the packet - do not use for custom data!

        Non-blocking: polls the communication socket once and returns.
        """
        # check if the socket received something and process data
        if self.mode == "wifi":
            readable, writable, exceptional = select.select([self.comm_sock], [], [], 0)
            if readable:
                data, addr = self.comm_sock.recvfrom(UDP_BUFFERSIZE)
                if data.decode() == "tx_hello_packet":
                    # Hello packet: learn/refresh the groundstation address.
                    self.ip_rx = addr[0]
                    print(self.tag + "Updated goundstation IP-address to: " + str(self.ip_rx))
                else:
                    print(self.tag + "New data from groundstation: " + data.decode())
        else:
            if self.db_port == DBPort.DB_PORT_TELEMETRY.value:
                # socket is non-blocking - return if nothing there and keep sending telemetry
                readable, writable, exceptional = select.select([self.comm_sock], [], [], 0)
                if readable:
                    # just get RSSI of radiotap header
                    self.parse_packet(bytes(self.comm_sock.recv(MONITOR_BUFFERSIZE)))
            else:
                # Communication port: drain stale packets once, then block on
                # the next request and route it through the comm protocol.
                if self.first_run:
                    self._clear_monitor_comm_socket_buffer()
                    self.first_run = False
                db_comm_prot_request = self.parse_packet(bytes(self.comm_sock.recv(MONITOR_BUFFERSIZE_COMM)))
                if db_comm_prot_request != False:
                    try:
                        if not self._route_db_comm_protocol(db_comm_prot_request):
                            print(self.tag + "smartphone request could not be processed correctly")
                    except (UnicodeDecodeError, ValueError):
                        print(self.tag + "Received message from groundstation with error. Not UTF error or ValueError")
def process_smartphonerequests(self, last_keepalive):
"""See if smartphone told the groundstation to do something. Returns recent keep-alive time"""
r, w, e = select.select([self.android_sock], [], [], 0)
if r:
smartph_data, android_addr = self.android_sock.recvfrom(UDP_BUFFERSIZE)
return self._process_smartphone_command(smartph_data, last_keepalive)
return last_keepalive
    def sendto_smartphone(self, raw_data, port):
        """Sends data to smartphone. Socket is nonblocking so we need to wait till it becomes.

        NOTE(review): this busy-waits (zero-timeout select in a loop) until
        the socket is writable; returns the byte count sent, or 0 on error.
        """
        # Refresh the smartphone address before every send.
        self.ip_smartp = self.ipgetter.return_smartphone_ip()
        while True:
            r, w, e = select.select([], [self.android_sock], [], 0)
            if w:
                try:
                    return self.android_sock.sendto(raw_data, (self.ip_smartp, port))
                except:
                    print(
                        self.tag + "Could not send to smartphone (" + self.ip_smartp + "). Make sure it is connected.")
                    return 0
def sendto_groundstation(self, data_bytes, db_port):
"""Call this function to send stuff to the groundstation"""
if type(db_port) is DBPort:
db_port = db_port.value
if self.mode == "wifi":
num = self._sendto_tx_wifi(data_bytes)
else:
num = self._send_monitor(data_bytes, db_port, DBDir.DB_TO_GND.value)
return num
def sendto_uav(self, data_bytes, db_port):
"""Call this function to send stuff to the drone!"""
if type(db_port) is DBPort:
db_port = db_port.value
if self.mode == "wifi":
num = self._sendto_rx_wifi(data_bytes, db_port)
else:
num = self._send_monitor(data_bytes, db_port, DBDir.DB_TO_UAV.value)
return num
def send_beacon(self):
self.sendto_uav('groundstation_beacon'.encode(), DBPort.DB_PORT_TELEMETRY.value)
def update_routing_gopro(self):
print(self.tag + "Update iptables to send GoPro stream to " + str(self.ip_rx))
if self.changed:
call("iptables -t nat -R PREROUTING 1 -p udp --dport 8554 -j DNAT --to " + str(self.ip_rx))
else:
call("iptables -t nat -I PREROUTING 1 -p udp --dport 8554 -j DNAT --to " + str(self.ip_rx))
self.changed = True
    def set_raw_sock_blocking(self, is_blocking):
        """Set the communication socket blocking (True) or non-blocking (False)."""
        self.comm_sock.setblocking(is_blocking)
    def getsmartphonesocket(self):
        """Return the UDP socket used for smartphone communication."""
        return self.android_sock
    def getcommsocket(self):
        """Return the main communication socket (UDP or raw, per mode)."""
        return self.comm_sock
@staticmethod
def parse_packet(packet):
"""Pars DroneBridgeLib raw protocol v2. Returns False if not OK or return packet payload if it is!"""
rth_length = packet[2]
db_v2_payload_length = int.from_bytes(packet[(rth_length + 7):(rth_length + 8)] +
packet[(rth_length + 8):(rth_length + 9)],
byteorder='little', signed=False)
payload_start = rth_length + DB_V2_HEADER_LENGTH
return packet[payload_start:(payload_start + db_v2_payload_length)]
    def _process_smartphone_command(self, raw_data, thelast_keepalive):
        """We received something from the smartphone. Most likely a communication message. Do something with it.

        Returns *thelast_keepalive* unchanged (kept for the caller's
        keep-alive bookkeeping).
        """
        try:
            # Log the message if it is text; binary data is still routed.
            raw_data_decoded = bytes.decode(raw_data)
            print(self.tag + "Received from SP: " + raw_data_decoded)
        except UnicodeDecodeError:
            pass  # non-UTF-8 payload; skip logging only
        if not self._route_db_comm_protocol(raw_data):
            print(self.tag + "smartphone command could not be processed correctly!")
        return thelast_keepalive
def _route_db_comm_protocol(self, raw_data_encoded):
    """Routing of the DroneBridgeLib communication protocol packets.

    Only write to local settings if we get a positive response from the
    drone! Ping requests are an exception!

    :param raw_data_encoded: raw message bytes (json payload + crc)
    :return: truthy on success, False otherwise
    """
    status = False
    extracted_info = comm_message_extract_info(raw_data_encoded)  # returns json bytes [0] and crc bytes [1]
    try:
        loaded_json = json.loads(extracted_info[0].decode())
    except UnicodeDecodeError:
        print(self.tag + "Invalid command: Could not decode json message")
        return False
    except ValueError:
        print(self.tag + "ValueError on decoding extracted_info[0]")
        return False
    # Check CRC
    if not comm_crc_correct(extracted_info):
        message = new_error_response_message('Bad CRC', self.comm_direction.value,
                                             loaded_json['id'])
        if self.comm_direction == DBDir.DB_TO_UAV:
            self.sendto_smartphone(message, DBPort.DB_PORT_COMMUNICATION.value)
        else:
            self.sendto_groundstation(message, DBPort.DB_PORT_COMMUNICATION.value)
        return False
    # Process communication protocol, dispatching on the message destination
    if loaded_json['destination'] == 1 and self.comm_direction == DBDir.DB_TO_UAV:
        # Destination 1: ground station only
        message = self._process_db_comm_protocol_type(loaded_json)
        if message != "":
            status = self.sendto_smartphone(message, self.APP_PORT_COMM)
        else:
            status = True
    elif loaded_json['destination'] == 2:
        # Destination 2: ground station AND drone
        if self.comm_direction == DBDir.DB_TO_UAV:
            # Always process ping requests right away! Do not wait for UAV response!
            if loaded_json['type'] == DBCommProt.DB_TYPE_PING_REQUEST.value:
                message = self._process_db_comm_protocol_type(loaded_json)
                status = self.sendto_smartphone(message, self.APP_PORT_COMM)
                response_drone = self._redirect_comm_to_drone(raw_data_encoded)
                if type(response_drone) is bytearray:
                    status = self.sendto_smartphone(response_drone, self.APP_PORT_COMM)
            else:
                # Execute locally only after the drone confirmed the command.
                response_drone = self._redirect_comm_to_drone(raw_data_encoded)
                if type(response_drone) is bytearray:
                    message = self._process_db_comm_protocol_type(loaded_json)
                    self.sendto_smartphone(message, self.APP_PORT_COMM)
                    status = self.sendto_smartphone(response_drone, self.APP_PORT_COMM)
                else:
                    message = new_error_response_message('UAV was unreachable - command not executed',
                                                         DBCommProt.DB_ORIGIN_GND.value, loaded_json['id'])
                    self.sendto_smartphone(message, self.APP_PORT_COMM)
        else:
            message = self._process_db_comm_protocol_type(loaded_json)
            sentbytes = self.sendto_groundstation(message, DBPort.DB_PORT_COMMUNICATION.value)
            # sendto_groundstation returns None on success in monitor mode
            if sentbytes is None:  # was `== None`; identity check is correct here
                status = True
    elif loaded_json['destination'] == 3:
        # Destination 3: drone only -- just forward
        if self.comm_direction == DBDir.DB_TO_UAV:
            status = self.sendto_uav(raw_data_encoded, DBPort.DB_PORT_COMMUNICATION.value)
        else:
            pass
    elif loaded_json['destination'] == 4:
        # Destination 4: smartphone
        if self.comm_direction == DBDir.DB_TO_UAV:
            status = self.sendto_smartphone(raw_data_encoded, self.APP_PORT_COMM)
    elif loaded_json['destination'] == 5:
        # Destination 5: forward on ground side, answer locally on drone side
        if self.comm_direction == DBDir.DB_TO_UAV:
            status = self.sendto_uav(raw_data_encoded, DBPort.DB_PORT_COMMUNICATION.value)
        else:
            message = self._process_db_comm_protocol_type(loaded_json)
            if self.sendto_groundstation(message, DBPort.DB_PORT_COMMUNICATION.value) is None:
                status = True
    else:
        print(self.tag + "DB_COMM_PROTO: Unknown message destination")
    return status
def _process_db_comm_protocol_type(self, loaded_json):
    """Execute the command given in the DroneBridgeLib communication packet.

    Returns the response message to send back, or "" when the command
    produces no response (e.g. the deprecated MSP passthrough).
    The response origin depends on which side we are on: ground station
    (DB_TO_UAV direction) answers as GND, drone side answers as UAV.
    """
    message = ""
    if loaded_json['type'] == DBCommProt.DB_TYPE_MSP.value:
        # deprecated -- raw MSP passthrough to the flight controller port
        self.sendto_uav(base64.b64decode(loaded_json['MSP']), DBPort.DB_PORT_CONTROLLER.value)
    elif loaded_json['type'] == DBCommProt.DB_TYPE_SETTINGS_REQUEST.value:
        if self.comm_direction == DBDir.DB_TO_UAV:
            message = new_settingsresponse_message(loaded_json, DBCommProt.DB_ORIGIN_GND.value)
        else:
            message = new_settingsresponse_message(loaded_json, DBCommProt.DB_ORIGIN_UAV.value)
    elif loaded_json['type'] == DBCommProt.DB_TYPE_SETTINGS_CHANGE.value:
        if self.comm_direction == DBDir.DB_TO_UAV:
            message = change_settings(loaded_json, DBCommProt.DB_ORIGIN_GND.value)
        else:
            message = change_settings(loaded_json, DBCommProt.DB_ORIGIN_UAV.value)
    elif loaded_json['type'] == DBCommProt.DB_TYPE_SYS_IDENT_REQUEST.value:
        if self.comm_direction == DBDir.DB_TO_UAV:
            message = create_sys_ident_response(loaded_json, DBCommProt.DB_ORIGIN_GND.value)
        else:
            message = create_sys_ident_response(loaded_json, DBCommProt.DB_ORIGIN_UAV.value)
    elif loaded_json['type'] == DBCommProt.DB_TYPE_PING_REQUEST.value:
        if self.comm_direction == DBDir.DB_TO_UAV:
            message = new_ping_response_message(loaded_json, DBCommProt.DB_ORIGIN_GND.value)
        else:
            message = new_ping_response_message(loaded_json, DBCommProt.DB_ORIGIN_UAV.value)
    elif loaded_json['type'] == DBCommProt.DB_TYPE_CAMSELECT.value:
        # Camera selection happens on the drone, hence the UAV-origin ack.
        change_cam_selection(loaded_json['cam'])
        message = new_ack_message(DBCommProt.DB_ORIGIN_UAV.value, loaded_json['id'])
    elif loaded_json['type'] == DBCommProt.DB_TYPE_ADJUSTRC.value:
        # RC calibration happens on the ground station, hence the GND ack.
        # NOTE(review): unlike the cases above, these two acks ignore
        # comm_direction -- presumably intentional; confirm.
        normalize_jscal_axis(loaded_json['device'])
        message = new_ack_message(DBCommProt.DB_ORIGIN_GND.value, loaded_json['id'])
    else:
        if self.comm_direction == DBDir.DB_TO_UAV:
            message = new_error_response_message('unsupported message type', DBCommProt.DB_ORIGIN_GND.value,
                                                 loaded_json['id'])
        else:
            message = new_error_response_message('unsupported message type', DBCommProt.DB_ORIGIN_UAV.value,
                                                 loaded_json['id'])
        print(self.tag + "DB_COMM_PROTO: Unknown message type")
    return message
def _redirect_comm_to_drone(self, raw_data_encoded):
    """Forward a communication message to the drone and return its response.

    Returns whatever receive_from_db() yields after a short timeout
    (the parsed payload, or False if nothing arrived).
    """
    # Drop any stale packets before the very first request/response round.
    if self.first_run:
        self._clear_monitor_comm_socket_buffer()
        self.first_run = False
    self.sendto_uav(raw_data_encoded, DBPort.DB_PORT_COMMUNICATION.value)
    response = self.receive_from_db(custom_timeout=0.3)
    print(self.tag + "Parsed packet received from drone:")
    print(response)
    return response
def _sendto_tx_wifi(self, data_bytes):
    """Send LTM and other data to the ground station/smartphone in wifi mode.

    Blocks until the socket is writable, then returns the number of bytes
    sent (sendto's return value).
    """
    # Block inside select() until the socket is writable instead of the old
    # zero-timeout polling loop: same observable behavior, but no 100% CPU
    # busy-wait while the socket buffer is full.
    select.select([], [self.comm_sock], [])
    return self.comm_sock.sendto(data_bytes, (self.ip_rx, self.udp_port_rx))
def _sendto_rx_wifi(self, raw_data_bytes, port_bytes):
    """
    Send a packet to drone in wifi mode.

    Depending on message type different ports/programmes aka front ends on
    the drone need to be addressed. Returns the number of bytes sent
    (currently 0 for the unimplemented MSP path) or False on error.
    """
    if port_bytes == DBPort.DB_PORT_CONTROLLER.value:
        print(self.tag + "Sending MSP command to RX Controller (wifi)")
        try:
            # TODO: MSP forwarding over wifi is not implemented yet --
            # this branch is a stub and reports 0 bytes sent.
            num = 0
            pass
        except Exception:
            return False
        print(self.tag + "Sent it!")
    else:
        print(self.tag + "Sending a message to telemetry frontend on drone")
        num = self.comm_sock.sendto(raw_data_bytes, (self.ip_rx, self.udp_port_rx))
    return num
def _send_monitor(self, data_bytes, port_bytes, direction):
    """Send a packet in monitor mode using DroneBridgeLib raw protocol v2.

    Builds the DB v2 header (frame control + direction + comm id + port +
    little-endian payload length + sequence number) and prepends the
    radiotap header. Returns None on success (sendall's return value).
    """
    payload_length_bytes = bytes(len(data_bytes).to_bytes(2, byteorder='little', signed=False))
    # 8-bit sequence number, wraps around after 255
    if self.seq_num == 255:
        self.seq_num = 0
    else:
        self.seq_num += 1
    db_v2_raw_header = bytes(bytearray(self.fcf + direction + self.comm_id + port_bytes + payload_length_bytes +
                                       bytes([self.seq_num])))
    # Block inside select() until the raw socket is writable instead of the
    # old zero-timeout polling loop: identical behavior without busy-waiting.
    select.select([], [self.comm_sock], [])
    return self.comm_sock.sendall(RADIOTAP_HEADER + db_v2_raw_header + data_bytes)
def _open_comm_sock(self):
    """Open the socket that talks to the drone (tx side) or ground station (rx side)."""
    if self.mode == "wifi":
        return self._open_comm_udpsocket()
    return self._open_comm_monitorsocket()
def _open_comm_udpsocket(self):
    """Open and bind the UDP socket used for DroneBridgeLib communication in
    wifi mode.

    Returns the bound socket; blocking behavior depends on the
    communication direction.
    """
    print(self.tag + "Opening UDP-Socket for DroneBridgeLib communication")
    sock = socket(AF_INET, SOCK_DGRAM)
    server_address = ('', self.udp_port_rx)
    sock.bind(server_address)
    # NOTE(review): b'\x00' is presumably DBDir.DB_TO_UAV (ground side)
    # which gets a 1 s receive timeout; the other side is non-blocking.
    # Confirm against the DBDir enum definition.
    if self.comm_direction.value == b'\x00':
        sock.settimeout(1)
    else:
        sock.setblocking(False)
    return sock
def _open_comm_monitorsocket(self):
    """Open a raw packet socket bound to the monitor-mode interface and
    attach a packet filter so we only see frames addressed to our side.
    """
    print(self.tag + "Opening socket for monitor mode")
    raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0004))
    raw_socket.bind((self.interface, 0))
    # Blocking behavior depends on module/side -- see _set_comm_socket_behavior
    raw_socket = self._set_comm_socket_behavior(raw_socket)
    # Filter on the opposite direction: we want to receive what the other
    # side sends.
    if self.comm_direction == DBDir.DB_TO_GND:
        raw_socket = attach_filter(raw_socket, byte_comm_id=self.comm_id, byte_direction=DBDir.DB_TO_UAV.value,
                                   byte_port=self.db_port)  # filter for packets TO_DRONE
    else:
        raw_socket = attach_filter(raw_socket, byte_comm_id=self.comm_id, byte_direction=DBDir.DB_TO_GND.value,
                                   byte_port=self.db_port)  # filter for packets TO_GROUND
    return raw_socket
def _set_comm_socket_behavior(self, thesocket):
    """Set blocking vs. non-blocking mode depending on module (telemetry,
    communication) and on whether we run on the drone or on the ground."""
    adjusted_socket = thesocket
    # Drone side running the telemetry module
    drone_side_telemetry = (self.comm_direction == DBDir.DB_TO_GND
                            and self.db_port == DBPort.DB_PORT_TELEMETRY.value)
    # Ground side running the communication module
    ground_side_comm = (self.comm_direction == DBDir.DB_TO_UAV
                        and self.db_port == DBPort.DB_PORT_COMMUNICATION.value)
    if drone_side_telemetry or ground_side_comm:
        adjusted_socket.setblocking(False)
    return adjusted_socket
def _clear_monitor_comm_socket_buffer(self):
    """Drain any pending packets from the monitor-mode comm socket."""
    self.comm_sock.setblocking(False)
    # Keep reading while something arrives within one second, then restore
    # blocking mode.
    while select.select([self.comm_sock], [], [], 1)[0]:
        self.comm_sock.recv(8192)
    self.comm_sock.setblocking(True)
def _open_android_udpsocket(self):
    """Open the broadcast-capable UDP socket used to talk to the smartphone
    app; returns the non-blocking socket."""
    print(self.tag + "Opening UDP-Socket to smartphone on port: " + str(self.udp_port_smartphone))
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    # Only the communication module binds to receive from the smartphone;
    # presumably other modules use this socket for sending only -- confirm.
    if self.db_port == DBPort.DB_PORT_COMMUNICATION.value:
        address = ('', self.udp_port_smartphone)
        sock.bind(address)
    sock.setblocking(False)
    return sock
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Utilities for extracting color channels out of arbitrary images
"""
import cv2
import numpy as np
import enum
from .text import putTextAutoscale
__all__ = ["ColorspaceChannel", "Colorspace", "convert_to_colorspace",
"extract_channel", "colorspace_components_overview"]
class Colorspace(enum.IntEnum):
    """Colorspaces supported by this module (values index the channel table)."""
    BGR = 0  # "Standard" OpenCV colorspace
    RGB = 1
    HSV = 2
    LAB = 3
    YUV = 4
    YCrCb = 5
    HLS = 6
    LUV = 7
    XYZ = 8

    @property
    def channels(self):
        """Tuple of the three ColorspaceChannel members of this colorspace."""
        base = self.value * 3
        return tuple(ColorspaceChannel(base + offset) for offset in range(3))
class ColorspaceChannel(enum.IntEnum):
    """
    Different types of color channels.

    Value layout: (colorspace index * 3) + channel index within the space.
    """
    # BGR
    BGR_Blue = 0
    BGR_Green = 1
    BGR_Red = 2
    # RGB
    RGB_Red = 3
    RGB_Green = 4
    RGB_Blue = 5
    # HSV
    HSV_Hue = 6
    HSV_Saturation = 7
    HSV_Value = 8
    # LAB
    LAB_L = 9
    LAB_a = 10
    LAB_b = 11
    # YUV
    YUV_Luma = 12  # Y
    YUV_U = 13
    YUV_V = 14
    # YCrCb
    YCrCb_Luma = 15  # Y
    YCrCb_Cr = 16
    YCrCb_Cb = 17
    # HLS
    HLS_Hue = 18
    HLS_Lightness = 19
    HLS_Saturation = 20
    # LUV
    LUV_L = 21
    LUV_U = 22
    LUV_V = 23
    # XYZ
    XYZ_X = 24
    XYZ_Y = 25
    XYZ_Z = 26

    @property
    def colorspace(self):
        """The Colorspace this channel belongs to."""
        space_idx, _ = divmod(self.value, 3)
        return Colorspace(space_idx)

    @property
    def channel_idx(self):
        """Index of this channel within its colorspace (0 to 2)."""
        return divmod(self.value, 3)[1]

    @property
    def channel_name(self):
        """Channel name without the colorspace prefix (RGB_Red -> Red)."""
        return self.name.split("_", 1)[1]
# Arguments to convert BGR to another colorspace (cv2.cvtColor codes).
# None means "already BGR, no conversion required".
_colorspace_cvt_from_bgr = {
    Colorspace.BGR: None,
    Colorspace.RGB: cv2.COLOR_BGR2RGB,
    Colorspace.HSV: cv2.COLOR_BGR2HSV,
    Colorspace.LAB: cv2.COLOR_BGR2LAB,
    Colorspace.YUV: cv2.COLOR_BGR2YUV,
    Colorspace.YCrCb: cv2.COLOR_BGR2YCrCb,
    Colorspace.HLS: cv2.COLOR_BGR2HLS,
    Colorspace.LUV: cv2.COLOR_BGR2LUV,
    Colorspace.XYZ: cv2.COLOR_BGR2XYZ
}

# Arguments to convert a colorspace to BGR (inverse of the table above).
_colorspace_cvt_to_bgr = {
    Colorspace.BGR: None,
    Colorspace.RGB: cv2.COLOR_RGB2BGR,
    Colorspace.HSV: cv2.COLOR_HSV2BGR,
    Colorspace.LAB: cv2.COLOR_LAB2BGR,
    Colorspace.YUV: cv2.COLOR_YUV2BGR,
    Colorspace.YCrCb: cv2.COLOR_YCrCb2BGR,
    Colorspace.HLS: cv2.COLOR_HLS2BGR,
    Colorspace.LUV: cv2.COLOR_LUV2BGR,
    Colorspace.XYZ: cv2.COLOR_XYZ2BGR
}
def convert_to_colorspace(img, new_colorspace, source=Colorspace.BGR):
    """
    Convert an image in an arbitrary colorspace
    to another colorspace using OpenCV

    Parameters
    ==========
    img : NumPy image
        Any supported OpenCV image
    new_colorspace : Colorspace enum
        The target colorspace
    source : Colorspace enum
        The source colorspace.
        If in doubt, BGR is probably right

    Returns
    =======
    The converted image, or img unchanged if
    source == new_colorspace.
    """
    # Fast path: nothing to do. This also avoids the previous lossy
    # source -> BGR -> source round trip when a non-BGR image was
    # "converted" to its own colorspace.
    if source == new_colorspace:
        return img
    # Convert from source to BGR (all conversions here are routed via BGR)
    if source != Colorspace.BGR:
        img = cv2.cvtColor(img, _colorspace_cvt_to_bgr[source])
    # Convert from BGR to target
    cvt = _colorspace_cvt_from_bgr[new_colorspace]
    if cvt is None:  # target is BGR, we are already there
        return img
    return cv2.cvtColor(img, cvt)
def extract_channel(img, channel, source=Colorspace.BGR, as_rgb=False):
    """
    Extract a single channel of an arbitrary colorspace from an image.

    Parameters
    ==========
    img : NumPy / OpenCV image
    channel : ColorspaceChannel enum
        The target channel
    source : Colorspace enum
        The current colorspace of the image
    as_rgb : bool
        Set to True to obtain the grayscale result as a 3-channel RGB image

    Returns
    =======
    The resulting channel as a NumPy image.
    The returned array is similar to a grayscale image.
    """
    # First bring the image into the colorspace the channel lives in,
    # then slice out the requested plane.
    converted = convert_to_colorspace(img, channel.colorspace, source)
    gray = converted[:, :, channel.channel_idx]
    if as_rgb:
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    return gray
def colorspace_components_overview(img):
    """
    Render an image that shows all channels of the given image
    in all colorspaces in an ordered and labeled manner.

    Returns a single-channel uint8 canvas: one row per colorspace, each row
    being a label band followed by the three channel images side by side.
    """
    height, width, _ = img.shape
    ncolspaces = len(Colorspace)
    # Gap sizes and label-band height, proportional to the input image
    hspace = int(0.1 * width)
    vspace = int(0.1 * height)
    textheight = int(0.3 * height)
    # Total canvas size: per colorspace a label band plus an image row,
    # with vertical gaps between rows; three columns with two gaps.
    h = ncolspaces * height + vspace * (ncolspaces - 1) + textheight * ncolspaces
    w = width * 3 + hspace * 2
    # White single-channel canvas
    out = np.full((h, w), 255, dtype=np.uint8)
    for i, colorspace in enumerate(Colorspace):
        # Compute offsets
        vofs = textheight * (i + 1) + (vspace + height) * i
        hofs = lambda col: hspace * col + width * col
        # NOTE(review): these are floats (true division) -- confirm
        # putTextAutoscale accepts non-integer coordinates.
        textvofs = vofs - textheight / 2
        texthofs = lambda col: hofs(col) + width / 2
        # Get channels of current colorspace
        channels = colorspace.channels
        # Channel label text, e.g. "RGB Red"
        chn0txt = "{} {}".format(colorspace.name, colorspace.channels[0].channel_name)
        chn1txt = "{} {}".format(colorspace.name, colorspace.channels[1].channel_name)
        chn2txt = "{} {}".format(colorspace.name, colorspace.channels[2].channel_name)
        # Extract all channels as grayscale images
        chn0 = extract_channel(img, channels[0])
        chn1 = extract_channel(img, channels[1])
        chn2 = extract_channel(img, channels[2])
        # Copy image channels to output
        out[vofs:vofs + height, hofs(0):hofs(0) + width] = chn0
        out[vofs:vofs + height, hofs(1):hofs(1) + width] = chn1
        out[vofs:vofs + height, hofs(2):hofs(2) + width] = chn2
        # Render text labels centered above each channel image
        putTextAutoscale(out, chn0txt, (texthofs(0), textvofs),
                         cv2.FONT_HERSHEY_COMPLEX, width, textheight, color=0)
        putTextAutoscale(out, chn1txt, (texthofs(1), textvofs),
                         cv2.FONT_HERSHEY_COMPLEX, width, textheight, color=0)
        putTextAutoscale(out, chn2txt, (texthofs(2), textvofs),
                         cv2.FONT_HERSHEY_COMPLEX, width, textheight, color=0)
    return out
| |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from time import time
from unittest import main, TestCase
from test.unit import FakeLogger, FakeRing, mocked_http_conn
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
import mock
from swift.common import internal_client, utils
from swift.obj import expirer
def not_random():
    """Deterministic stand-in for random.random(): always returns 0.5."""
    return 0.5
# Records the most recent delay requested via not_sleep() for assertions.
last_not_sleep = 0


def not_sleep(seconds):
    """Stand-in for sleep() that records the requested delay instead of sleeping."""
    global last_not_sleep
    last_not_sleep = seconds
class TestObjectExpirer(TestCase):
maxDiff = None
def setUp(self):
    # Patch internal_client's loadapp/sleep with test doubles so no real
    # WSGI pipeline is loaded and nothing actually sleeps; keep the
    # originals so they can be restored after the test.
    global not_sleep
    self.old_loadapp = internal_client.loadapp
    self.old_sleep = internal_client.sleep
    internal_client.loadapp = lambda *a, **kw: None
    internal_client.sleep = not_sleep
    # Temp dir for recon cache files and a logger that records calls.
    self.rcache = mkdtemp()
    self.logger = FakeLogger()
def tearDown(self):
    """Remove the temp dir and restore the patched internal_client attrs.

    Fixes two bugs: unittest only calls the camelCase ``tearDown`` (the
    previous lowercase ``teardown`` was never invoked), and ``loadapp``
    was restored from the nonexistent ``self.loadapp`` instead of
    ``self.old_loadapp`` saved in setUp.
    """
    rmtree(self.rcache)
    internal_client.sleep = self.old_sleep
    internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
    # kwargs provide (processes, process) when the config is empty.
    x = expirer.ObjectExpirer({})
    vals = {
        'processes': 5,
        'process': 1,
    }
    self.assertEqual((5, 1), x.get_process_values(vals))
def test_get_process_values_from_config(self):
    # Values fall back to the daemon config when kwargs are empty.
    vals = {
        'processes': 5,
        'process': 1,
    }
    x = expirer.ObjectExpirer(vals)
    self.assertEqual((5, 1), x.get_process_values({}))
def test_get_process_values_negative_process(self):
    # A negative 'process' must raise ValueError, whether it comes from
    # the config or from kwargs.
    vals = {
        'processes': 5,
        'process': -1,
    }
    # from config
    x = expirer.ObjectExpirer(vals)
    self.assertRaises(ValueError, x.get_process_values, {})
    # from kwargs
    x = expirer.ObjectExpirer({})
    self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_negative_processes(self):
    # A negative 'processes' must raise ValueError, whether it comes from
    # the config or from kwargs.
    vals = {
        'processes': -5,
        'process': 1,
    }
    # from config
    x = expirer.ObjectExpirer(vals)
    self.assertRaises(ValueError, x.get_process_values, {})
    # from kwargs
    x = expirer.ObjectExpirer({})
    self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_process_greater_than_processes(self):
    # 'process' must be smaller than 'processes'; otherwise ValueError.
    vals = {
        'processes': 5,
        'process': 7,
    }
    # from config
    x = expirer.ObjectExpirer(vals)
    self.assertRaises(ValueError, x.get_process_values, {})
    # from kwargs
    x = expirer.ObjectExpirer({})
    self.assertRaises(ValueError, x.get_process_values, vals)
def test_init_concurrency_too_small(self):
    # concurrency must be at least 1; zero or negative is rejected.
    conf = {
        'concurrency': 0,
    }
    self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
    conf = {
        'concurrency': -1,
    }
    self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
    """Each of the configured processes must delete a distinct subset of
    the containers; together the subsets must cover everything exactly."""

    class ObjectExpirer(expirer.ObjectExpirer):
        # Records deletions instead of performing them.

        def __init__(self, conf):
            super(ObjectExpirer, self).__init__(conf)
            self.processes = 3
            self.deleted_objects = {}

        def delete_object(self, actual_obj, timestamp, container, obj):
            if container not in self.deleted_objects:
                self.deleted_objects[container] = set()
            self.deleted_objects[container].add(obj)

    class InternalClient(object):
        # Minimal stub of swift's InternalClient over a dict of containers.

        def __init__(self, containers):
            self.containers = containers

        def get_account_info(self, *a, **kw):
            return len(self.containers.keys()), \
                sum([len(self.containers[x]) for x in self.containers])

        def iter_containers(self, *a, **kw):
            return [{'name': x} for x in self.containers.keys()]

        def iter_objects(self, account, container):
            return [{'name': x} for x in self.containers[container]]

        def delete_container(*a, **kw):
            pass

    # One container keyed by unicode to exercise utf8 handling.
    ukey = u'3'
    containers = {
        0: set('1-one 2-two 3-three'.split()),
        1: set('2-two 3-three 4-four'.split()),
        2: set('5-five 6-six'.split()),
        ukey: set(u'7-seven\u2661'.split()),
    }
    x = ObjectExpirer({})
    x.swift = InternalClient(containers)

    deleted_objects = {}
    # NOTE: xrange -- this test file is Python 2 only.
    for i in xrange(3):
        x.process = i
        x.run_once()
        # Every process pass must delete something new.
        self.assertNotEqual(deleted_objects, x.deleted_objects)
        deleted_objects = deepcopy(x.deleted_objects)
    self.assertEqual(containers[ukey].pop(),
                     deleted_objects[ukey].pop().decode('utf8'))
    self.assertEqual(containers, deleted_objects)
def test_delete_object(self):
    # delete_object() must call delete_actual_object() with the right
    # arguments and then pop the queue entry for the expiring object.

    class InternalClient(object):
        container_ring = None

        def __init__(self, test, account, container, obj):
            self.test = test
            self.account = account
            self.container = container
            self.obj = obj
            self.delete_object_called = False

    class DeleteActualObject(object):
        # Callable stub that verifies the arguments it is invoked with.

        def __init__(self, test, actual_obj, timestamp):
            self.test = test
            self.actual_obj = actual_obj
            self.timestamp = timestamp
            self.called = False

        def __call__(self, actual_obj, timestamp):
            self.test.assertEqual(self.actual_obj, actual_obj)
            self.test.assertEqual(self.timestamp, timestamp)
            self.called = True

    container = 'container'
    obj = 'obj'
    actual_obj = 'actual_obj'
    timestamp = 'timestamp'

    x = expirer.ObjectExpirer({}, logger=self.logger)
    x.swift = \
        InternalClient(self, x.expiring_objects_account, container, obj)
    x.delete_actual_object = \
        DeleteActualObject(self, actual_obj, timestamp)

    delete_object_called = []

    def pop_queue(c, o):
        self.assertEqual(container, c)
        self.assertEqual(obj, o)
        delete_object_called[:] = [True]

    x.pop_queue = pop_queue

    x.delete_object(actual_obj, timestamp, container, obj)
    self.assertTrue(delete_object_called)
    self.assertTrue(x.delete_actual_object.called)
def test_report(self):
    # report() logs nothing before the interval elapses; report(final=True)
    # logs a 'completed' line; an overdue interim report logs 'so far'.
    x = expirer.ObjectExpirer({}, logger=self.logger)

    x.report()
    self.assertEqual(x.logger.log_dict['info'], [])

    x.logger._clear()
    x.report(final=True)
    self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
                    x.logger.log_dict['info'])
    self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
                    x.logger.log_dict['info'])

    x.logger._clear()
    # Force the interim report to be due.
    x.report_last_time = time() - x.report_interval
    x.report()
    self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
                    x.logger.log_dict['info'])
    self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
                    x.logger.log_dict['info'])
def test_run_once_nothing_to_do(self):
    # An unusable swift client must be caught and logged, not raised.
    x = expirer.ObjectExpirer({}, logger=self.logger)
    x.swift = 'throw error because a string does not have needed methods'
    x.run_once()
    self.assertEqual(x.logger.log_dict['exception'],
                     [(("Unhandled exception",), {},
                       "'str' object has no attribute "
                       "'get_account_info'")])
def test_run_once_calls_report(self):
    # run_once() must log a 'Pass beginning' and a 'Pass completed' line
    # even when there is nothing to expire.

    class InternalClient(object):
        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(*a, **kw):
            return []

    x = expirer.ObjectExpirer({}, logger=self.logger)
    x.swift = InternalClient()
    x.run_once()
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 0 objects expired',), {})])
def test_container_timestamp_break(self):
    # Containers whose name-timestamp lies in the future must be skipped
    # without even listing their objects.

    class InternalClient(object):
        def __init__(self, containers):
            self.containers = containers

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def iter_objects(*a, **kw):
            # Listing objects of a future container would be a bug.
            raise Exception('This should not have been called')

    x = expirer.ObjectExpirer({'recon_cache_path': self.rcache},
                              logger=self.logger)
    x.swift = InternalClient([{'name': str(int(time() + 86400))}])
    x.run_once()
    for exccall in x.logger.log_dict['exception']:
        self.assertTrue(
            'This should not have been called' not in exccall[0][0])
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 0 objects expired',), {})])

    # Reverse test to be sure it still would blow up the way expected.
    fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.run_once()
    self.assertEqual(
        x.logger.log_dict['exception'],
        [(('Unhandled exception',), {},
          str(Exception('This should not have been called')))])
def test_object_timestamp_break(self):
    # Objects whose name-timestamp lies in the future must not be deleted.

    class InternalClient(object):
        def __init__(self, containers, objects):
            self.containers = containers
            self.objects = objects

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def delete_container(*a, **kw):
            pass

        def iter_objects(self, *a, **kw):
            return self.objects

    def should_not_be_called(*a, **kw):
        raise Exception('This should not have been called')

    # Future object timestamp: delete must not be attempted.
    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': '%d-actual-obj' % int(time() + 86400)}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.run_once()
    for exccall in x.logger.log_dict['exception']:
        self.assertTrue(
            'This should not have been called' not in exccall[0][0])
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 0 objects expired',), {})])

    # Reverse test to be sure it still would blow up the way expected.
    ts = int(time() - 86400)
    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': '%d-actual-obj' % ts}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.delete_actual_object = should_not_be_called
    x.run_once()
    excswhiledeleting = []
    for exccall in x.logger.log_dict['exception']:
        if exccall[0][0].startswith('Exception while deleting '):
            excswhiledeleting.append(exccall[0][0])
    self.assertEqual(
        excswhiledeleting,
        ['Exception while deleting object %d %d-actual-obj '
         'This should not have been called' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
    # If deleting the actual object fails, the queue entry must NOT be
    # popped (so the expirer retries it on a later pass).

    class InternalClient(object):
        container_ring = None

        def __init__(self, containers, objects):
            self.containers = containers
            self.objects = objects

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def delete_container(*a, **kw):
            pass

        def iter_objects(self, *a, **kw):
            return self.objects

    def deliberately_blow_up(actual_obj, timestamp):
        raise Exception('failed to delete actual object')

    def should_not_get_called(container, obj):
        raise Exception('This should not have been called')

    ts = int(time() - 86400)
    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': '%d-actual-obj' % ts}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.iter_containers = lambda: [str(int(time() - 86400))]
    x.delete_actual_object = deliberately_blow_up
    x.pop_queue = should_not_get_called
    x.run_once()
    excswhiledeleting = []
    for exccall in x.logger.log_dict['exception']:
        if exccall[0][0].startswith('Exception while deleting '):
            excswhiledeleting.append(exccall[0][0])
    self.assertEqual(
        excswhiledeleting,
        ['Exception while deleting object %d %d-actual-obj '
         'failed to delete actual object' % (ts, ts)])
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 0 objects expired',), {})])

    # Reverse test to be sure it still would blow up the way expected:
    # with a succeeding delete, pop_queue IS reached and raises.
    ts = int(time() - 86400)
    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': '%d-actual-obj' % ts}])
    self.logger._clear()
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.delete_actual_object = lambda o, t: None
    x.pop_queue = should_not_get_called
    x.run_once()
    excswhiledeleting = []
    for exccall in x.logger.log_dict['exception']:
        if exccall[0][0].startswith('Exception while deleting '):
            excswhiledeleting.append(exccall[0][0])
    self.assertEqual(
        excswhiledeleting,
        ['Exception while deleting object %d %d-actual-obj This should '
         'not have been called' % (ts, ts)])
def test_success_gets_counted(self):
    # A successful deletion must increment report_objects.

    class InternalClient(object):
        container_ring = None

        def __init__(self, containers, objects):
            self.containers = containers
            self.objects = objects

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def delete_container(*a, **kw):
            pass

        def delete_object(*a, **kw):
            pass

        def iter_objects(self, *a, **kw):
            return self.objects

    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': '%d-actual-obj' % int(time() - 86400)}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.delete_actual_object = lambda o, t: None
    x.pop_queue = lambda c, o: None
    self.assertEqual(x.report_objects, 0)
    x.run_once()
    self.assertEqual(x.report_objects, 1)
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 1 objects expired',), {})])
def test_delete_actual_object_does_not_get_unicode(self):
    # Object names from listings may be unicode; delete_actual_object()
    # must always receive utf8-encoded str, never unicode.

    class InternalClient(object):
        container_ring = None

        def __init__(self, containers, objects):
            self.containers = containers
            self.objects = objects

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def delete_container(*a, **kw):
            pass

        def delete_object(*a, **kw):
            pass

        def iter_objects(self, *a, **kw):
            return self.objects

    got_unicode = [False]

    def delete_actual_object_test_for_unicode(actual_obj, timestamp):
        # NOTE: `unicode` -- this test file is Python 2 only.
        if isinstance(actual_obj, unicode):
            got_unicode[0] = True

    fake_swift = InternalClient(
        [{'name': str(int(time() - 86400))}],
        [{'name': u'%d-actual-obj' % int(time() - 86400)}])
    x = expirer.ObjectExpirer({}, logger=self.logger, swift=fake_swift)
    x.delete_actual_object = delete_actual_object_test_for_unicode
    x.pop_queue = lambda c, o: None
    self.assertEqual(x.report_objects, 0)
    x.run_once()
    self.assertEqual(x.report_objects, 1)
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 1 objects expired',), {})])
    self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
    # Failures deleting one object or container must not abort the pass;
    # every container/object combination is still attempted.

    class InternalClient(object):
        container_ring = None

        def __init__(self, containers, objects):
            self.containers = containers
            self.objects = objects

        def get_account_info(*a, **kw):
            return 1, 2

        def iter_containers(self, *a, **kw):
            return self.containers

        def delete_container(*a, **kw):
            raise Exception('failed to delete container')

        def delete_object(*a, **kw):
            pass

        def iter_objects(self, *a, **kw):
            return self.objects

    def fail_delete_actual_object(actual_obj, timestamp):
        raise Exception('failed to delete actual object')

    x = expirer.ObjectExpirer({}, logger=self.logger)

    cts = int(time() - 86400)
    ots = int(time() - 86400)

    containers = [
        {'name': str(cts)},
        {'name': str(cts + 1)},
    ]
    objects = [
        {'name': '%d-actual-obj' % ots},
        {'name': '%d-next-obj' % ots}
    ]

    x.swift = InternalClient(containers, objects)
    x.delete_actual_object = fail_delete_actual_object
    x.run_once()
    excswhiledeleting = []
    for exccall in x.logger.log_dict['exception']:
        if exccall[0][0].startswith('Exception while deleting '):
            excswhiledeleting.append(exccall[0][0])
    # All four object failures and both container failures must be logged.
    self.assertEqual(sorted(excswhiledeleting), sorted([
        'Exception while deleting object %d %d-actual-obj failed to '
        'delete actual object' % (cts, ots),
        'Exception while deleting object %d %d-next-obj failed to '
        'delete actual object' % (cts, ots),
        'Exception while deleting object %d %d-actual-obj failed to '
        'delete actual object' % (cts + 1, ots),
        'Exception while deleting object %d %d-next-obj failed to '
        'delete actual object' % (cts + 1, ots),
        'Exception while deleting container %d failed to delete '
        'container' % (cts,),
        'Exception while deleting container %d failed to delete '
        'container' % (cts + 1,)]))
    self.assertEqual(
        x.logger.log_dict['info'],
        [(('Pass beginning; 1 possible containers; '
           '2 possible objects',), {}),
         (('Pass completed in 0s; 0 objects expired',), {})])
def test_run_forever_initial_sleep_random(self):
    # run_forever() must sleep random() * interval before the first pass.
    global last_not_sleep

    def raise_system_exit():
        raise SystemExit('test_run_forever')

    interval = 1234
    x = expirer.ObjectExpirer({'__file__': 'unit_test',
                               'interval': interval})
    orig_random = expirer.random
    orig_sleep = expirer.sleep
    try:
        expirer.random = not_random
        expirer.sleep = not_sleep
        x.run_once = raise_system_exit
        x.run_forever()
    except SystemExit as err:
        pass
    finally:
        expirer.random = orig_random
        expirer.sleep = orig_sleep
    # NOTE: reading `err` after the except block works on Python 2 only;
    # Python 3 deletes the name when the handler exits.
    self.assertEqual(str(err), 'test_run_forever')
    self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
    # Ordinary exceptions from run_once() are logged and the loop keeps
    # going; SystemExit escapes and stops it.
    raises = [0]

    def raise_exceptions():
        raises[0] += 1
        if raises[0] < 2:
            raise Exception('exception %d' % raises[0])
        raise SystemExit('exiting exception %d' % raises[0])

    x = expirer.ObjectExpirer({}, logger=self.logger)
    orig_sleep = expirer.sleep
    try:
        expirer.sleep = not_sleep
        x.run_once = raise_exceptions
        x.run_forever()
    except SystemExit as err:
        pass
    finally:
        expirer.sleep = orig_sleep
    # NOTE: reading `err` after the except block works on Python 2 only.
    self.assertEqual(str(err), 'exiting exception 2')
    self.assertEqual(x.logger.log_dict['exception'],
                     [(('Unhandled exception',), {},
                       'exception 1')])
def test_delete_actual_object(self):
    # The DELETE request must carry the queue timestamp in X-If-Delete-At.
    got_env = [None]

    def fake_app(env, start_response):
        got_env[0] = env
        start_response('204 No Content', [('Content-Length', '0')])
        return []

    internal_client.loadapp = lambda *a, **kw: fake_app
    x = expirer.ObjectExpirer({})
    ts = '1234'
    x.delete_actual_object('/path/to/object', ts)
    self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_raises_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_handles_412(self):
def fake_app(env, start_response):
start_response('412 Precondition Failed',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
x.delete_actual_object('/path/to/object', '1234')
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
'503 Internal Server Error',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', '1234')
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name should get quoted'
timestamp = '1366063156.863045'
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
x.swift.make_request.assert_called_once()
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.quote(name))
    def test_pop_queue(self):
        # pop_queue('c', 'o') should DELETE the queue marker object from the
        # hidden .expiring_objects account; three mocked 200 responses are
        # provided, presumably one per container-ring replica -- confirm
        # against FakeRing's replica count.
        class InternalClient(object):
            container_ring = FakeRing()
        x = expirer.ObjectExpirer({}, logger=self.logger,
                                  swift=InternalClient())
        requests = []
        def capture_requests(ipaddr, port, method, path, *args, **kwargs):
            # Record only what is asserted on: the HTTP verb and the path.
            requests.append((method, path))
        with mocked_http_conn(
                200, 200, 200, give_connect=capture_requests) as fake_conn:
            x.pop_queue('c', 'o')
            # All mocked responses must have been consumed.
            self.assertRaises(StopIteration, fake_conn.code_iter.next)
        for method, path in requests:
            self.assertEqual(method, 'DELETE')
            device, part, account, container, obj = utils.split_path(
                path, 5, 5, True)
            self.assertEqual(account, '.expiring_objects')
            self.assertEqual(container, 'c')
            self.assertEqual(obj, 'o')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    main()
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import json
import os
import re
import sys
import time
import urllib
import uuid
import fixtures
import testscenarios
import testtools
from tempest import clients
from tempest.common import credentials
import tempest.common.generator.valid_generator as valid
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = config.CONF
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.

    :param type: a string or a list of strings; each value is applied as a
        testtools attribute, and 'smoke' is additionally tagged 'gate'.
    """
    def decorator(f):
        if 'type' in kwargs and isinstance(kwargs['type'], str):
            f = testtools.testcase.attr(kwargs['type'])(f)
            if kwargs['type'] == 'smoke':
                f = testtools.testcase.attr('gate')(f)
        elif 'type' in kwargs and isinstance(kwargs['type'], list):
            # NOTE: the loop variable used to be named 'attr', shadowing
            # this decorator function itself; renamed to avoid confusion.
            for attr_name in kwargs['type']:
                f = testtools.testcase.attr(attr_name)(f)
                if attr_name == 'smoke':
                    f = testtools.testcase.attr('gate')(f)
        return f
    return decorator
def get_service_list():
    """Return a mapping of service name to its availability flag.

    Values come from the service_available config group; network and
    identity are always reported available.
    """
    return {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
        'telemetry': CONF.service_available.ceilometer,
        'data_processing': CONF.service_available.sahara,
    }
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.
    """
    valid_services = ('compute', 'image', 'baremetal', 'volume',
                      'orchestration', 'network', 'identity',
                      'object_storage', 'dashboard', 'telemetry',
                      'data_processing')

    def decorator(f):
        # Reject unknown service tags up front, at decoration time.
        for service in args:
            if service not in valid_services:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # Skip at run time if any tagged service is unavailable.
            service_list = get_service_list()
            for service in args:
                if not service_list[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
        ``application``: once in the stress job lifetime
        ``process``: once in the worker process lifetime
        ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # Defaults: per-process setup, no inheritance of the attribute.
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            enabled = is_extension_enabled(kwargs['extension'],
                                           kwargs['service'])
            if not enabled:
                msg = "Skipped because %s extension: %s is not enabled" % (
                    kwargs['service'], kwargs['extension'])
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config

    Returns False when no extensions are configured for the service, True
    when the configured list starts with the wildcard 'all' or contains
    the named extension.
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_exts = config_dict[service]
    if not enabled_exts:
        return False
    return enabled_exts[0] == 'all' or extension_name in enabled_exts
# Classes register themselves here in setUp() and are discarded in
# tearDownClass(); anything left at interpreter exit skipped the base
# tearDownClass (usually a subclass forgetting to call super()).
at_exit_set = set()
def validate_tearDownClass():
    # Invoked via atexit: report every class whose tearDownClass never ran.
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.
    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).
    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup
    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_isolated_creds (defined in the base test class)
    - resource_cleanup
    """
    # Checked in setUp() to detect subclasses that override setUpClass
    # without calling super().
    setUpClassCalled = False
    # Service tag forwarded to the client managers.
    _service = None
    # Populated at most once via set_network_resources().
    network_resources = {}
    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')
    @classmethod
    def setUpClass(cls):
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) to be invoked in reverse order at teardown
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                etype, cls.__name__))
            cls.tearDownClass()
            # Python 2 three-expression raise: re-raise with the original
            # traceback so the report points at the real culprit.
            try:
                raise etype, value, trace
            finally:
                del trace  # to avoid circular refs
    @classmethod
    def tearDownClass(cls):
        # This class did run its tearDownClass; unregister from the
        # at-exit sanity check.
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, and not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                raise etype, value, trace
            finally:
                del trace  # to avoid circular refs
    @classmethod
    def skip_checks(cls):
        """Class level skip checks. Subclasses verify in here all
        conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use of API calls, and should
        rely on configuration alone.
        In general skip checks that require an API call are discouraged.
        If one is really needed it may be implemented either in the
        resource_setup or at test level.
        """
        pass
    @classmethod
    def setup_credentials(cls):
        """Allocate credentials and the client managers from them."""
        # TODO(andreaf) There is a fair amount of code that could be moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify a list of (additional) credentials the need to use.
        pass
    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could be moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass
    @classmethod
    def resource_setup(cls):
        """Class level resource setup for test cases.
        """
        pass
    @classmethod
    def resource_cleanup(cls):
        """Class level resource cleanup for test cases.
        Resource cleanup must be able to handle the case of partially setup
        resources, in case a failure during `resource_setup` should happen.
        """
        pass
    def setUp(self):
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            raise RuntimeError("setUpClass does not calls the super's"
                               "setUpClass in the "
                               + self.__class__.__name__)
        # Register for the at-exit check; tearDownClass discards us again.
        at_exit_set.add(self.__class__)
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # Non-numeric values disable the per-test timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        # Log capture defaults to ON unless explicitly disabled.
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))
    @classmethod
    def get_client_manager(cls):
        """
        Returns an OpenStack client manager
        """
        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
        # Re-create the isolated credentials when they belong to another
        # test class (the provider is named after the owning class).
        if (not hasattr(cls, 'isolated_creds') or
                not cls.isolated_creds.name == cls.__name__):
            cls.isolated_creds = credentials.get_isolated_credentials(
                name=cls.__name__, network_resources=cls.network_resources,
                force_tenant_isolation=force_tenant_isolation,
            )
        creds = cls.isolated_creds.get_primary_creds()
        os = clients.Manager(credentials=creds, service=cls._service)
        return os
    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        if hasattr(cls, 'isolated_creds'):
            cls.isolated_creds.clear_isolated_creds()
    @classmethod
    def _get_identity_admin_client(cls):
        """
        Returns an instance of the Identity Admin API client
        """
        os = clients.AdminManager(service=cls._service)
        admin_client = os.identity_client
        return admin_client
    @classmethod
    def set_network_resources(cls, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created
        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not cls.network_resources:
            cls.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}
    def assertEmpty(self, list, msg=None):
        # NOTE(review): the parameter name shadows the builtin 'list'.
        self.assertTrue(len(list) == 0, msg)
    def assertNotEmpty(self, list, msg=None):
        self.assertTrue(len(list) > 0, msg)
class NegativeAutoTest(BaseTestCase):
    # Registry of known-valid resource ids, filled via set_resource() and
    # consulted by get_resource(); shared across the class hierarchy.
    _resources = {}
    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        os = cls.get_client_manager()
        cls.client = os.negative_client
        os_admin = clients.AdminManager(service=cls._service)
        cls.admin_client = os_admin.negative_client
    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # The two load_tests protocols pass arguments in different orders;
        # a loader with 'suiteClass' identifies the (loader, tests, pattern)
        # form.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)
    @staticmethod
    def generate_scenario(description):
        """
        Generates the test scenario list for a given description.
        :param description: A file or dictionary with the following entries:
        name (required) name for the api
        http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
        url (required) the url to be appended to the catalog url with '%s'
        for each resource mentioned
        resources: (optional) A list of resource names such as "server",
        "flavor", etc. with an element for each '%s' in the url. This
        method will call self.get_resource for each element when
        constructing the positive test case template so negative
        subclasses are expected to return valid resource ids when
        appropriate.
        json-schema (optional) A valid json schema that will be used to
        create invalid data for the api calls. For "GET" and "HEAD",
        the data is used to generate query strings appended to the url,
        otherwise for the body of the http call.
        """
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        # One scenario per resource: substitute a random (invalid) uuid.
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        # Additional scenarios generated from the (invalid) json-schema data.
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list
    def execute(self, description):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are part
        of the url, and then invalid data for queries and http request bodies.
        :param description: A json file or dictionary with the following
        entries:
        name (required) name for the api
        http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
        url (required) the url to be appended to the catalog url with '%s'
        for each resource mentioned
        resources: (optional) A list of resource names such as "server",
        "flavor", etc. with an element for each '%s' in the url. This
        method will call self.get_resource for each element when
        constructing the positive test case template so negative
        subclasses are expected to return valid resource ids when
        appropriate.
        json-schema (optional) A valid json schema that will be used to
        create invalid data for the api calls. For "GET" and "HEAD",
        the data is used to generate query strings appended to the url,
        otherwise for the body of the http call.
        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]
        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]
        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            # Scenario driven by invalid schema data: the generator also
            # supplies the payload and may override the expected status.
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")
        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
    def _http_arguments(self, json_dict, url, method):
        # For body-less methods the data travels as a query string,
        # otherwise as a JSON request body.
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)
    def _check_negative_response(self, expected_result, result, body):
        # Any 4xx except 413 counts as the expected client error.
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))
    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a resource
        for a test.
        :param name: The name of the kind of resource such as "flavor", "role",
        etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource
    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.
        :param name: The name of the kind of resource such as "flavor", "role",
        etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # An "invalid resource" scenario overrides the registered valid id.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
def SimpleNegativeAutoTest(klass):
    """
    This decorator registers a test function on basis of the class name.
    """
    @attr(type=['negative', 'gate'])
    def generic_test(self):
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    # Derive the test method name from the class name: drop the JSON/Test
    # markers, then turn CamelCase into snake_case.
    base_name = klass.__name__.replace('JSON', '').replace('Test', '')
    snake_name = re.sub('(?<!^)(?=[A-Z])', '_', base_name).lower()
    setattr(klass, 'test_%s' % snake_name, generic_test)
    return klass
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).
    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
    successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
                      invocation of the function.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False
| |
#!/usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
"""
File format example: test_spec.json:
{
"targets": {
"KL46Z": ["ARM", "GCC_ARM"],
"LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "IAR"],
"LPC11U24": ["uARM"],
"NRF51822": ["ARM"]
}
}
File format example: muts_all.json:
{
"1" : {"mcu": "LPC1768",
"port":"COM4",
"disk":"J:\\",
"peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
},
"2" : {"mcu": "KL25Z",
"port":"COM7",
"disk":"G:\\",
"peripherals": ["digital_loop", "port_loop", "analog_loop"]
}
}
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Check: Extra modules which are required by core test suite
from tools.utils import check_required_modules
check_required_modules(['prettytable', 'serial'])
# Imports related to mbed build api
from tools.build_api import mcu_toolchain_matrix
# Imports from TEST API
from tools.test_api import SingleTestRunner
from tools.test_api import singletest_in_cli_mode
from tools.test_api import detect_database_verbose
from tools.test_api import get_json_data_from_file
from tools.test_api import get_avail_tests_summary_table
from tools.test_api import get_default_test_options_parser
from tools.test_api import print_muts_configuration_from_json
from tools.test_api import print_test_configuration_from_json
from tools.test_api import get_autodetected_MUTS_list
from tools.test_api import get_autodetected_TEST_SPEC
from tools.test_api import get_module_avail
from tools.test_exporters import ReportExporter, ResultExporterType
# Importing extra modules which can be not installed but if available they can extend test suite functionality
try:
import mbed_lstools
from tools.compliance.ioper_runner import IOperTestRunner
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
def get_version():
    """ Returns test script version
    """
    # (major, minor) of the single-test script interface.
    major, minor = 1, 5
    return (major, minor)
if __name__ == '__main__':
    # Command line options
    parser = get_default_test_options_parser()
    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
    opts = parser.parse_args()
    # Print script version
    if opts.version:
        print parser.description
        print parser.epilog
        print "Version %d.%d"% get_version()
        exit(0)
    if opts.db_url and opts.verbose_test_configuration_only:
        detect_database_verbose(opts.db_url)
        exit(0)
    # Print summary / information about automation test status
    if opts.test_automation_report:
        print get_avail_tests_summary_table(platform_filter=opts.general_filter_regex)
        exit(0)
    # Print summary / information about automation test status
    if opts.test_case_report:
        test_case_report_cols = ['id',
                                 'automated',
                                 'description',
                                 'peripherals',
                                 'host_test',
                                 'duration',
                                 'source_dir']
        print get_avail_tests_summary_table(cols=test_case_report_cols,
                                            result_summary=False,
                                            join_delim='\n',
                                            platform_filter=opts.general_filter_regex)
        exit(0)
    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        print mcu_toolchain_matrix(platform_filter=opts.general_filter_regex)
        exit(0)
    test_spec = None
    MUTs = None
    if hasattr(opts, 'auto_detect') and opts.auto_detect:
        # If auto_detect attribute is present, we assume other auto-detection
        # parameters like 'toolchains_filter' are also set.
        print "MBEDLS: Detecting connected mbed-enabled devices... "
        MUTs = get_autodetected_MUTS_list()
        for mut in MUTs.values():
            print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu'],
                                                                 mut['port'],
                                                                 mut['disk'])
        # Set up parameters for test specification filter function (we need to set toolchains per target here)
        use_default_toolchain = 'default' in opts.toolchains_filter if opts.toolchains_filter is not None else True
        use_supported_toolchains = 'all' in opts.toolchains_filter if opts.toolchains_filter is not None else False
        toolchain_filter = opts.toolchains_filter
        # NOTE(review): both branches of this conditional are identical, so it
        # always yields opts.general_filter_regex; a different fallback value
        # was probably intended here -- confirm.
        platform_name_filter = opts.general_filter_regex if opts.general_filter_regex is not None else opts.general_filter_regex
        # Test specification with information about each target and associated toolchain
        test_spec = get_autodetected_TEST_SPEC(MUTs.values(),
                                               use_default_toolchain=use_default_toolchain,
                                               use_supported_toolchains=use_supported_toolchains,
                                               toolchain_filter=toolchain_filter,
                                               platform_name_filter=platform_name_filter)
    else:
        # Open file with test specification
        # test_spec_filename tells script which targets and their toolchain(s)
        # should be covered by the test scenario
        opts.auto_detect = False
        test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
        if test_spec is None:
            if not opts.test_spec_filename:
                parser.print_help()
            exit(-1)
        # Get extra MUTs if applicable
        MUTs = get_json_data_from_file(opts.muts_spec_filename) if opts.muts_spec_filename else None
        if MUTs is None:
            if not opts.muts_spec_filename:
                parser.print_help()
            exit(-1)
    # Dump both configurations and stop when only verbose output was asked.
    if opts.verbose_test_configuration_only:
        print "MUTs configuration in %s:" % ('auto-detected' if opts.auto_detect else opts.muts_spec_filename)
        if MUTs:
            print print_muts_configuration_from_json(MUTs, platform_filter=opts.general_filter_regex)
        print
        print "Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename)
        if test_spec:
            print print_test_configuration_from_json(test_spec)
        exit(0)
    # Interoperability checks are only possible when mbed_lstools imported.
    if get_module_avail('mbed_lstools'):
        if opts.operability_checks:
            # Check if test scope is valid and run tests
            test_scope = get_available_oper_test_scopes()
            if opts.operability_checks in test_scope:
                tests = IOperTestRunner(scope=opts.operability_checks)
                test_results = tests.run()
                # Export results in form of JUnit XML report to separate file
                if opts.report_junit_file_name:
                    report_exporter = ReportExporter(ResultExporterType.JUNIT_OPER)
                    report_exporter.report_to_file(test_results, opts.report_junit_file_name)
            else:
                print "Unknown interoperability test scope name: '%s'" % (opts.operability_checks)
                print "Available test scopes: %s" % (','.join(["'%s'" % n for n in test_scope]))
            exit(0)
    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print print_muts_configuration_from_json(MUTs)
    if test_spec and opts.verbose:
        print print_test_configuration_from_json(test_spec)
    if opts.only_build_tests:
        # We are skipping testing phase, and suppress summary
        opts.suppress_summary = True
    # Single runner object drives build + test for all targets/toolchains;
    # all command line options are forwarded explicitly.
    single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value,
                                   _test_loops_list=opts.test_loops_list,
                                   _muts=MUTs,
                                   _clean=opts.clean,
                                   _opts_db_url=opts.db_url,
                                   _opts_log_file_name=opts.log_file_name,
                                   _opts_report_html_file_name=opts.report_html_file_name,
                                   _opts_report_junit_file_name=opts.report_junit_file_name,
                                   _opts_report_build_file_name=opts.report_build_file_name,
                                   _opts_report_text_file_name=opts.report_text_file_name,
                                   _test_spec=test_spec,
                                   _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
                                   _opts_goanna_for_tests=opts.goanna_for_tests,
                                   _opts_shuffle_test_order=opts.shuffle_test_order,
                                   _opts_shuffle_test_seed=opts.shuffle_test_seed,
                                   _opts_test_by_names=opts.test_by_names,
                                   _opts_peripheral_by_names=opts.peripheral_by_names,
                                   _opts_test_only_peripheral=opts.test_only_peripheral,
                                   _opts_test_only_common=opts.test_only_common,
                                   _opts_verbose_skipped_tests=opts.verbose_skipped_tests,
                                   _opts_verbose_test_result_only=opts.verbose_test_result_only,
                                   _opts_verbose=opts.verbose,
                                   _opts_firmware_global_name=opts.firmware_global_name,
                                   _opts_only_build_tests=opts.only_build_tests,
                                   _opts_parallel_test_exec=opts.parallel_test_exec,
                                   _opts_suppress_summary=opts.suppress_summary,
                                   _opts_test_x_toolchain_summary=opts.test_x_toolchain_summary,
                                   _opts_copy_method=opts.copy_method,
                                   _opts_mut_reset_type=opts.mut_reset_type,
                                   _opts_jobs=opts.jobs,
                                   _opts_waterfall_test=opts.waterfall_test,
                                   _opts_consolidate_waterfall_test=opts.consolidate_waterfall_test,
                                   _opts_extend_test_timeout=opts.extend_test_timeout,
                                   _opts_auto_detect=opts.auto_detect)
    # Runs test suite in CLI mode
    if (singletest_in_cli_mode(single_test)):
        exit(0)
    else:
        exit(-1)
| |
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from thing.models import * # NOPEP8
from thing.stuff import * # NOPEP8
# Month abbreviations indexed 1-12; slot 0 is a placeholder so that
# MONTHS[month_number] works without an off-by-one adjustment.
MONTHS = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')

# Filters accepted by the transactions view. Maps filter key to:
#   'label'  - human-readable name shown in the UI
#   'comps'  - allowed comparison operators (eq/ne/in/gt/gte/lt/lte/bt)
#   'number' - if True, the filter value is numeric
FILTER_EXPECTED = {
    'char': {
        'label': 'Character',
        'comps': ['eq', 'ne', 'in'],
        'number': True,
    },
    'corp': {
        'label': 'Corporation',
        'comps': ['eq', 'ne', 'in'],
        'number': True,
    },
    'client': {
        'label': 'Client',
        'comps': ['eq', 'ne', 'in'],
    },
    'date': {
        'label': 'Date',
        'comps': ['eq', 'bt'],
    },
    'item': {
        'label': 'Item',
        'comps': ['eq', 'ne', 'in'],
    },
    'total': {
        'label': 'Total Amount',
        'comps': ['eq', 'ne', 'gt', 'gte', 'lt', 'lte'],
        'number': True,
    },
}
@login_required
def transactions(request):
    """Paginated list of wallet transactions visible to the requesting user.

    Collects transactions belonging to the user's characters (personal
    wallets) and to corporations the user has API access to, applies the
    optional GET filters described by FILTER_EXPECTED, paginates the result
    and renders the 'thing/transactions.html' template.
    """
    tt = TimerThing('transactions')

    # Get profile
    profile = request.user.profile

    # Characters the user owns through valid account/character API keys.
    characters = Character.objects.filter(
        apikeys__user=request.user,
        apikeys__valid=True,
        apikeys__key_type__in=[APIKey.ACCOUNT_TYPE, APIKey.CHARACTER_TYPE]
    ).distinct()
    character_ids = [c.id for c in characters]

    # NOTE(review): corp wallet visibility is gated on the asset-list mask
    # here -- confirm this is the intended access mask for transactions.
    corporation_ids = Corporation.get_ids_with_access(request.user, APIKey.CORP_ASSET_LIST_MASK)
    corporations = Corporation.objects.filter(
        pk__in=corporation_ids
    )

    tt.add_time('init')

    # Get a QuerySet of transactions by this user: personal transactions
    # (no corp wallet) by their characters, plus visible corp wallet activity.
    transaction_ids = Transaction.objects.filter(
        (
            Q(character__in=character_ids)
            &
            Q(corp_wallet__isnull=True)
        )
        |
        Q(corp_wallet__corporation__in=corporation_ids)
    )
    transaction_ids = transaction_ids.order_by('-date')

    # Get only the ids, at this point joining the rest is unnecessary
    transaction_ids = transaction_ids.values_list('pk', flat=True)

    tt.add_time('transaction ids')

    # Parse and apply filters
    filters = parse_filters(request, FILTER_EXPECTED)

    if 'char' in filters:
        qs = []
        for fc, fv in filters['char']:
            if fc == 'eq':
                qs.append(Q(character=fv))
            elif fc == 'ne':
                qs.append(~Q(character=fv))
        # BUGFIX: comparators we don't build a Q for (e.g. 'in', which
        # FILTER_EXPECTED allows) used to leave `qs` empty and crash in
        # reduce() over an empty sequence. Only filter when we have Qs.
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'corp' in filters:
        qs = []
        for fc, fv in filters['corp']:
            if fc == 'eq':
                qs.append(Q(corp_wallet__corporation=fv))
            elif fc == 'ne':
                qs.append(~Q(corp_wallet__corporation=fv))
        # BUGFIX: see 'char' above -- guard against an empty Q list.
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    # Client is a special case that requires some extra queries
    if 'client' in filters:
        qs = []
        for fc, fv in filters['client']:
            if fc == 'eq':
                qs.append(Q(name=fv))
            elif fc == 'ne':
                qs.append(~Q(name=fv))
            elif fc == 'in':
                qs.append(Q(name__icontains=fv))
        # BUGFIX: guard against an empty Q list before reducing.
        if qs:
            qs_reduced = reduce(q_reduce_or, qs)
            # Match the client name against both characters and corporations.
            char_ids = list(Character.objects.filter(qs_reduced).values_list('id', flat=True))
            corp_ids = list(Corporation.objects.filter(qs_reduced).values_list('id', flat=True))
            transaction_ids = transaction_ids.filter(
                Q(other_char_id__in=char_ids)
                |
                Q(other_corp_id__in=corp_ids)
            )

    if 'date' in filters:
        qs = []
        for fc, fv in filters['date']:
            if fc == 'eq':
                # Single day: expand to a [00:00:00, 23:59:59] range.
                try:
                    start = datetime.datetime.strptime(fv, '%Y-%m-%d')
                    end = datetime.datetime.strptime('%s 23:59:59' % (fv), '%Y-%m-%d %H:%M:%S')
                    qs.append(Q(date__range=(start, end)))
                except ValueError:
                    pass
            elif fc == 'bt':
                # Between two dates, supplied as "YYYY-MM-DD,YYYY-MM-DD".
                parts = fv.split(',')
                if len(parts) == 2:
                    try:
                        start = datetime.datetime.strptime(parts[0], '%Y-%m-%d')
                        end = datetime.datetime.strptime('%s 23:59:59' % (parts[1]), '%Y-%m-%d %H:%M:%S')
                        if start < end:
                            qs.append(Q(date__range=(start, end)))
                    except ValueError:
                        pass
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'item' in filters:
        qs = []
        for fc, fv in filters['item']:
            if fc == 'eq':
                qs.append(Q(item__name=fv))
            elif fc == 'ne':
                qs.append(~Q(item__name=fv))
            elif fc == 'in':
                qs.append(Q(item__name__icontains=fv))
        # BUGFIX: guard against an empty Q list before reducing.
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    # Totals are signed: negative values mean buys, positive mean sells.
    if 'total' in filters:
        qs = []
        for fc, fv in filters['total']:
            if fc == 'eq':
                if fv < 0:
                    qs.append(Q(buy_transaction=True, total_price=abs(fv)))
                else:
                    qs.append(Q(buy_transaction=False, total_price=fv))
            elif fc == 'ne':
                qs.append(~Q(total_price=fv))
            elif fc == 'gt':
                if fv > 0:
                    qs.append(Q(buy_transaction=False, total_price__gt=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gt=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lt=abs(fv))
                    )
            elif fc == 'gte':
                if fv >= 0:
                    qs.append(Q(buy_transaction=False, total_price__gte=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gte=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lte=abs(fv))
                    )
            elif fc == 'lt':
                if fv > 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lt=fv)
                        |
                        Q(buy_transaction=True, total_price__gt=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gt=abs(fv)))
            elif fc == 'lte':
                if fv >= 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lte=fv)
                        |
                        Q(buy_transaction=True, total_price__gte=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gte=abs(fv)))
        # BUGFIX: guard against an empty Q list before reducing.
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    tt.add_time('filters')

    # Create a new paginator
    paginator = Paginator(transaction_ids, profile.entries_per_page)

    # If page request is out of range, deliver last page of results
    try:
        paginated = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        # Page is not an integer, use first page
        paginated = paginator.page(1)
    except EmptyPage:
        # Page is out of range, deliver last page
        paginated = paginator.page(paginator.num_pages)

    tt.add_time('paginator')

    # Build up to two previous/next page links for the pager widget.
    # (Renamed from `prev`/`next` so the `next` builtin isn't shadowed.)
    hp = paginated.has_previous()
    hn = paginated.has_next()
    prev_pages = []
    next_pages = []
    if hp:
        # prev and next, use 1 of each
        if hn:
            prev_pages.append(paginated.previous_page_number())
            next_pages.append(paginated.next_page_number())
        # no next, add up to 2 previous links
        else:
            for i in range(paginated.number - 1, 0, -1)[:2]:
                prev_pages.insert(0, i)
    else:
        # no prev, add up to 2 next links
        for i in range(paginated.number + 1, paginator.num_pages)[:2]:
            next_pages.append(i)

    # Build the transaction queryset now to avoid nasty subqueries
    transactions = Transaction.objects.filter(pk__in=paginated)
    transactions = transactions.select_related('corp_wallet__corporation', 'item', 'station', 'character', 'other_char', 'other_corp')
    transactions = transactions.order_by('-date')
    transactions = list(transactions)

    tt.add_time('transactions')

    # Build filter links, urgh
    for transaction in transactions:
        transaction.z_client_filter = build_filter(filters, 'client', 'eq', transaction.other_char or transaction.other_corp)
        transaction.z_item_filter = build_filter(filters, 'item', 'eq', transaction.item.name)

    tt.add_time('build links')

    # Ready template things
    values = {
        'chars': characters,
        'corps': corporations,
    }

    tt.add_time('template bits')

    # Render template
    out = render_page(
        'thing/transactions.html',
        {
            'json_data': _json_data(characters, corporations, filters),
            'transactions': transactions,
            'show_item_icons': profile.show_item_icons,
            'paginated': paginated,
            'next': next_pages,
            'prev': prev_pages,
            'values': values,
        },
        request,
        character_ids,
        corporation_ids,
    )

    tt.add_time('template')

    if settings.DEBUG:
        tt.finished()

    return out
def _json_data(characters, corporations, filters):
    """Serialize the filter metadata plus character/corporation name maps
    as JSON for consumption by the page's javascript.

    Apostrophes are HTML-escaped so names can be embedded safely.
    """
    payload = {
        'expected': FILTER_EXPECTED,
        'filters': filters,
        'values': {
            'char': {char.id: char.name.replace("'", '&#39;') for char in characters},
            'corp': {corp.id: corp.name.replace("'", '&#39;') for corp in corporations},
        },
    }
    return json.dumps(payload)
| |
"""This module describes fully-functioning networks created from the pieces in `layer`.
"""
from __future__ import division, print_function
import collections
import inspect
import numpy as np
import six
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from ..data import files
from ..data import readers
from ..nnets import layers
from ..nnets import training
from ..util import misc
from ..util import netlog
log = netlog.setup_logging("nets", level="INFO")
def define_logistic_regression(n_classes, l1_reg=0, l2_reg=0):
    """Shortcut to build the layer definitions for a logistic regression
    classifier: a single classification output layer.

    Parameters
    ----------
    n_classes : int
        Number of classes to calculate probabilities for
    l1_reg, l2_reg : float, optional
        L1 and L2 regularization strengths

    Returns
    -------
    list
        Layer definitions suitable for input to a `NNClassifier`
    """
    # Logistic regression needs nothing but the output layer itself.
    output_kwargs = {"n_classes": n_classes, "l1": l1_reg, "l2": l2_reg}
    return [["ClassificationOutputLayer", output_kwargs]]
def define_cnn(n_classes, input_image_shape, n_kernels, filter_scale, poolsize,
               n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0):
    """Shortcut to build the list of layer definitions for
    a convolutional neural network.

    Defines a series of convolutional layers, followed by max-pooling layers,
    after which a multi-layer perceptron calculates the probabilities of
    membership in each class.

    Parameters
    ----------
    n_classes : int
        Number of classes to calculate probabilities for
    input_image_shape : list or tuple
        Shape of input image, (n_channels, n_pixels_x, n_pixels_y); a
        2-element shape is treated as a single-channel image
    n_kernels : list of ints
        Number of convolutional kernels in each convolutional layer
    filter_scale : list of ints
        Size of (square) filters in each convolutional layer.
        Must be the same length as `n_kernels`.
    poolsize : list of ints
        Size of (square) non-overlapping max-pooling kernel to be
        applied after each convolutional layer (may be zero, meaning
        no max pooling after that layer). Must be the same length
        as `n_kernels`.
    n_hidden : int or list of ints
        Number of units in each hidden layer
    dropout_p : float or list of floats
        Dropout fraction for input and each hidden layer. If a single float,
        this dropout fraction will be applied to every layer.
    activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"}
        Activation function to use for all layers
    l1_reg, l2_reg : float, optional
        L1 and L2 regularization strengths for all layers

    Returns
    -------
    list
        Layer definitions suitable for input to a `NNClassifier`

    Raises
    ------
    ValueError
        If `input_image_shape` has an unusable length, if the
        per-conv-layer lists have mismatched lengths, or if
        `dropout_p` has the wrong length.
    """
    # Assume input images are 2D. If the `input_image_shape` is 3 elements,
    # the first element is the number of images in the input. Otherwise, assume
    # that there's only one image in the input.
    if len(input_image_shape) == 3:
        pass
    elif len(input_image_shape) == 2:
        input_image_shape = [1] + list(input_image_shape)
    else:
        raise ValueError("The input image shape must be (n_channels, n_pixels_x, n_pixels_y).")

    # BUGFIX: `zip` silently truncates mismatched lists, which would quietly
    # drop convolutional layers; enforce the documented same-length contract.
    if not (len(n_kernels) == len(filter_scale) == len(poolsize)):
        raise ValueError("`n_kernels`, `filter_scale`, and `poolsize` must "
                         "all have the same length.")

    try:
        # Make sure that `n_hidden` is a list.
        len(n_hidden)
    except TypeError:
        n_hidden = [n_hidden]
    try:
        # Make sure that `dropout_p` is a list.
        len(dropout_p)
    except TypeError:
        # One dropout for the input plus one per conv and hidden layer.
        dropout_p = (1 + len(n_hidden) + len(n_kernels)) * [dropout_p]
    if len(dropout_p) != len(n_kernels) + len(n_hidden) + 1:
        raise ValueError("Either specify one dropout for all layers or one dropout for "
                         "each layer (inputs + hidden layers).")
    dropout_p = dropout_p[::-1]  # Pops come from the end, so reverse this list.

    # Start by putting on the input layer.
    layer_defs = [["InputImageLayer", {"name": "input", "n_images": input_image_shape[0],
                                       "n_pixels": input_image_shape[1:]}]]
    input_do = dropout_p.pop()
    if input_do:
        layer_defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}])

    # Add convolutional layers, each optionally followed by max-pooling and
    # dropout. (Loop variable renamed from `filter`, which shadowed the builtin.)
    for i_conv, (kernels, filt, pool) in enumerate(zip(n_kernels, filter_scale, poolsize)):
        layer_defs.append(["ConvLayer", {"name": "conv{}".format(i_conv),
                                         "n_output_maps": kernels,
                                         "filter_shape": (filt, filt),
                                         "activation": activation}])
        if pool:
            layer_defs.append(["MaxPool2DLayer", {"name": "maxpool{}".format(i_conv),
                                                  "pool_shape": (pool, pool)}])
        layer_do = dropout_p.pop()
        if layer_do:
            layer_defs.append(["DropoutLayer", {"name": "DO-conv{}".format(i_conv),
                                                "dropout_p": layer_do}])

    # Add fully-connected layers.
    for i_hidden, hidden in enumerate(n_hidden):
        layer_defs.append(["FCLayer", {"name": "fc{}".format(i_hidden),
                                       "n_units": hidden, "activation": activation,
                                       "l1": l1_reg, "l2": l2_reg}])
        layer_do = dropout_p.pop()
        if layer_do:
            layer_defs.append(["DropoutLayer", {"name": "DO-fc{}".format(i_hidden),
                                                "dropout_p": layer_do}])

    # Put on an output layer.
    layer_defs.append(["ClassificationOutputLayer", {"n_classes": n_classes, "l1": l1_reg,
                                                     "l2": l2_reg}])
    return layer_defs
def define_mlp(n_classes, n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0):
    """Shortcut to create the layer definitions for a multi-layer
    perceptron classifier: optional input dropout, a stack of
    fully-connected layers (each with optional dropout), and a
    classification output layer.

    Parameters
    ----------
    n_classes : int
        Number of classes to calculate probabilities for
    n_hidden : int or list of ints
        Number of units in each hidden layer
    dropout_p : float or list of floats
        Dropout fraction for input and each hidden layer. A single float
        is applied to every layer; a list must have one entry for the
        input plus one per hidden layer.
    activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"}
        Activation function to use for all layers
    l1_reg, l2_reg : float, optional
        L1 and L2 regularization strengths for all layers

    Returns
    -------
    list
        Layer definitions suitable for input to a `NNClassifier`
    """
    try:
        len(n_hidden)
    except TypeError:
        # A lone int means a single hidden layer.
        n_hidden = [n_hidden]
    try:
        len(dropout_p)
    except TypeError:
        # Broadcast a scalar dropout across the input and all hidden layers.
        dropout_p = [dropout_p] * (1 + len(n_hidden))
    if len(dropout_p) != len(n_hidden) + 1:
        raise ValueError("Either specify one dropout for all layers or one dropout for "
                         "each layer (inputs + hidden layers).")

    # Walk the dropouts front-to-back with an iterator (instead of
    # reversing the list and popping from the end).
    dropouts = iter(dropout_p)
    defs = []

    input_do = next(dropouts)
    if input_do:
        defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}])

    # One fully-connected layer (plus optional dropout) per hidden width.
    for idx, width in enumerate(n_hidden):
        defs.append(["FCLayer", {"name": "fc{}".format(idx),
                                 "n_units": width, "activation": activation,
                                 "l1": l1_reg, "l2": l2_reg}])
        layer_do = next(dropouts)
        if layer_do:
            defs.append(["DropoutLayer", {"name": "DO-fc{}".format(idx),
                                          "dropout_p": layer_do}])

    # Finish with the classification output layer.
    defs.append(["ClassificationOutputLayer", {"name": "output", "n_classes": n_classes,
                                               "l1": l1_reg, "l2": l2_reg}])
    return defs
class NNClassifier(object):
r"""A neural net to be used for a classification task.
The classification network is built from individual layers.
Compilation doesn't happen until necessary at training time.
This object can be pickled and unpickled; the entire state
of the object will be stored.
.. note:: After unpickling, the network will need to be compiled
(either through `fit` or by calling `compile` directly)
before it can be used.
Parameters
----------
layer_defs : list
Definition of the network layers. This should be a list of lists.
name : str, optional
Name of this neural network, for display purposes
n_in : int or tuple, optional
The shape of the input features. If supplied here, we'll initialize
the network layers now. Otherwise, this will be inferred from the
data supplied during a call to `fit` and the network layers will be
constructed at that time.
batch_size : int, optional
Batch size to be used for training. Only needed now if `n_in` is also
supplied -- it can be used to optimize convolutional layers on the CPU.
random_state : int or np.random.RandomState, optional
RNG or seed for a RNG. If not supplied, will be randomly initialized.
Other Parameters
----------------
stored_network : str, optional
Filename of pickled network. If supplied, initialize this object's
layers from weights stored in the `stored_network`. The pickled
network must have the same architecture as this network.
theano_rng : theano.tensor.shared_randomstreams.RandomStreams, optional
Symbolic random number generator. If not supplied, will be initialized
from the numpy RNG.
Attributes
----------
predict_proba : function
Input batch of examples, output probabilities of each class for each example.
Compiled by theano.
predict : function
Input batch of examples, output class with maximum probability for each example.
Compiled by theano.
layers_train : list
List of `Layer` objects. Potentially non-deterministic; used for training.
layers_inf : list
Network used for inference, deterministic. Identical architecture to and
shares parameters with `layers_train`.
params : list
All trainable parameters (theano shared variables) from this network
param_update_rules : list
All special update rules, one dictionary per parameter in `params`
n_params : int
Total number of individual trainable parameters
trainer : training.SupervisedTraining
Object used to train this network; present after calling `fit`
Examples
--------
>>> layers = [["FCLayer", {"name": "fc1", "n_units": 100, "activation": "relu", "l2": 0.001}],
["DropoutLayer", {"name": "DO-fc1", "dropout_p": 0.5}],
["ClassificationOutputLayer", {"name": "output", "n_classes": 10}]]
>>> cls = NNClassifier(layers, name="Small example net", random_state=42)
"""
def __init__(self, layer_defs, name="Neural Network Classifier", n_in=None,
             batch_size=None, random_state=None, stored_network=None, theano_rng=None):
    """Store configuration and, if the input shape is already known,
    build the layer stacks. Theano compilation is deferred to `compile`.
    """
    # Symbolic input variable; assigned when `compile` is called.
    self.input = None
    self.trainer = None
    self.n_in = n_in
    self.layer_defs = layer_defs
    self.batch_size = batch_size
    self.stored_network = stored_network
    self.name = name
    # Two parallel stacks: stochastic (training) and deterministic (inference).
    self.layers_train, self.layers_inf = [], []
    # Regularization terms; populated by `_build_network`.
    self.l1, self.l2_sqr = 0, 0
    self.params, self.param_update_rules, self.n_params = [], [], 0
    if type(layer_defs) != list:
        raise TypeError("Please input a list of layer definitions.")
    self.set_rng(random_state, theano_rng)  # Sets `self.rng` and `self.theano_rng`.
    self.pickled_theano_rng = None  # Non-None only after unpickling; restored in `compile`.
    # Define these Theano functions during the `compile` stage.
    self.p_y_given_x = None
    self.predict_proba = None
    self.predict = None
    # Build the layers now only if the input shape is known; otherwise
    # `fit` infers it from the data and builds the network then.
    if self.n_in is not None:
        self._build_network(self.n_in, batch_size)
def _build_network(self, n_in, batch_size=None):
    """Create and store the layers of this network, along with auxiliary information such
    as lists of the trainable parameters in the network.

    Parameters
    ----------
    n_in : int or tuple
        Shape of the input features; coerced to at least 1-D.
    batch_size : int, optional
        If given, overrides `self.batch_size`.
    """
    self.n_in = np.atleast_1d(n_in)  # Make sure that `n_in` is a list or tuple.
    if batch_size is not None:
        self.batch_size = batch_size
    # These next attributes are creating and storing Theano shared variables.
    # The Layers contain shared variables for all the trainable parameters,
    # and the regularization parameters are sums and products of the parameters.
    self.layers_train = self._build_layers_train(self.layer_defs, self.stored_network)
    # The inference stack shares (is tied to) the training stack's weights.
    self.layers_inf = self._duplicate_layer_stack(self.layers_train)
    self.l1, self.l2_sqr = self._get_regularization(self.layers_train)
    # Collect the trainable parameters from each layer and arrange them into lists.
    self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train)
    log.info("This network has {} trainable parameters.".format(self.n_params))
def _arrange_parameters(self, layers):
    """Extract all trainable parameters and any special update rules from each Layer.
    Also calculate the total number of trainable parameters in this network.

    Returns
    -------
    A 3-tuple of (parameters, parameter update rules, and number of parameters).
    The first two elements are lists of equal length, and the number of parameters is
    an integer.
    """
    # Concatenate every layer's parameters and update rules, in layer order.
    params, param_update_rules = [], []
    for ly in layers:
        params += ly.params
        param_update_rules += ly.param_update_rules
    # Calculate the total number of trainable parameters in this network.
    # Layers flagged with `fix_params` are excluded from the count (though
    # their params are still present in the `params` list above).
    n_params = int(np.sum([np.sum([np.prod(param.get_value().shape) for param in layer.params])
                           for layer in layers if not getattr(layer, "fix_params", False)]))
    return params, param_update_rules, n_params
def _get_regularization(self, layers):
    """Find the L1 and L2 regularization terms for this net. Combine the L1 and L2
    terms from each Layer. Use the regularization strengths stored in each Layer.
    Note that the value returned is `l2_sqr`, the sum of squares of all weights,
    times the lambda parameter for each Layer.

    Returns
    -------
    l1, l2_sqr : theano.shared
        The `l1` is the sum of absolute values of weights times
        lambda_l1 from each Layer, and `l2_sqr` is the sum of squares
        of weights times lambda_l2 from each Layer.
    """
    # L1 norm; one regularization option is to require the L1 norm to be small.
    # (np.sum over symbolic per-layer terms builds a combined symbolic sum.)
    l1 = np.sum([ly.l1 for ly in layers if ly.l1 is not None])
    if not l1:
        # No layer contributed; use a zero-valued shared variable so the
        # loss expression can always add `self.l1` unconditionally.
        log.debug("No L1 regularization in this model.")
        l1 = theano.shared(np.cast[theano.config.floatX](0), "zero")
    # Square of the L2 norm; one regularization option is to require the
    # square of the L2 norm to be small.
    l2_sqr = np.sum([ly.l2_sqr for ly in layers if ly.l2_sqr is not None])
    if not l2_sqr:
        log.debug("No L2 regularization in this model.")
        l2_sqr = theano.shared(np.cast[theano.config.floatX](0), "zero")
    return l1, l2_sqr
def _build_layers_train(self, layer_defs, stored_network=None):
    """Creates a stack of neural network layers from the input layer definitions.
    This network is intended for use in training.

    **Parameters**

    * `layer_defs` <list>
        A list of Layer definitions. May contain Layers, in which case they're added
        directly to the list of output Layers.

    **Optional Parameters**

    * `stored_network` <str|None>
        A filename containing a previously stored neural network. If any layer definitions
        specify that they should be initialized with weights from an existing network
        (via a "load_params" key), use the weights in the `stored_network`.

    **Returns**

    A list of initialized (but not compiled) neural network Layers.

    **Modifies**

    None
    """
    if stored_network is not None:
        log.info('Reading weights from an existing network at "{}".'.format(stored_network))
        # Replace the filename with an OrderedDict of {layer name: params}.
        stored_network = collections.OrderedDict(files.read_pickle(stored_network)["params"])
    log.info("Building the \"{}\" network.".format(self.name))
    # NOTE(review): list-style definitions (e.g. ["InputImageLayer", {...}])
    # never match this isinstance check, so a generic InputLayer is always
    # prepended unless the first element is a Layer *object* -- confirm intended.
    if isinstance(layer_defs[0], layers.InputLayer):
        layer_objs = []
    else:
        # Initialize the layers with an input layer, if we don't have one already.
        layer_objs = [layers.InputLayer(self.n_in, name="input")]
    for ly in layer_defs:
        if isinstance(ly, layers.Layer):
            # If this is already a Layer object, don't try to re-create it.
            layer_objs.append(ly)
        else:
            # Each definition is [class name, kwargs dict]; the previous
            # layer's output shape becomes this layer's input shape.
            prev_ly = layer_objs[-1]
            if len(ly) == 1:
                ly.append({})  # No extra layer arguments.
            layer_name = ly[0]
            if not layer_name.endswith("Layer"):
                # All class names end with "Layer".
                layer_name += "Layer"
            # Axis-reshuffling layers are a GPU-only optimization; skip on CPU.
            if ((layer_name.startswith("BC01ToC01B") or layer_name.startswith("C01BToBC01"))
                    and theano.config.device == "cpu"):
                log.warning("Skipping \"{}\" reshuffling layer for "
                            "CPU training.".format(layer_name))
                continue
            layer_kwargs = ly[1].copy()
            init_from = layer_kwargs.pop("load_params", False)
            if init_from:
                # NOTE(review): if `stored_network` is None here this membership
                # test raises TypeError rather than a clear error -- consider
                # validating that a stored network was supplied.
                if init_from not in stored_network:
                    raise ValueError("Couldn't find weights for layer {} in the input "
                                     "weights.".format(init_from))
            layer_type = getattr(layers, layer_name)
            # Some layer constructors accept a batch_size hint (e.g. for CPU
            # convolution optimizations); pass ours through when supported.
            if "batch_size" in inspect.getargspec(layer_type.__init__).args:
                layer_kwargs.setdefault("batch_size", self.batch_size)
            layer_objs.append(layer_type(n_in=prev_ly.n_out, rng=self.rng,
                                         theano_rng=self.theano_rng, **layer_kwargs))
            log.info("Added layer: {}".format(str(layer_objs[-1])))
            if init_from:
                # Copy weights from the input file into this layer.
                for param, input_params in zip(layer_objs[-1].params,
                                               stored_network[init_from]):
                    param.set_value(input_params[1], borrow=True)
                log.info("Copied input parameters from layer {} to layer "
                         "{}.".format(init_from, layer_objs[-1].name))
    return layer_objs
def _duplicate_layer_stack(self, layer_stack):
    """Creates a stack of neural network Layers identical to the input `layer_stack`, and
    with weights tied to those Layers. This is useful to, for example, create a parallel
    network to be used for inference.

    **Parameters**

    * `layer_stack` <list of Layers>
        A list of initialized Layers.

    **Returns**

    A list of initialized (but not compiled) neural network Layers.

    **Modifies**

    None
    """
    layer_objs = []
    for i_ly, ly in enumerate(layer_stack):
        layer_type = type(ly)
        # Reconstruct each layer from its own constructor kwargs...
        layer_kwargs = ly.get_params()
        # ...but tie the weights to the training network by passing the
        # original layer's trainable parameter objects into the constructor.
        layer_kwargs.update(layer_stack[i_ly].get_trainable_params())
        layer_objs.append(layer_type(rng=self.rng, theano_rng=self.theano_rng, **layer_kwargs))
    return layer_objs
def get_loss(self, name, targets=None, inference=False, regularized=None):
    """Return a loss function.

    Parameters
    ----------
    name : str
        Name of the loss function. One of ["nll", "error"]. May
        also be a list, in which case this function will return
        a list of loss functions.
    targets : theano symbolic variable, optional
        If None, will be initialized to a T.imatrix named "y".
    inference : bool, optional
        If True, return the loss from the inference network (for
        e.g. model validation). Otherwise use the training network.
    regularized : bool, optional
        Add regularization parameters to the loss? Default to True
        if `inference` is False and False if `inference` is True.

    Returns
    -------
    Theano symbolic variable
        Represents the requested loss, or a list of symbolic
        variables if `name` is list-like.

    Raises
    ------
    RuntimeError
        If the network has not been compiled yet.
    ValueError
        If the output layer has no loss function with this name.
    """
    if self.input is None:
        raise RuntimeError("Compile this network before getting a loss function.")
    if regularized is None:
        # Regularize training losses by default, but not validation losses.
        regularized = not inference
    # If we got a list as input, return a list of loss functions.
    if misc.is_listlike(name):
        return [self.get_loss(n, targets=targets, inference=inference, regularized=regularized)
                for n in name]
    # Normalize the name: lowercase, expand "nll", spaces to underscores.
    input_name = name  # Keep the original spelling for error messages.
    name = name.lower()
    if name == "nll":
        name = "negative_log_likelihood"
    name = name.replace(" ", "_")
    if inference:
        output_layer = self.layers_inf[-1]
    else:
        output_layer = self.layers_train[-1]
    # Look for the cost function in the output layer.
    if not hasattr(output_layer, name):
        raise ValueError("Unrecognized loss function: \"{}\".".format(input_name))
    if targets is None:
        targets = T.imatrix("y")  # Labels, presented as 2D array of [int] labels
    loss = getattr(output_layer, name)(targets)
    if regularized:
        loss = loss + self.l1 + self.l2_sqr
    return loss
def compile(self, input, recompile=False):
    """Compile the theano computation graphs and functions
    associated with this network.

    Parameters
    ----------
    input : Theano symbolic variable
        The input to the network. (Parameter name shadows the builtin
        `input`, but is kept for interface compatibility.)
    recompile : bool, optional
        If False, will not recompile an already-compiled network.
    """
    if self.input is not None:
        if recompile:
            log.warning("Recompiling and resetting the existing network.")
        else:
            log.debug("This object already compiled. Not recompiling.")
            return
    self.input = input
    # Chain each layer's (stochastic) output into the next layer's input.
    log.info("Compiling the \"{}\" training network.".format(self.name))
    prev_output = input
    for ly in self.layers_train:
        ly.compile(prev_output)
        ly.compile_activations(self.input)
        prev_output = ly.output
    # Same chain for the inference stack, but using the deterministic
    # `output_inf` of each layer.
    log.info("Compiling the \"{}\" inference network.".format(self.name))
    prev_output = input
    for ly in self.layers_inf:
        ly.compile(prev_output)
        ly.compile_activations(self.input)
        prev_output = ly.output_inf
    # Allow predicting on fresh features.
    self.p_y_given_x = self.layers_inf[-1].p_y_given_x
    self.predict_proba = theano.function(inputs=[self.input], outputs=self.p_y_given_x)
    self.predict = theano.function(inputs=[self.input], outputs=self.layers_inf[-1].y_pred)
    # Now that we've compiled the network, we can restore a previous
    # Theano RNG state, if any. The "pickled_theano_rng" will only be
    # non-None if this object was unpickled.
    self._set_theano_rng(self.pickled_theano_rng)
    self.pickled_theano_rng = None
def get_init_params(self):
return dict(n_in=self.n_in, layer_defs=self.layer_defs,
name=self.name, batch_size=self.batch_size,
stored_network=self.stored_network)
def set_trainable_params(self, inp, layers=None):
    """Set the trainable parameters in this network from trainable
    parameters in an input.

    Parameters
    ----------
    inp : NNClassifier or string
        May be an existing NNClassifier, or a filename
        pointing to either a checkpoint or a pickled NNClassifier.
    layers : list of strings, optional
        If provided, set parameters only for the layers with these
        names, using layers with corresponding names in the input.

    Raises
    ------
    TypeError
        If `inp` is not (or does not contain) an NNClassifier.
    """
    # Get the input and check its type.
    # If the input is a string, try reading it first as a
    # checkpoint file, and then as a NNClassifier pickle.
    if isinstance(inp, six.string_types):
        try:
            inp = files.checkpoint_read(inp, get_metadata=False)
        except files.CheckpointError:
            inp = files.read_pickle(inp)
    if not isinstance(inp, NNClassifier):
        raise TypeError("Unable to restore weights from a \"{}\" object.".format(type(inp)))
    # Go through each layer in this object and set its weights.
    for ly in self.layers_train:
        # BUGFIX: `layers` holds layer *names* (strings), but `ly` is a Layer
        # object, so the old test `ly not in layers` never matched and the
        # `layers` argument was effectively ignored. Compare names instead.
        if layers is not None and ly.name not in layers:
            continue
        if ly.has_trainable_params:
            ly.set_trainable_params(inp.get_layer(ly.name))
            log.debug("Set trainable parameters in layer {} "
                      "from input weights.".format(ly.name))
def get_layer(self, name, inf=False):
"""Returns the Layer object with the given name.
Parameters
----------
name : str
Name of the desired Layer object
inf : bool, optional
If True, search the inference (deterministic) Layers,
otherwise search the training Layers.
"""
layers = self.layers_inf if inf else self.layers_train
for ly in layers:
if ly.name == name:
return ly
else:
raise ValueError("Layer \"{}\" is not present in "
"network \"{}\".".format(name, self.name))
def set_rng(self, rng, theano_rng=None):
    """Set the pseudo-random number generator in this object
    and in all Layers of this object.

    Parameters
    ----------
    rng : int or numpy.random.RandomState or `RandomState.get_state()`
        A seed, an existing RNG, or a previously captured RNG state.
    theano_rng : theano.tensor.shared_randomstreams.RandomStreams, optional
        If not supplied, will be initialized from the `rng`

    Modifies
    --------
    `self.rng` and `self.theano_rng` will be set with RNGs.
    Each Layer in `self.layers_train` and `self.layers_inf` will have their RNGs set
    to be the same objects as this network's new RNGs.
    """
    # Set up the random number generator, if necessary.
    if rng is None:
        log.debug("Making new NNet RNG")
        rng = np.random.RandomState()
    elif isinstance(rng, int):
        # If we got a seed as input.
        log.debug("Setting RNG seed to {}.".format(rng))
        rng = np.random.RandomState(rng)
    elif not isinstance(rng, np.random.RandomState):
        # Assume that anything else is the state of the RNG
        # (as returned by RandomState.get_state(), e.g. from unpickling).
        log.debug("Initializing numpy RNG from previous state.")
        rng_state = rng
        rng = np.random.RandomState()
        rng.set_state(rng_state)
    if theano_rng is None:
        log.debug("Initializing new Theano RNG.")
        theano_rng = RandomStreams(rng.randint(2 ** 30))
    self.rng = rng
    self.theano_rng = theano_rng
    # Share the very same RNG objects with every layer in both stacks.
    for ly in self.layers_train + self.layers_inf:
        ly.rng = self.rng
        ly.theano_rng = self.theano_rng
def _set_theano_rng(self, rng_state=None):
    """Restore the theano_rng's state from a pickled list of state values.

    .. note:: This can only be done after compiling the network! The Theano
        RNG needs to see where it fits in to the graph.
        http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs
    """
    if rng_state is None:
        return
    # Each entry of `state_updates` holds a shared variable in slot 0;
    # load the matching saved value into it.
    for update, saved_value in zip(self.theano_rng.state_updates, rng_state):
        update[0].set_value(saved_value)
def __getstate__(self):
    """Preserve the object's state for pickling.

    Don't try to pickle the Theano objects directly; Theano changes
    quickly. Store the values of layer weights as arrays instead
    (handled in the Layers' __getstate__ functions) and clear all
    compiled functions and symbolic variables. Those will need to be
    re-compiled before the network can be used again.
    """
    state = self.__dict__.copy()
    # Null out everything that only exists after compilation.
    for key in ("p_y_given_x", "predict_proba", "predict",
                "l1", "l2_sqr", "params", "param_update_rules", "input"):
        state[key] = None
    state["layers_inf"] = []  # This is redundant with `layers_train`; don't save both.
    # Numpy generators pickle via their state tuple.
    state["rng"] = self.rng.get_state()
    # http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs
    state["pickled_theano_rng"] = [su[0].get_value()
                                   for su in self.theano_rng.state_updates]
    state["theano_rng"] = None
    return state
def __setstate__(self, state):
    """Restore state written by `__getstate__` when unpickling.

    `state["rng"]` is a numpy RNG state tuple and `state["theano_rng"]`
    is None (see `__getstate__`), so `set_rng` rebuilds live generator
    objects from them. Compiled functions are not restored here; the
    network must be re-compiled before use.
    """
    self.__dict__.update(state)
    # Reconstruct this object's RNG.
    # The theano_rng won't be completely reconstructed until we recompile the network.
    self.set_rng(self.rng, self.theano_rng)
    # Rebuild everything we had to take apart before saving. Note that we'll
    # still need to call `compile` to make the network fully operational again.
    self.layers_inf = self._duplicate_layer_stack(self.layers_train)
    self.l1, self.l2_sqr = self._get_regularization(self.layers_train)
    # Collect the trainable parameters from each layer and arrange them into lists.
    self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train)
def fit(self, X, y=None, valid=None, test=None,
        n_epochs=None, batch_size=None,
        augmentation=None, checkpoint=None,
        sgd_type="adadelta", lr_rule=None,
        momentum_rule=None, sgd_max_grad_norm=None,
        train_loss="nll", valid_loss="nll", test_loss=None,
        validation_frequency=None, validate_on_train=False,
        checkpoint_all=False, extra_metadata=None,):
    """Perform supervised training on the input data.

    When restoring a pickled `NNClassifier` object to resume training,
    data, augmentation functions, and checkpoint locations must be
    re-entered, but other parameters will be taken from the previously
    stored training state. (The `n_epochs` may be re-supplied to alter
    the number of epochs used, but will default to the previously
    supplied `n_epochs`.) Training may be stopped early by pressing
    ctrl-C.

    Training data may be provided in either of the following formats:

    - An array of (n_examples, n_features) in the first positional
      argument (keyed by `X`), and an array of (n_examples, n_labels)
      in the second positional argument (keyed by `y`)
    - An object of type `readers.DataWithHoldoutParitions` or
      `readers.Data` presented in the first positional argument

    Validation data may be optionally supplied with the `valid` key
    in one of the following formats (only if the training data were not
    given as a `readers.DataWithHoldoutParitions` object):

    - A tuple of (X, y), where `X` is an array of
      (n_validation_examples, n_features) and `y` is an array of
      (n_validation_examples, n_labels)
    - A `readers.Data` object
    - A float in the range [0, 1), in which case validation data will
      be held out from the supplied training data (only if training
      data were given as an array)

    Test data may be optionally supplied with the `test` key, using the
    same formats as for validation data.

    Parameters
    ----------
    X, y, valid, test
        See above for discussion of allowed input formats.
    n_epochs : int
        Train for this many epochs (one complete pass through the
        training data each). Must be supplied unless resuming training.
    batch_size : int
        Number of examples in a minibatch. Must be provided if it was
        not given during object construction.
    augmentation : function, optional
        Apply this function to each minibatch of training data.
    checkpoint : str, optional
        Filename for storing network during training. If supplied, the
        network is stored after every epoch, along with the
        best-validation-loss network and the final network; the trainer
        restores the best network at the end of training.
    sgd_type : {"adadelta", "nag", "adagrad", "rmsprop", "sgd"}
        Choice of stochastic gradient descent algorithm.
    lr_rule, momentum_rule : dict of sgd_updates.Rule params, optional
        Parameter dictionaries used to create Rule objects which
        describe how to alter the learning rate and momentum during
        training.
    train_loss, valid_loss : {"nll", "error"}
        Loss function for training and validation. With a custom output
        layer, may also be the name of a function which returns a
        theano symbolic variable giving the cost.
        ("nll" = "negative log likelihood")
    test_loss : str or list, optional
        Any of the loss functions usable for training, or a list of
        such functions. Defaults to ["error", "nll"].

    Other Parameters
    ----------------
    sgd_max_grad_norm : float, optional
        If provided, scale gradients during training so that the norm
        of all gradients is no more than this value.
    validation_frequency : int, optional
        Check the validation loss after training on this many examples.
        Defaults to validating once per epoch.
    validate_on_train : bool, optional
        If set, calculate validation loss (using the deterministic
        network) on the training set as well.
    checkpoint_all : str, optional
        Keep the state of the network at every training step.
        Warning: may use lots of hard drive space.
    extra_metadata : dict, optional
        Store these keys with the pickled object.

    Returns
    -------
    self : NNClassifier

    Examples
    --------
    >>> lr_rule = {"rule": "stalled", "initial_value": 0.1, "multiply_by": 0.25, "interval": 5}
    >>> momentum_rule = {"rule": "stalled", "initial_value": 0.7, "decrease_by": -0.1,
                         "final_value": 0.95, "interval": 5}
    >>> mnist_data = files.read_pickle(sample_data.mnist)
    >>> classifier.fit(mnist_data[0], n_epochs=50, valid=mnist_data[1], test=mnist_data[2],
                       augmentation=None, checkpoint=checkpoint, sgd_type="nag",
                       lr_rule=lr_rule, momentum_rule=momentum_rule, batch_size=128,
                       train_loss="nll", valid_loss="nll", test_loss=["nll", "error"])
    """
    # `test_loss` previously defaulted to a shared mutable list; using a
    # None sentinel avoids cross-call aliasing while keeping the same
    # effective default.
    if test_loss is None:
        test_loss = ["error", "nll"]
    if batch_size is None:
        batch_size = self.batch_size
    # If the inputs are not `Data` objects, we need to wrap them before
    # the Trainer can make use of them.
    train_data = X if y is None else (X, y)
    train, valid, test = readers.to_data_partitions(
        train_data, valid, test, batch_size=batch_size)
    # If we didn't previously know how many features to expect in the
    # input, we can now build the layers of this neural network.
    if self.n_in is None:
        self._build_network(train.features.shape, batch_size=batch_size)
    # Reuse an existing trainer (resumed training) or configure a new one.
    if self.trainer is None:
        self.trainer = training.SupervisedTraining(
            sgd_type=sgd_type,
            lr_rule=lr_rule,
            momentum_rule=momentum_rule,
            sgd_max_grad_norm=sgd_max_grad_norm,
            max_epochs=n_epochs,
            validation_frequency=validation_frequency,
            validate_on_train=validate_on_train,
            train_loss=train_loss,
            valid_loss=valid_loss,
            test_loss=test_loss)
    trainer = self.trainer
    trained_network = trainer.fit(self, train, n_epochs=n_epochs, valid=valid, test=test,
                                  augmentation=augmentation, extra_metadata=extra_metadata,
                                  checkpoint=checkpoint, checkpoint_all=checkpoint_all)
    return trained_network
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date
import re
from random import randint
import string
class InstanceNames:
    """Helper for filling in FontLab / RoboFab font naming fields.

    A name spec is either a "/"-separated string or a sequence of four
    items: long family name, long style name, short (style-map) style
    name, and an abbreviated subfamily tag (e.g. "Rg", "Cn").

    TODO: Add proper italic flags.
    """

    # Build metadata shared by all instances; `setNames`/`setNamesRF`
    # override these on the class before instantiating.
    foundry = ""
    build = "0000"
    version = "1.0"
    year = date.today().year
    designer = "Christian Robertson"
    license = "Licensed under the Apache License, Version 2.0"
    licenseURL = "http://www.apache.org/licenses/LICENSE-2.0"

    def __init__(self, names):
        # Accept "Family/Style/ShortStyle/Abbrev" strings as well as
        # pre-split sequences.
        if isinstance(names, str):
            names = names.split("/")
        self.longfamily = names[0]
        self.longstyle = names[1]
        self.shortstyle = names[2]
        self.subfamilyAbbrev = names[3]
        self.width = self._getWidth()
        self.italic = self._getItalic()
        self.weight = self._getWeight()
        self.fullname = "%s %s" % (self.longfamily, self.longstyle)
        self.postscript = re.sub(' ', '', self.longfamily) + "-" + re.sub(' ', '', self.longstyle)
        # The subfamily abbreviation used to be appended to the family
        # name; current behavior keeps the plain family name.
        # if self.subfamilyAbbrev != "" and self.subfamilyAbbrev != None and self.subfamilyAbbrev != "Rg":
        #     self.shortfamily = "%s %s" %(self.longfamily, self.subfamilyAbbrev)
        # else:
        #     self.shortfamily = self.longfamily
        self.shortfamily = self.longfamily

    def setRFNames(self, f, version=1, versionMinor=0):
        """Write the naming fields onto a RoboFab font's ``f.info``."""
        f.info.familyName = self.longfamily
        f.info.styleName = self.longstyle
        f.info.styleMapFamilyName = self.shortfamily
        f.info.styleMapStyleName = self.shortstyle.lower()
        f.info.versionMajor = version
        f.info.versionMinor = versionMinor
        f.info.year = self.year
        f.info.copyright = "Font data copyright %s %s" % (self.foundry, self.year)
        f.info.trademark = "%s is a trademark of %s." % (self.longfamily, self.foundry)
        f.info.openTypeNameDesigner = "Christian Robertson"
        f.info.openTypeNameDesignerURL = self.foundry + ".com"
        f.info.openTypeNameManufacturer = self.foundry
        f.info.openTypeNameManufacturerURL = self.foundry + ".com"
        f.info.openTypeNameLicense = self.license
        f.info.openTypeNameLicenseURL = self.licenseURL
        f.info.openTypeNameVersion = "%i.%i" % (version, versionMinor)
        f.info.openTypeNameUniqueID = "%s:%s:%s" % (self.foundry, self.longfamily, self.year)
        # f.info.openTypeNameDescription = ""
        # f.info.openTypeNameCompatibleFullName = ""
        # f.info.openTypeNameSampleText = ""
        # Preferred names are only needed for non-default subfamilies.
        if (self.subfamilyAbbrev != "Rg"):
            f.info.openTypeNamePreferredFamilyName = self.longfamily
            f.info.openTypeNamePreferredSubfamilyName = self.longstyle
        f.info.macintoshFONDName = re.sub(' ', '', self.longfamily) + " " + re.sub(' ', '', self.longstyle)
        if self.italic:
            f.info.italicAngle = -12.0

    def setFLNames(self, flFont):
        """Write the naming fields and OpenType name records onto a FontLab font."""
        from FL import NameRecord
        flFont.family_name = self.shortfamily
        flFont.mac_compatible = self.fullname
        flFont.style_name = self.longstyle
        flFont.full_name = self.fullname
        flFont.font_name = self.postscript
        flFont.font_style = self._getStyleCode()
        flFont.menu_name = self.shortfamily
        flFont.apple_name = re.sub(' ', '', self.longfamily) + " " + re.sub(' ', '', self.longstyle)
        flFont.fond_id = randint(1000, 9999)
        flFont.pref_family_name = self.longfamily
        flFont.pref_style_name = self.longstyle
        flFont.weight = self.weight
        flFont.weight_code = self._getWeightCode(self.weight)
        flFont.width = self.width
        if self.italic:
            flFont.italic_angle = -12
        # Rebuild the OpenType name table from scratch.
        fn = flFont.fontnames
        fn.clean()
        #fn.append(NameRecord(0,1,0,0, "Font data copyright %s %s" %(self.foundry, self.year) ))
        #fn.append(NameRecord(0,3,1,1033, "Font data copyright %s %s" %(self.foundry, self.year) ))
        fn.append(NameRecord(0, 1, 0, 0, "Copyright %s %s Inc. All Rights Reserved." % (self.year, self.foundry)))
        fn.append(NameRecord(0, 3, 1, 1033, "Copyright %s %s Inc. All Rights Reserved." % (self.year, self.foundry)))
        fn.append(NameRecord(1, 1, 0, 0, self.longfamily))
        fn.append(NameRecord(1, 3, 1, 1033, self.shortfamily))
        fn.append(NameRecord(2, 1, 0, 0, self.longstyle))
        fn.append(NameRecord(2, 3, 1, 1033, self.longstyle))
        #fn.append(NameRecord(3,1,0,0, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        #fn.append(NameRecord(3,3,1,1033, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        fn.append(NameRecord(3, 1, 0, 0, "%s:%s:%s" % (self.foundry, self.fullname, self.year)))
        fn.append(NameRecord(3, 3, 1, 1033, "%s:%s:%s" % (self.foundry, self.fullname, self.year)))
        fn.append(NameRecord(4, 1, 0, 0, self.fullname))
        fn.append(NameRecord(4, 3, 1, 1033, self.fullname))
        #fn.append(NameRecord(5,1,0,0, "Version %s%s; %s" %(self.version, self.build, self.year) ))
        #fn.append(NameRecord(5,3,1,1033, "Version %s%s; %s" %(self.version, self.build, self.year) ))
        fn.append(NameRecord(5, 1, 0, 0, "Version %s; %s" % (self.version, self.year)))
        fn.append(NameRecord(5, 3, 1, 1033, "Version %s; %s" % (self.version, self.year)))
        fn.append(NameRecord(6, 1, 0, 0, self.postscript))
        fn.append(NameRecord(6, 3, 1, 1033, self.postscript))
        fn.append(NameRecord(7, 1, 0, 0, "%s is a trademark of %s." % (self.longfamily, self.foundry)))
        fn.append(NameRecord(7, 3, 1, 1033, "%s is a trademark of %s." % (self.longfamily, self.foundry)))
        fn.append(NameRecord(9, 1, 0, 0, self.foundry))
        fn.append(NameRecord(9, 3, 1, 1033, self.foundry))
        fn.append(NameRecord(11, 1, 0, 0, self.foundry + ".com"))
        fn.append(NameRecord(11, 3, 1, 1033, self.foundry + ".com"))
        fn.append(NameRecord(12, 1, 0, 0, self.designer))
        fn.append(NameRecord(12, 3, 1, 1033, self.designer))
        fn.append(NameRecord(13, 1, 0, 0, self.license))
        fn.append(NameRecord(13, 3, 1, 1033, self.license))
        fn.append(NameRecord(14, 1, 0, 0, self.licenseURL))
        fn.append(NameRecord(14, 3, 1, 1033, self.licenseURL))
        # Preferred family/style records only for non-default subfamilies.
        if (self.subfamilyAbbrev != "Rg"):
            fn.append(NameRecord(16, 3, 1, 1033, self.longfamily))
            fn.append(NameRecord(17, 3, 1, 1033, self.longstyle))
        #else:
        #fn.append(NameRecord(17,3,1,1033,""))
        #fn.append(NameRecord(18,1,0,0, re.sub("Italic","It", self.fullname)))

    def _getSubstyle(self, regex):
        """Return the first match of ``regex`` in the long style name, or ""."""
        substyle = re.findall(regex, self.longstyle)
        if len(substyle) > 0:
            return substyle[0]
        else:
            return ""

    def _getItalic(self):
        # "Obliq" catches abbreviated style names.
        return self._getSubstyle(r"Italic|Oblique|Obliq")

    def _getWeight(self):
        w = self._getSubstyle(r"Extrabold|Superbold|Super|Fat|Black|Bold|Semibold|Demibold|Medium|Light|Thin")
        if w == "":
            w = "Regular"
        return w

    def _getWidth(self):
        w = self._getSubstyle(r"Condensed|Extended|Narrow|Wide")
        if w == "":
            w = "Normal"
        return w

    def _getStyleCode(self):
        """Map the short style name to FontLab's font_style bitfield value."""
        styleCode = 0
        if self.shortstyle == "Bold":
            styleCode = 32
        if self.shortstyle == "Italic":
            styleCode = 1
        if self.shortstyle == "Bold Italic":
            styleCode = 33
        if self.longstyle == "Regular":
            styleCode = 64
        return styleCode

    def _getWeightCode(self, weight):
        """Map a weight name to its numeric (OS/2-style) weight class."""
        return {
            "Thin": 250,
            "Light": 300,
            "Medium": 500,
            "Semibold": 600,
            "Bold": 700,
            "Black": 900,
            "Fat": 900,
        }.get(weight, 400)
def setNames(f, names, foundry="", version="1.0", build="0000"):
    """Populate the FontLab name fields of font ``f`` from a name spec."""
    # Stash the build metadata on the class so the instance picks it up.
    InstanceNames.foundry = foundry
    InstanceNames.version = version
    InstanceNames.build = build
    InstanceNames(names).setFLNames(f)
def setNamesRF(f, names, foundry="", version="1.0"):
    """Populate the RoboFab ``f.info`` name fields from a name spec."""
    InstanceNames.foundry = foundry
    # "major.minor" version string -> two integers.
    major, minor = (int(part) for part in version.split("."))
    InstanceNames(names).setRFNames(f, version=major, versionMinor=minor)
| |
import decimal
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.common.exceptions as com
import ibis.util as util
from ibis import literal as L
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.NA.fillna(5), 5),
        (L(5).fillna(10), 5),
        (L(5).nullif(5), None),
        (L(10).nullif(5), 10),
    ],
)
@pytest.mark.xfail_unsupported
def test_fillna_nullif(backend, con, expr, expected):
    """fillna/nullif on literals should round-trip through the backend."""
    result = con.execute(expr)
    if expected is None:
        # The exact kind of null value used differs per backend (and version).
        # Example 1: Pandas returns np.nan while BigQuery returns None.
        # Example 2: PySpark returns np.nan if pyspark==3.0.0, but returns None
        # if pyspark <=3.0.0.
        # TODO: Make this behavior consistent (#2365)
        assert pd.isna(result)
    else:
        assert result == expected
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_isna(backend, alltypes):
    """Filtering on isnan() should match pandas' isna() row mask."""
    table = alltypes.mutate(na_col=np.nan)
    table = table.mutate(none_col=None)
    table = table.mutate(none_col=table['none_col'].cast('float64'))
    baseline = table.execute()
    for col in ('na_col', 'none_col'):
        got = table[table[col].isnan()].execute().reset_index(drop=True)
        want = baseline[baseline[col].isna()].reset_index(drop=True)
        backend.assert_frame_equal(got, want)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_fillna(backend, alltypes):
    """Column-level fillna should match pandas' fillna on the same data."""
    table = alltypes.mutate(na_col=np.nan)
    table = table.mutate(none_col=None)
    table = table.mutate(none_col=table['none_col'].cast('float64'))
    baseline = table.execute()
    for col in ('na_col', 'none_col'):
        got = (
            table.mutate(filled=table[col].fillna(0.0))
            .execute()
            .reset_index(drop=True)
        )
        want = baseline.assign(
            filled=baseline[col].fillna(0.0)
        ).reset_index(drop=True)
        backend.assert_frame_equal(got, want)
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.coalesce(5, None, 4), 5),
        (ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
        (ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
    ],
)
@pytest.mark.xfail_unsupported
def test_coalesce(backend, con, expr, expected):
    """coalesce should return the first non-null argument."""
    result = con.execute(expr)
    # Impala returns decimal.Decimal, and Decimal('5.56') != 5.56, so
    # compare against a Decimal built from the string form in that case.
    comparand = (
        decimal.Decimal(str(expected))
        if isinstance(result, decimal.Decimal)
        else expected
    )
    assert result == comparand
@pytest.mark.skip_backends(['dask'])  # TODO - identicalTo - #2553
@pytest.mark.xfail_unsupported
def test_identical_to(backend, alltypes, con, sorted_df):
    """identical_to treats two NULLs as equal, unlike ordinary ==."""
    ordered = alltypes.sort_by('id')
    ident = ordered.tinyint_col.identical_to(ordered.double_col)
    expr = ordered['id', ident.name('tmp')].sort_by('id')
    result = expr.execute().tmp
    cols = sorted_df[['tinyint_col', 'double_col']]
    both_null = cols.tinyint_col.isnull() & cols.double_col.isnull()
    expected = both_null | (cols.tinyint_col == cols.double_col)
    expected = backend.default_series_rename(expected)
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('column', 'elements'),
    [
        ('int_col', [1, 2, 3]),
        ('int_col', (1, 2, 3)),
        ('string_col', ['1', '2', '3']),
        ('string_col', ('1', '2', '3')),
        ('int_col', {1}),
        ('int_col', frozenset({1})),
    ],
)
@pytest.mark.xfail_unsupported
def test_isin(backend, alltypes, sorted_df, column, elements):
    """isin should accept any container type and match pandas' isin."""
    ordered = alltypes.sort_by('id')
    membership = ordered[column].isin(elements).name('tmp')
    result = ordered['id', membership].sort_by('id').execute().tmp
    expected = backend.default_series_rename(sorted_df[column].isin(elements))
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('column', 'elements'),
    [
        ('int_col', [1, 2, 3]),
        ('int_col', (1, 2, 3)),
        ('string_col', ['1', '2', '3']),
        ('string_col', ('1', '2', '3')),
        ('int_col', {1}),
        ('int_col', frozenset({1})),
    ],
)
@pytest.mark.xfail_unsupported
def test_notin(backend, alltypes, sorted_df, column, elements):
    """notin should be the complement of isin for any container type."""
    ordered = alltypes.sort_by('id')
    non_membership = ordered[column].notin(elements).name('tmp')
    result = ordered['id', non_membership].sort_by('id').execute().tmp
    expected = backend.default_series_rename(~sorted_df[column].isin(elements))
    backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('predicate_fn', 'expected_fn'),
    [
        (lambda t: t['bool_col'], lambda df: df['bool_col']),
        (lambda t: ~t['bool_col'], lambda df: ~df['bool_col']),
    ],
)
@pytest.mark.skip_backends(['dask'])  # TODO - sorting - #2553
@pytest.mark.xfail_unsupported
def test_filter(backend, alltypes, sorted_df, predicate_fn, expected_fn):
    """Boolean-column predicates should match the pandas mask result."""
    ordered = alltypes.sort_by('id')
    result = ordered[predicate_fn(ordered)].sort_by('id').execute()
    expected = sorted_df[expected_fn(sorted_df)]
    backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['dask', 'pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_filter_with_window_op(backend, alltypes, sorted_df):
    """Filtering on a windowed aggregate should match pandas groupby-filter."""
    table = alltypes.sort_by('id')
    window = ibis.window(group_by=table.id)
    filtered = table.filter(
        lambda t: t['id'].mean().over(window) > 3
    ).sort_by('id')
    result = filtered.execute()
    expected = (
        sorted_df.groupby(['id'])
        .filter(lambda t: t['id'].mean() > 3)
        .reset_index(drop=True)
    )
    backend.assert_frame_equal(result, expected)
@pytest.mark.xfail_unsupported
def test_case_where(backend, alltypes, df):
    """A searched CASE expression should match equivalent pandas .loc writes."""
    new_col = (
        ibis.case()
        .when(alltypes['int_col'] == 1, 20)
        .when(alltypes['int_col'] == 0, 10)
        .else_(0)
        .end()
        .cast('int64')
    )
    result = alltypes.mutate(new_col=new_col).execute()
    expected = df.copy()
    expected['new_col'] = 0
    expected.loc[expected['int_col'] == 1, 'new_col'] = 20
    expected.loc[expected['int_col'] == 0, 'new_col'] = 10
    backend.assert_frame_equal(result, expected)
# Pr 2635
@pytest.mark.xfail_unsupported
@pytest.mark.skip_backends(['postgres'])
def test_select_filter_mutate(backend, alltypes, df):
    """Test that select, filter and mutate are executed in right order.

    Before Pr 2635, try_fusion in analysis.py would fuse these operations
    together in a way that the order of the operations were wrong. (mutate
    was executed before filter).
    """
    expr = alltypes
    # Prepare the float_col so that filter must execute
    # before the cast to get the correct result.
    expr = expr.mutate(
        float_col=ibis.case()
        .when(expr['bool_col'], expr['float_col'])
        .else_(np.nan)
        .end()
    )
    # Actual test: select -> filter -> mutate must run in this order.
    expr = expr[expr.columns]
    expr = expr[~expr['float_col'].isnan()]
    expr = expr.mutate(float_col=expr['float_col'].cast('int32'))
    result = expr.execute()
    expected = df.copy()
    expected.loc[~df['bool_col'], 'float_col'] = None
    expected = expected[~expected['float_col'].isna()]
    expected = expected.assign(float_col=expected['float_col'].astype('int32'))
    backend.assert_frame_equal(result, expected)
def test_fillna_invalid(alltypes):
    """fillna on a nonexistent column should raise an IbisTypeError."""
    expected_msg = r"value \['invalid_col'\] is not a field in.*"
    with pytest.raises(com.IbisTypeError, match=expected_msg):
        alltypes.fillna({'invalid_col': 0.0})
def test_dropna_invalid(alltypes):
    """dropna should reject unknown subset columns and bad `how` values."""
    expected_msg = r"value 'invalid_col' is not a field in.*"
    with pytest.raises(com.IbisTypeError, match=expected_msg):
        alltypes.dropna(subset=['invalid_col'])
    with pytest.raises(ValueError, match=r".*is not in.*"):
        alltypes.dropna(how='invalid')
@pytest.mark.parametrize(
    'replacements',
    [
        0.0,
        0,
        1,
        ({'na_col': 0.0}),
        ({'na_col': 1}),
        ({'none_col': 0.0}),
        ({'none_col': 1}),
    ],
)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_fillna_table(backend, alltypes, replacements):
    """Table-level fillna (scalar or per-column dict) should match pandas."""
    table = alltypes.mutate(na_col=np.nan)
    table = table.mutate(none_col=None)
    table = table.mutate(none_col=table['none_col'].cast('float64'))
    baseline = table.execute()
    got = table.fillna(replacements).execute().reset_index(drop=True)
    want = baseline.fillna(replacements).reset_index(drop=True)
    # check_dtype is False here because there are dtype diffs between
    # Pyspark and Pandas on Java 8 - filling the 'none_col' with an int
    # results in float in Pyspark, and int in Pandas. This diff does
    # not exist in Java 11.
    backend.assert_frame_equal(got, want, check_dtype=False)
@pytest.mark.parametrize(
    ('how', 'subset'),
    [
        ('any', None),
        ('any', []),
        ('any', ['int_col', 'na_col']),
        ('all', None),
        ('all', ['int_col', 'na_col']),
        ('all', 'none_col'),
    ],
)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_dropna_table(backend, alltypes, how, subset):
    """Table-level dropna with how/subset should match pandas' dropna."""
    table = alltypes.mutate(na_col=np.nan)
    table = table.mutate(none_col=None)
    table = table.mutate(none_col=table['none_col'].cast('float64'))
    baseline = table.execute()
    got = table.dropna(subset, how).execute().reset_index(drop=True)
    effective_subset = util.promote_list(subset) if subset else baseline.columns
    want = baseline.dropna(how=how, subset=effective_subset).reset_index(
        drop=True
    )
    # check_dtype is False here because there are dtype diffs between
    # Pyspark and Pandas on Java 8 - the 'bool_col' of an empty DataFrame
    # is type object in Pyspark, and type bool in Pandas. This diff does
    # not exist in Java 11.
    backend.assert_frame_equal(got, want, check_dtype=False)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import conv_utils
class LocallyConnected1D(Layer):
  """Locally-connected layer for 1D inputs.
  The `LocallyConnected1D` layer works similarly to
  the `Conv1D` layer, except that weights are unshared,
  that is, a different set of filters is applied at each different patch
  of the input.
  Example:
  ```python
  # apply a unshared weight convolution 1d of length 3 to a sequence with
  # 10 timesteps, with 64 output filters
  model = Sequential()
  model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
  # now model.output_shape == (None, 8, 64)
  # add a new conv1d on top
  model.add(LocallyConnected1D(32, 3))
  # now model.output_shape == (None, 6, 32)
  ```
  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number output of filters in the convolution).
      kernel_size: An integer or tuple/list of a single integer,
          specifying the length of the 1D convolution window.
      strides: An integer or tuple/list of a single integer,
          specifying the stride length of the convolution.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.
      padding: Currently only supports `"valid"` (case-insensitive).
          `"same"` may be supported in the future.
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation")..
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.
  Input shape:
      3D tensor with shape: `(batch_size, steps, input_dim)`
  Output shape:
      3D tensor with shape: `(batch_size, new_steps, filters)`
      `steps` value might have changed due to padding or strides.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(LocallyConnected1D, self).__init__(**kwargs)
    self.filters = filters
    # Normalize scalar arguments to 1-tuples so downstream code can index.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    # Only 'valid' padding is implemented; fail fast on anything else.
    if self.padding != 'valid':
      raise ValueError('Invalid border mode for LocallyConnected1D '
                       '(only "valid" is supported): ' + padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Inputs must be rank-3: (batch, steps, channels).
    self.input_spec = InputSpec(ndim=3)

  def build(self, input_shape):
    # Create the unshared weights once the input channel count is known.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    input_dim = input_shape[2]
    if input_dim is None:
      raise ValueError('Axis 2 of input should be fully-defined. '
                       'Found shape:', input_shape)
    output_length = conv_utils.conv_output_length(
        input_shape[1], self.kernel_size[0], self.padding, self.strides[0])
    # One independent filter bank per output position:
    # (positions, kernel_size * in_channels, filters).
    self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,
                         self.filters)
    self.kernel = self.add_weight(
        shape=self.kernel_shape,
        initializer=self.kernel_initializer,
        name='kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    if self.use_bias:
      # Bias is also unshared: one value per output position per filter.
      self.bias = self.add_weight(
          shape=(output_length, self.filters),
          initializer=self.bias_initializer,
          name='bias',
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None
    # Lock the input spec to the now-known channel count.
    self.input_spec = InputSpec(ndim=3, axes={2: input_dim})
    self.built = True

  def _compute_output_shape(self, input_shape):
    # Same length arithmetic as a Conv1D with 'valid' padding.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    length = conv_utils.conv_output_length(input_shape[1], self.kernel_size[0],
                                           self.padding, self.strides[0])
    return tensor_shape.TensorShape([input_shape[0], length, self.filters])

  def call(self, inputs):
    # Backend primitive applies a distinct kernel at each output position.
    output = K.local_conv1d(inputs, self.kernel, self.kernel_size, self.strides)
    if self.use_bias:
      output = K.bias_add(output, self.bias)
    if self.activation is not None:
      output = self.activation(output)
    return output

  def get_config(self):
    # Serialize constructor arguments so the layer can be re-created from
    # config. NOTE(review): `data_format` is not included here — confirm
    # whether that omission is intentional.
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(LocallyConnected1D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class LocallyConnected2D(Layer):
  """Locally-connected layer for 2D inputs.
  The `LocallyConnected2D` layer works similarly
  to the `Conv2D` layer, except that weights are unshared,
  that is, a different set of filters is applied at each
  different patch of the input.
  Examples:
  ```python
  # apply a 3x3 unshared weights convolution with 64 output filters on a
  32x32 image
  # with `data_format="channels_last"`:
  model = Sequential()
  model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
  # now model.output_shape == (None, 30, 30, 64)
  # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
  parameters
  # add a 3x3 unshared weights convolution on top, with 32 output filters:
  model.add(LocallyConnected2D(32, (3, 3)))
  # now model.output_shape == (None, 28, 28, 32)
  ```
  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number output of filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      padding: Currently only support `"valid"` (case-insensitive).
          `"same"` will be supported in future.
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation")..
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.
  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.
  Output shape:
      4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(LocallyConnected2D, self).__init__(**kwargs)
    self.filters = filters
    # Scalars are expanded to 2-tuples so kernel_size/strides always index.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    # Unshared weights make 'same' padding unsupported here; reject early.
    if self.padding != 'valid':
      raise ValueError('Invalid border mode for LocallyConnected2D '
                       '(only "valid" is supported): ' + padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=4)

  def build(self, input_shape):
    # Creates the unshared weights: one kernel matrix per output position.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      input_row, input_col = input_shape[1:-1]
      input_filter = input_shape[3]
    else:
      input_row, input_col = input_shape[2:]
      input_filter = input_shape[1]
    # Unshared weights need static spatial dims to size the kernel tensor.
    if input_row is None or input_col is None:
      raise ValueError('The spatial dimensions of the inputs to '
                       ' a LocallyConnected2D layer '
                       'should be fully-defined, but layer received '
                       'the inputs shape ' + str(input_shape))
    output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
                                               self.padding, self.strides[0])
    output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
                                               self.padding, self.strides[1])
    self.output_row = output_row
    self.output_col = output_col
    # One (kh * kw * in_filters) x filters weight matrix per output pixel.
    self.kernel_shape = (
        output_row * output_col,
        self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters)
    self.kernel = self.add_weight(
        shape=self.kernel_shape,
        initializer=self.kernel_initializer,
        name='kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    if self.use_bias:
      # Bias is also unshared: one bias value per output position per filter.
      self.bias = self.add_weight(
          shape=(output_row, output_col, self.filters),
          initializer=self.bias_initializer,
          name='bias',
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None
    if self.data_format == 'channels_first':
      self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
    else:
      self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
    self.built = True

  def _compute_output_shape(self, input_shape):
    # Returns the 4D output shape implied by kernel size, padding and stride.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      rows = input_shape[2]
      cols = input_shape[3]
    elif self.data_format == 'channels_last':
      rows = input_shape[1]
      cols = input_shape[2]
    rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                         self.padding, self.strides[0])
    cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                         self.padding, self.strides[1])
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], self.filters, rows, cols])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [input_shape[0], rows, cols, self.filters])

  def call(self, inputs):
    # Apply the unshared convolution, then bias and activation.
    output = K.local_conv2d(inputs,
                            self.kernel,
                            self.kernel_size,
                            self.strides,
                            (self.output_row, self.output_col),
                            self.data_format)
    if self.use_bias:
      output = K.bias_add(output, self.bias, data_format=self.data_format)
    output = self.activation(output)
    return output

  def get_config(self):
    # Serialize constructor arguments so the layer can be re-instantiated.
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(LocallyConnected2D, self).get_config()
    # Base config first so layer-specific keys win on any collision.
    return dict(list(base_config.items()) + list(config.items()))
| |
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
from oslo.config import cfg
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
# Tunables for libvirt VIF handling, registered under the [libvirt] group.
libvirt_vif_opts = [
    cfg.BoolOpt('use_virtio_for_bridges',
                default=True,
                help='Use virtio for bridge interfaces with KVM/QEMU',
                deprecated_group='DEFAULT',
                deprecated_name='libvirt_use_virtio_for_bridges'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
# Options owned by other modules that this module reads at runtime.
CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
# Since libvirt 0.9.11, <interface type='bridge'>
# supports OpenVSwitch natively.
LIBVIRT_OVS_VPORT_VERSION = 9011
# Prefix used when renaming a VIF device name to an ethX-style name.
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
    """Return True if *vif_model* is usable under *virt_type*.

    A ``None`` model is always accepted (the hypervisor picks a default).
    Raises UnsupportedVirtType for a virt type not in the known table.
    """
    # No explicit model requested: let libvirt/the hypervisor decide.
    if vif_model is None:
        return True
    emulated = ['ne2k_pci', 'pcnet', 'rtl8139', 'e1000']
    valid_models = {
        'qemu': ['virtio'] + emulated,
        'kvm': ['virtio'] + emulated,
        'xen': ['netfront'] + emulated,
        'lxc': [],
        'uml': [],
    }
    if virt_type not in valid_models:
        raise exception.UnsupportedVirtType(virt=virt_type)
    return vif_model in valid_models[virt_type]
class LibvirtBaseVIFDriver(object):
    """Common plumbing shared by all libvirt VIF drivers."""

    def __init__(self, get_connection):
        # Callable returning a libvirt connection; resolved lazily so the
        # driver can be constructed before libvirt is reachable.
        self.get_connection = get_connection
        self.libvirt_version = None

    def has_libvirt_version(self, want):
        """Return True when the connected libvirt is at least *want*."""
        if self.libvirt_version is None:
            # Query once and cache for subsequent checks.
            self.libvirt_version = self.get_connection().getLibVersion()
        return self.libvirt_version >= want

    def get_vif_devname(self, vif):
        """Return the host-side device name for *vif*."""
        try:
            return vif['devname']
        except KeyError:
            # Derive a name from the port id, truncated to the kernel limit.
            return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]

    def get_vif_devname_with_prefix(self, vif, prefix):
        """Return the device name with its 3-char prefix replaced by *prefix*."""
        return prefix + self.get_vif_devname(vif)[3:]

    def get_config(self, instance, vif, image_meta, inst_type):
        """Build the guest-side <interface> config for *vif*."""
        conf = vconfig.LibvirtConfigGuestInterface()
        # Default to letting libvirt / the hypervisor choose the model.
        model = None
        driver = None
        # An image-level 'hw_vif_model' property takes precedence.
        if image_meta:
            model = image_meta.get('properties', {}).get('hw_vif_model')
        # Else if the virt type is KVM/QEMU, use virtio according
        # to the global config parameter.
        if (model is None and
                CONF.libvirt.virt_type in ('kvm', 'qemu') and
                CONF.libvirt.use_virtio_for_bridges):
            model = "virtio"
        # Workaround libvirt bug, where it mistakenly
        # enables vhost mode, even for non-KVM guests.
        if model == "virtio" and CONF.libvirt.virt_type == "qemu":
            driver = "qemu"
        if not is_vif_model_valid_for_virt(CONF.libvirt.virt_type, model):
            raise exception.UnsupportedHardware(model=model,
                                                virt=CONF.libvirt.virt_type)
        designer.set_vif_guest_frontend_config(
            conf, vif['address'], model, driver)
        return conf

    def plug(self, instance, vif):
        """Hook for subclasses; base driver needs no host-side plumbing."""
        pass

    def unplug(self, instance, vif):
        """Hook for subclasses; base driver needs no host-side teardown."""
        pass
class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
    """Generic VIF driver for libvirt networking."""

    def get_bridge_name(self, vif):
        """Return the bridge name carried in the vif's network dict."""
        return vif['network']['bridge']

    def get_ovs_interfaceid(self, vif):
        """Return the OVS interface id, falling back to the port id."""
        return vif.get('ovs_interfaceid') or vif['id']

    def get_br_name(self, iface_id):
        """Return the per-VIF linux bridge name for hybrid plugging."""
        return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]

    def get_veth_pair_names(self, iface_id):
        """Return the (bridge-side, OVS-side) veth device names."""
        return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
                ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])

    def get_firewall_required(self):
        # TODO(berrange): Extend this to use information from VIF model
        # which can indicate whether the network provider (eg Neutron)
        # has already applied firewall filtering itself.
        if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
            return True
        return False

    def get_config_bridge(self, instance, vif, image_meta, inst_type):
        """Get VIF configurations for bridge type."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        designer.set_vif_host_backend_bridge_config(
            conf, self.get_bridge_name(vif),
            self.get_vif_devname(vif))
        # Per-instance nwfilter name, derived from the MAC without colons.
        mac_id = vif['address'].replace(':', '')
        name = "nova-instance-" + instance['name'] + "-" + mac_id
        if self.get_firewall_required():
            conf.filtername = name
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config_ovs_ethernet(self, instance, vif,
                                image_meta, inst_type):
        """Get VIF config for a plain tap device plugged into OVS manually."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        dev = self.get_vif_devname(vif)
        designer.set_vif_host_backend_ethernet_config(conf, dev)
        return conf

    def get_config_ovs_bridge(self, instance, vif, image_meta,
                              inst_type):
        """Get VIF config using libvirt's native OVS virtualport support."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        designer.set_vif_host_backend_ovs_config(
            conf, self.get_bridge_name(vif),
            self.get_ovs_interfaceid(vif),
            self.get_vif_devname(vif))
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config_ovs_hybrid(self, instance, vif, image_meta,
                              inst_type):
        """Get VIF config for hybrid OVS: guest attaches to a per-VIF bridge."""
        # Point the guest at the intermediate qbrXXX bridge, not the OVS one.
        newvif = copy.deepcopy(vif)
        newvif['network']['bridge'] = self.get_br_name(vif['id'])
        return self.get_config_bridge(instance, newvif,
                                      image_meta, inst_type)

    def get_config_ovs(self, instance, vif, image_meta, inst_type):
        """Choose the OVS config strategy based on firewall and libvirt."""
        if self.get_firewall_required():
            return self.get_config_ovs_hybrid(instance, vif,
                                              image_meta,
                                              inst_type)
        elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
            return self.get_config_ovs_bridge(instance, vif,
                                              image_meta,
                                              inst_type)
        else:
            return self.get_config_ovs_ethernet(instance, vif,
                                                image_meta,
                                                inst_type)

    def get_config_ivs_hybrid(self, instance, vif, image_meta,
                              inst_type):
        """Get VIF config for hybrid IVS: guest attaches to a per-VIF bridge."""
        newvif = copy.deepcopy(vif)
        newvif['network']['bridge'] = self.get_br_name(vif['id'])
        return self.get_config_bridge(instance,
                                      newvif,
                                      image_meta,
                                      inst_type)

    def get_config_ivs_ethernet(self, instance, vif, image_meta,
                                inst_type):
        """Get VIF config for a plain tap device plugged into IVS manually."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance,
                                      vif,
                                      image_meta,
                                      inst_type)
        dev = self.get_vif_devname(vif)
        designer.set_vif_host_backend_ethernet_config(conf, dev)
        return conf

    def get_config_ivs(self, instance, vif, image_meta, inst_type):
        """Choose the IVS config strategy based on firewall requirements."""
        if self.get_firewall_required():
            return self.get_config_ivs_hybrid(instance, vif,
                                              image_meta,
                                              inst_type)
        else:
            return self.get_config_ivs_ethernet(instance, vif,
                                                image_meta,
                                                inst_type)

    def get_config_802qbg(self, instance, vif, image_meta,
                          inst_type):
        """Get VIF config for an 802.1qbg (VEPA) virtualport."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        params = vif["qbg_params"]
        designer.set_vif_host_backend_802qbg_config(
            conf, vif['network'].get_meta('interface'),
            params['managerid'],
            params['typeid'],
            params['typeidversion'],
            params['instanceid'])
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config_802qbh(self, instance, vif, image_meta,
                          inst_type):
        """Get VIF config for an 802.1qbh (VN-Link) virtualport."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        params = vif["qbh_params"]
        designer.set_vif_host_backend_802qbh_config(
            conf, vif['network'].get_meta('interface'),
            params['profileid'])
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config_iovisor(self, instance, vif, image_meta,
                           inst_type):
        """Get VIF config for a PLUMgrid IO Visor tap device."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        dev = self.get_vif_devname(vif)
        designer.set_vif_host_backend_ethernet_config(conf, dev)
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config_midonet(self, instance, vif, image_meta,
                           inst_type):
        """Get VIF config for a MidoNet tap device."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        dev = self.get_vif_devname(vif)
        designer.set_vif_host_backend_ethernet_config(conf, dev)
        return conf

    def get_config_mlnx_direct(self, instance, vif, image_meta,
                               inst_type):
        """Get VIF config for a Mellanox direct-assigned device."""
        conf = super(LibvirtGenericVIFDriver,
                     self).get_config(instance, vif,
                                      image_meta, inst_type)
        devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
        designer.set_vif_host_backend_direct_config(conf, devname)
        designer.set_vif_bandwidth_config(conf, inst_type)
        return conf

    def get_config(self, instance, vif, image_meta, inst_type):
        """Dispatch to the per-vif-type config builder."""
        vif_type = vif['type']
        LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                    'vif=%(vif)s'),
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})
        if vif_type is None:
            raise exception.NovaException(
                _("vif_type parameter must be present "
                  "for this vif_driver implementation"))
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            return self.get_config_bridge(instance,
                                          vif,
                                          image_meta,
                                          inst_type)
        elif vif_type == network_model.VIF_TYPE_OVS:
            return self.get_config_ovs(instance,
                                       vif,
                                       image_meta,
                                       inst_type)
        elif vif_type == network_model.VIF_TYPE_802_QBG:
            return self.get_config_802qbg(instance,
                                          vif,
                                          image_meta,
                                          inst_type)
        elif vif_type == network_model.VIF_TYPE_802_QBH:
            return self.get_config_802qbh(instance,
                                          vif,
                                          image_meta,
                                          inst_type)
        elif vif_type == network_model.VIF_TYPE_IVS:
            return self.get_config_ivs(instance,
                                       vif,
                                       image_meta,
                                       inst_type)
        elif vif_type == network_model.VIF_TYPE_IOVISOR:
            return self.get_config_iovisor(instance,
                                           vif,
                                           image_meta,
                                           inst_type)
        elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT:
            return self.get_config_mlnx_direct(instance,
                                               vif,
                                               image_meta,
                                               inst_type)
        elif vif_type == network_model.VIF_TYPE_MIDONET:
            return self.get_config_midonet(instance,
                                           vif,
                                           image_meta,
                                           inst_type)
        else:
            raise exception.NovaException(
                _("Unexpected vif_type=%s") % vif_type)

    def plug_bridge(self, instance, vif):
        """Ensure that the bridge exists, and add VIF to it."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        network = vif['network']
        if (not network.get_meta('multi_host', False) and
                network.get_meta('should_create_bridge', False)):
            if network.get_meta('should_create_vlan', False):
                # VLAN networking: build the VLAN device and bridge together.
                iface = CONF.vlan_interface or \
                    network.get_meta('bridge_interface')
                LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
                          {'vlan': network.get_meta('vlan'),
                           'bridge': self.get_bridge_name(vif)},
                          instance=instance)
                linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
                    network.get_meta('vlan'),
                    self.get_bridge_name(vif),
                    iface)
            else:
                # Flat networking: bridge directly on the physical interface.
                iface = CONF.flat_interface or \
                    network.get_meta('bridge_interface')
                LOG.debug(_("Ensuring bridge %s"),
                          self.get_bridge_name(vif), instance=instance)
                linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
                    self.get_bridge_name(vif),
                    iface)

    def plug_ovs_ethernet(self, instance, vif):
        """Create a tap device and add it to the OVS bridge as a port."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        network = vif['network']
        iface_id = self.get_ovs_interfaceid(vif)
        dev = self.get_vif_devname(vif)
        linux_net.create_tap_dev(dev)
        linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                      dev, iface_id, vif['address'],
                                      instance['uuid'])

    def plug_ovs_bridge(self, instance, vif):
        """No manual plugging required."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)

    def plug_ovs_hybrid(self, instance, vif):
        """Plug using hybrid strategy
        Create a per-VIF linux bridge, then link that bridge to the OVS
        integration bridge via a veth device, setting up the other end
        of the veth device just like a normal OVS port. Then boot the
        VIF on the linux bridge using standard libvirt mechanisms.
        """
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        iface_id = self.get_ovs_interfaceid(vif)
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        if not linux_net.device_exists(br_name):
            # setfd 0 / stp off: no forwarding delay or spanning tree on a
            # two-port intermediate bridge.
            utils.execute('brctl', 'addbr', br_name, run_as_root=True)
            utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
            utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
        if not linux_net.device_exists(v2_name):
            linux_net._create_veth_pair(v1_name, v2_name)
            utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
            utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
            linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                          v2_name, iface_id, vif['address'],
                                          instance['uuid'])

    def plug_ovs(self, instance, vif):
        """Choose the OVS plug strategy based on firewall and libvirt."""
        if self.get_firewall_required():
            self.plug_ovs_hybrid(instance, vif)
        elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
            self.plug_ovs_bridge(instance, vif)
        else:
            self.plug_ovs_ethernet(instance, vif)

    def plug_ivs_ethernet(self, instance, vif):
        """Create a tap device and add it to IVS as a port."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        iface_id = self.get_ovs_interfaceid(vif)
        dev = self.get_vif_devname(vif)
        linux_net.create_tap_dev(dev)
        linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
                                      instance['uuid'])

    def plug_ivs_hybrid(self, instance, vif):
        """Plug using hybrid strategy (same as OVS)
        Create a per-VIF linux bridge, then link that bridge to the OVS
        integration bridge via a veth device, setting up the other end
        of the veth device just like a normal IVS port. Then boot the
        VIF on the linux bridge using standard libvirt mechanisms.
        """
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        iface_id = self.get_ovs_interfaceid(vif)
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        if not linux_net.device_exists(br_name):
            utils.execute('brctl', 'addbr', br_name, run_as_root=True)
            utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
            utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
        if not linux_net.device_exists(v2_name):
            linux_net._create_veth_pair(v1_name, v2_name)
            utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
            utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
            linux_net.create_ivs_vif_port(v2_name, iface_id, vif['address'],
                                          instance['uuid'])

    def plug_ivs(self, instance, vif):
        """Choose the IVS plug strategy based on firewall requirements."""
        if self.get_firewall_required():
            self.plug_ivs_hybrid(instance, vif)
        else:
            self.plug_ivs_ethernet(instance, vif)

    def plug_mlnx_direct(self, instance, vif):
        """Plug a Mellanox direct-assigned port via ebrctl."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        network = vif['network']
        vnic_mac = vif['address']
        device_id = instance['uuid']
        fabric = network['meta']['physical_network']
        dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
        try:
            utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
                          network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
                          run_as_root=True)
        except processutils.ProcessExecutionError:
            # Best-effort: log and continue rather than failing the boot.
            LOG.exception(_("Failed while plugging vif"), instance=instance)

    def plug_802qbg(self, instance, vif):
        """No manual plugging required; libvirt handles the virtualport."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)

    def plug_802qbh(self, instance, vif):
        """No manual plugging required; libvirt handles the virtualport."""
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)

    def plug_midonet(self, instance, vif):
        """Plug into MidoNet's network port
        Bind the vif to a MidoNet virtual port.
        """
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        dev = self.get_vif_devname(vif)
        port_id = vif['id']
        try:
            linux_net.create_tap_dev(dev)
            utils.execute('mm-ctl', '--bind-port', port_id, dev,
                          run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while plugging vif"), instance=instance)

    def plug_iovisor(self, instance, vif):
        """Plug using PLUMgrid IO Visor Driver
        Connect a network device to their respective
        Virtual Domain in PLUMgrid Platform.
        """
        super(LibvirtGenericVIFDriver,
              self).plug(instance, vif)
        dev = self.get_vif_devname(vif)
        iface_id = vif['id']
        linux_net.create_tap_dev(dev)
        net_id = vif['network']['id']
        tenant_id = instance["project_id"]
        try:
            utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
                          run_as_root=True)
            utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
                          'access_vm',
                          vif['network']['label'] + "_" + iface_id,
                          vif['address'], 'pgtag2=%s' % net_id,
                          'pgtag1=%s' % tenant_id, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while plugging vif"), instance=instance)

    def plug(self, instance, vif):
        """Dispatch to the per-vif-type plug method."""
        vif_type = vif['type']
        LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                    'vif=%(vif)s'),
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})
        if vif_type is None:
            raise exception.NovaException(
                _("vif_type parameter must be present "
                  "for this vif_driver implementation"))
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            self.plug_bridge(instance, vif)
        elif vif_type == network_model.VIF_TYPE_OVS:
            self.plug_ovs(instance, vif)
        elif vif_type == network_model.VIF_TYPE_802_QBG:
            self.plug_802qbg(instance, vif)
        elif vif_type == network_model.VIF_TYPE_802_QBH:
            self.plug_802qbh(instance, vif)
        elif vif_type == network_model.VIF_TYPE_IVS:
            self.plug_ivs(instance, vif)
        elif vif_type == network_model.VIF_TYPE_IOVISOR:
            self.plug_iovisor(instance, vif)
        elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT:
            self.plug_mlnx_direct(instance, vif)
        elif vif_type == network_model.VIF_TYPE_MIDONET:
            self.plug_midonet(instance, vif)
        else:
            raise exception.NovaException(
                _("Unexpected vif_type=%s") % vif_type)

    def unplug_bridge(self, instance, vif):
        """No manual unplugging required."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)

    def unplug_ovs_ethernet(self, instance, vif):
        """Unplug the VIF by deleting the port from the bridge."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        try:
            linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                          self.get_vif_devname(vif))
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_ovs_bridge(self, instance, vif):
        """No manual unplugging required."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)

    def unplug_ovs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy
        Unhook port from OVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        """
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            if linux_net.device_exists(br_name):
                utils.execute('brctl', 'delif', br_name, v1_name,
                              run_as_root=True)
                utils.execute('ip', 'link', 'set', br_name, 'down',
                              run_as_root=True)
                utils.execute('brctl', 'delbr', br_name,
                              run_as_root=True)
            linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                          v2_name)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_ovs(self, instance, vif):
        """Choose the OVS unplug strategy based on firewall and libvirt."""
        if self.get_firewall_required():
            self.unplug_ovs_hybrid(instance, vif)
        elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
            self.unplug_ovs_bridge(instance, vif)
        else:
            self.unplug_ovs_ethernet(instance, vif)

    def unplug_ivs_ethernet(self, instance, vif):
        """Unplug the VIF by deleting the port from the bridge."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        try:
            linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_ivs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy (same as OVS)
        Unhook port from IVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        """
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name, run_as_root=True)
            linux_net.delete_ivs_vif_port(v2_name)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_ivs(self, instance, vif):
        """Choose the IVS unplug strategy based on firewall requirements."""
        if self.get_firewall_required():
            self.unplug_ivs_hybrid(instance, vif)
        else:
            self.unplug_ivs_ethernet(instance, vif)

    def unplug_mlnx_direct(self, instance, vif):
        """Unplug a Mellanox direct-assigned port via ebrctl."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        network = vif['network']
        vnic_mac = vif['address']
        fabric = network['meta']['physical_network']
        try:
            utils.execute('ebrctl', 'del-port', fabric,
                          vnic_mac, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_802qbg(self, instance, vif):
        """No manual unplugging required; libvirt handles the virtualport."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)

    def unplug_802qbh(self, instance, vif):
        """No manual unplugging required; libvirt handles the virtualport."""
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)

    def unplug_midonet(self, instance, vif):
        """Unplug from MidoNet network port
        Unbind the vif from a MidoNet virtual port.
        """
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        dev = self.get_vif_devname(vif)
        port_id = vif['id']
        try:
            utils.execute('mm-ctl', '--unbind-port', port_id,
                          run_as_root=True)
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug_iovisor(self, instance, vif):
        """Unplug using PLUMgrid IO Visor Driver
        Delete network device and to their respective
        connection to the Virtual Domain in PLUMgrid Platform.
        """
        super(LibvirtGenericVIFDriver,
              self).unplug(instance, vif)
        iface_id = vif['id']
        dev = self.get_vif_devname(vif)
        try:
            utils.execute('ifc_ctl', 'gateway', 'ifdown',
                          dev, 'access_vm',
                          vif['network']['label'] + "_" + iface_id,
                          vif['address'], run_as_root=True)
            utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
                          run_as_root=True)
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            LOG.exception(_("Failed while unplugging vif"), instance=instance)

    def unplug(self, instance, vif):
        """Dispatch to the per-vif-type unplug method."""
        vif_type = vif['type']
        LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                    'vif=%(vif)s'),
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})
        if vif_type is None:
            raise exception.NovaException(
                _("vif_type parameter must be present "
                  "for this vif_driver implementation"))
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            self.unplug_bridge(instance, vif)
        elif vif_type == network_model.VIF_TYPE_OVS:
            self.unplug_ovs(instance, vif)
        elif vif_type == network_model.VIF_TYPE_802_QBG:
            self.unplug_802qbg(instance, vif)
        elif vif_type == network_model.VIF_TYPE_802_QBH:
            self.unplug_802qbh(instance, vif)
        elif vif_type == network_model.VIF_TYPE_IVS:
            self.unplug_ivs(instance, vif)
        elif vif_type == network_model.VIF_TYPE_IOVISOR:
            self.unplug_iovisor(instance, vif)
        elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT:
            self.unplug_mlnx_direct(instance, vif)
        elif vif_type == network_model.VIF_TYPE_MIDONET:
            self.unplug_midonet(instance, vif)
        else:
            raise exception.NovaException(
                _("Unexpected vif_type=%s") % vif_type)
| |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
from nova.api.validation.parameter_types import multi_params
from nova.objects import instance
# Schema for one entry of the legacy (v1-style) ``block_device_mapping``
# request parameter of server create.
legacy_block_device_mapping = {
    'type': 'object',
    'properties': {
        'virtual_name': {
            'type': 'string', 'maxLength': 255,
        },
        'volume_id': parameter_types.volume_id,
        'snapshot_id': parameter_types.image_id,
        'volume_size': parameter_types.volume_size,
        # Do not allow empty device names and number values and
        # containing spaces(defined in nova/block_device.py:from_api())
        # NOTE: the previous pattern '^[a-zA-Z0-9._-r/]*$' contained the
        # character-class range '_-r', which accidentally matched the
        # backtick character and did NOT match a literal '-'. Spell the
        # intended characters out, with '-' last so it is literal.
        'device_name': {
            'type': 'string', 'minLength': 1, 'maxLength': 255,
            'pattern': '^[a-zA-Z0-9._/-]*$',
        },
        # Defined as boolean in nova/block_device.py:from_api()
        'delete_on_termination': parameter_types.boolean,
        'no_device': {},
        # Defined as mediumtext in column "connection_info" in table
        # "block_device_mapping"
        'connection_info': {
            'type': 'string', 'maxLength': 16777215
        },
    },
    'additionalProperties': False
}
# Properties introduced by the v2-style block device mapping format;
# merged on top of the legacy schema below to form block_device_mapping_v2.
block_device_mapping_v2_new_item = {
    # defined in nova/block_device.py:from_api()
    # NOTE: Client can specify the Id with the combination of
    # source_type and uuid, or a single attribute like volume_id/
    # image_id/snapshot_id.
    'source_type': {
        'type': 'string',
        'enum': ['volume', 'image', 'snapshot', 'blank'],
    },
    'uuid': {
        'type': 'string', 'minLength': 1, 'maxLength': 255,
        'pattern': '^[a-zA-Z0-9._-]*$',
    },
    'image_id': parameter_types.image_id,
    'destination_type': {
        'type': 'string',
        'enum': ['local', 'volume'],
    },
    # Defined as varchar(255) in column "guest_format" in table
    # "block_device_mapping"
    'guest_format': {
        'type': 'string', 'maxLength': 255,
    },
    # Defined as varchar(255) in column "device_type" in table
    # "block_device_mapping"
    'device_type': {
        'type': 'string', 'maxLength': 255,
    },
    # Defined as varchar(255) in column "disk_bus" in table
    # "block_device_mapping"
    'disk_bus': {
        'type': 'string', 'maxLength': 255,
    },
    # Defined as integer in nova/block_device.py:from_api()
    # NOTE(mriedem): boot_index=None is also accepted for backward
    # compatibility with the legacy v2 API.
    'boot_index': {
        'type': ['integer', 'string', 'null'],
        'pattern': '^-?[0-9]+$',
    },
}
# The v2 schema is the legacy per-entry schema extended with the new keys.
block_device_mapping_v2 = copy.deepcopy(legacy_block_device_mapping)
block_device_mapping_v2['properties'].update(block_device_mapping_v2_new_item)
# Scheduler hints accepted under both the 'os:scheduler_hints' and
# 'OS-SCH-HNT:scheduler_hints' keys of the create-server body.
_hints = {
    'type': 'object',
    'properties': {
        'group': {
            'type': 'string',
            'format': 'uuid'
        },
        'different_host': {
            # NOTE: The value of 'different_host' is the set of server
            # uuids where a new server is scheduled on a different host.
            # A user can specify one server as string parameter and should
            # specify multiple servers as array parameter instead.
            'oneOf': [
                {
                    'type': 'string',
                    'format': 'uuid'
                },
                {
                    'type': 'array',
                    'items': parameter_types.server_id
                }
            ]
        },
        'same_host': {
            # NOTE: The value of 'same_host' is the set of server
            # uuids where a new server is scheduled on the same host.
            'type': ['string', 'array'],
            'items': parameter_types.server_id
        },
        'query': {
            # NOTE: The value of 'query' is converted to dict data with
            # jsonutils.loads() and used for filtering hosts.
            'type': ['string', 'object'],
        },
        # NOTE: The value of 'target_cell' is the cell name what cell
        # a new server is scheduled on.
        'target_cell': parameter_types.name,
        'different_cell': {
            'type': ['string', 'array'],
            'items': {
                'type': 'string'
            }
        },
        'build_near_host_ip': parameter_types.ip_address,
        'cidr': {
            'type': 'string',
            # NOTE: the pattern previously used '\/', an invalid string
            # escape (DeprecationWarning on Python >= 3.6); in a regex,
            # '\/' matches a bare '/', so writing '/' directly is
            # behavior-identical and warning-free.
            'pattern': '^/[0-9a-f.:]+$'
        },
    },
    # NOTE: As this Mail:
    # http://lists.openstack.org/pipermail/openstack-dev/2015-June/067996.html
    # pointed out the limit the scheduler-hints in the API is problematic. So
    # relax it.
    'additionalProperties': True
}
# Base request-body schema for server create (POST /servers) in the
# v2.1 API; later microversions copy and mutate this dict.
base_create = {
    'type': 'object',
    'properties': {
        'server': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                # NOTE(gmann): In case of boot from volume, imageRef was
                # allowed as the empty string also So keeping the same
                # behavior and allow empty string in case of boot from
                # volume only. Python code make sure empty string is
                # not allowed for other cases.
                'imageRef': parameter_types.image_id_or_empty_string,
                'flavorRef': parameter_types.flavor_ref,
                'adminPass': parameter_types.admin_password,
                'metadata': parameter_types.metadata,
                'networks': {
                    'type': 'array',
                    'items': {
                        'type': 'object',
                        'properties': {
                            'fixed_ip': parameter_types.ip_address,
                            'port': {
                                'oneOf': [{'type': 'string', 'format': 'uuid'},
                                          {'type': 'null'}]
                            },
                            'uuid': {'type': 'string'},
                        },
                        'additionalProperties': False,
                    }
                },
                'OS-DCF:diskConfig': parameter_types.disk_config,
                'accessIPv4': parameter_types.accessIPv4,
                'accessIPv6': parameter_types.accessIPv6,
                'personality': parameter_types.personality,
                'availability_zone': parameter_types.name,
                'block_device_mapping': {
                    'type': 'array',
                    'items': legacy_block_device_mapping
                },
                'block_device_mapping_v2': {
                    'type': 'array',
                    'items': block_device_mapping_v2
                },
                'config_drive': parameter_types.boolean,
                'key_name': parameter_types.name,
                'min_count': parameter_types.positive_integer,
                'max_count': parameter_types.positive_integer,
                'return_reservation_id': parameter_types.boolean,
                'security_groups': {
                    'type': 'array',
                    'items': {
                        'type': 'object',
                        'properties': {
                            # NOTE(oomichi): allocate_for_instance() of
                            # neutronv2/api.py gets security_group names
                            # or UUIDs from this parameter.
                            # parameter_types.name allows both format.
                            'name': parameter_types.name,
                        },
                        'additionalProperties': False,
                    }
                },
                'user_data': {
                    'type': 'string',
                    'format': 'base64',
                    'maxLength': 65535
                }
            },
            'required': ['name', 'flavorRef'],
            'additionalProperties': False,
        },
        'os:scheduler_hints': _hints,
        'OS-SCH-HNT:scheduler_hints': _hints,
    },
    'required': ['server'],
    'additionalProperties': False,
}
# Legacy v2.0 API variant: names may carry leading/trailing spaces and
# user_data is additionally allowed to be null.
base_create_v20 = copy.deepcopy(base_create)
base_create_v20['properties']['server'][
    'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
base_create_v20['properties']['server']['properties'][
    'availability_zone'] = parameter_types.name_with_leading_trailing_spaces
base_create_v20['properties']['server']['properties'][
    'key_name'] = parameter_types.name_with_leading_trailing_spaces
base_create_v20['properties']['server']['properties'][
    'security_groups']['items']['properties']['name'] = (
    parameter_types.name_with_leading_trailing_spaces)
base_create_v20['properties']['server']['properties'][
    'user_data'] = {
    'oneOf': [{'type': 'string', 'format': 'base64', 'maxLength': 65535},
              {'type': 'null'},
              ],
}
# 2.19 adds an optional server description.
base_create_v219 = copy.deepcopy(base_create)
base_create_v219['properties']['server'][
    'properties']['description'] = parameter_types.description
# 2.32 adds device tags on both networks and block devices.
base_create_v232 = copy.deepcopy(base_create_v219)
base_create_v232['properties']['server'][
    'properties']['networks']['items'][
    'properties']['tag'] = parameter_types.tag
base_create_v232['properties']['server'][
    'properties']['block_device_mapping_v2']['items'][
    'properties']['tag'] = parameter_types.tag
# NOTE(artom) the following conditional was merged as
# "if version == '2.32'" The intent all along was to check whether
# version was greater than or equal to 2.32. In other words, we wanted
# to support tags in versions 2.32 and up, but ended up supporting them
# in version 2.32 only. Since we need a new microversion to add request
# body attributes, tags have been re-added in version 2.42.
# NOTE(gmann) Below schema 'base_create_v233' is added (builds on 2.19 schema)
# to keep the above mentioned behavior while merging the extension schema code
# into server schema file. Below is the ref code where BDM tag was originally
# got added for 2.32 microversion *only*.
# Ref- https://github.com/openstack/nova/blob/
#      9882a60e69a5ab8da314a199a56defc05098b743/nova/api/
#      openstack/compute/block_device_mapping.py#L71
base_create_v233 = copy.deepcopy(base_create_v219)
base_create_v233['properties']['server'][
    'properties']['networks']['items'][
    'properties']['tag'] = parameter_types.tag
# 2.37 builds on 2.32 and makes the following changes:
# 1. server.networks is required
# 2. server.networks is now either an enum or a list
# 3. server.networks.uuid is now required to be a uuid
# (the 'none'/'auto' enum values request no / automatic allocation).
base_create_v237 = copy.deepcopy(base_create_v233)
base_create_v237['properties']['server']['required'].append('networks')
base_create_v237['properties']['server']['properties']['networks'] = {
    'oneOf': [
        {'type': 'array',
         'items': {
             'type': 'object',
             'properties': {
                 'fixed_ip': parameter_types.ip_address,
                 'port': {
                     'oneOf': [{'type': 'string', 'format': 'uuid'},
                               {'type': 'null'}]
                 },
                 'uuid': {'type': 'string', 'format': 'uuid'},
             },
             'additionalProperties': False,
         },
         },
        {'type': 'string', 'enum': ['none', 'auto']},
    ]}
# 2.42 builds on 2.37 and re-introduces the tag field to the list of network
# objects (and to block_device_mapping_v2 entries, below).
base_create_v242 = copy.deepcopy(base_create_v237)
base_create_v242['properties']['server']['properties']['networks'] = {
    'oneOf': [
        {'type': 'array',
         'items': {
             'type': 'object',
             'properties': {
                 'fixed_ip': parameter_types.ip_address,
                 'port': {
                     'oneOf': [{'type': 'string', 'format': 'uuid'},
                               {'type': 'null'}]
                 },
                 'uuid': {'type': 'string', 'format': 'uuid'},
                 'tag': parameter_types.tag,
             },
             'additionalProperties': False,
         },
         },
        {'type': 'string', 'enum': ['none', 'auto']},
    ]}
base_create_v242['properties']['server'][
    'properties']['block_device_mapping_v2']['items'][
    'properties']['tag'] = parameter_types.tag
# 2.52 builds on 2.42 and makes the following changes:
# Allowing adding tags to instances when booting
base_create_v252 = copy.deepcopy(base_create_v242)
base_create_v252['properties']['server']['properties']['tags'] = {
    "type": "array",
    "items": parameter_types.tag,
    "maxItems": instance.MAX_TAG_COUNT
}
# 2.57 builds on 2.52 and removes the personality parameter.
base_create_v257 = copy.deepcopy(base_create_v252)
base_create_v257['properties']['server']['properties'].pop('personality')
# 2.63 builds on 2.57 and makes the following changes:
# Allowing adding trusted certificates to instances when booting
base_create_v263 = copy.deepcopy(base_create_v257)
base_create_v263['properties']['server']['properties'][
    'trusted_image_certificates'] = parameter_types.trusted_certs
# 2.67: Add volume type in block_device_mapping_v2.
base_create_v267 = copy.deepcopy(base_create_v263)
base_create_v267['properties']['server']['properties'][
    'block_device_mapping_v2']['items'][
    'properties']['volume_type'] = parameter_types.volume_type
# Request-body schema for server update (PUT /servers/{id}).
base_update = {
    'type': 'object',
    'properties': {
        'server': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'OS-DCF:diskConfig': parameter_types.disk_config,
                'accessIPv4': parameter_types.accessIPv4,
                'accessIPv6': parameter_types.accessIPv6,
            },
            'additionalProperties': False,
        },
    },
    'required': ['server'],
    'additionalProperties': False,
}
# v2.0 legacy API allows leading/trailing spaces in the name.
base_update_v20 = copy.deepcopy(base_update)
base_update_v20['properties']['server'][
    'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
# 2.19 adds an optional server description on update.
base_update_v219 = copy.deepcopy(base_update)
base_update_v219['properties']['server'][
    'properties']['description'] = parameter_types.description
# Request-body schema for the rebuild server action.
base_rebuild = {
    'type': 'object',
    'properties': {
        'rebuild': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'imageRef': parameter_types.image_id,
                'adminPass': parameter_types.admin_password,
                'metadata': parameter_types.metadata,
                'preserve_ephemeral': parameter_types.boolean,
                'OS-DCF:diskConfig': parameter_types.disk_config,
                'accessIPv4': parameter_types.accessIPv4,
                'accessIPv6': parameter_types.accessIPv6,
                'personality': parameter_types.personality,
            },
            'required': ['imageRef'],
            'additionalProperties': False,
        },
    },
    'required': ['rebuild'],
    'additionalProperties': False,
}
# v2.0 legacy API allows leading/trailing spaces in the name.
base_rebuild_v20 = copy.deepcopy(base_rebuild)
base_rebuild_v20['properties']['rebuild'][
    'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
# 2.19 adds an optional description on rebuild.
base_rebuild_v219 = copy.deepcopy(base_rebuild)
base_rebuild_v219['properties']['rebuild'][
    'properties']['description'] = parameter_types.description
# 2.54 allows changing (or unsetting, via null) the key pair on rebuild.
base_rebuild_v254 = copy.deepcopy(base_rebuild_v219)
base_rebuild_v254['properties']['rebuild'][
    'properties']['key_name'] = parameter_types.name_or_none
# 2.57 builds on 2.54 and makes the following changes:
# 1. Remove the personality parameter.
# 2. Add the user_data parameter which is nullable so user_data can be reset.
base_rebuild_v257 = copy.deepcopy(base_rebuild_v254)
base_rebuild_v257['properties']['rebuild']['properties'].pop('personality')
base_rebuild_v257['properties']['rebuild']['properties']['user_data'] = ({
    'oneOf': [
        {'type': 'string', 'format': 'base64', 'maxLength': 65535},
        {'type': 'null'}
    ]
})
# 2.63 builds on 2.57 and makes the following changes:
# Allowing adding trusted certificates to instances when rebuilding
base_rebuild_v263 = copy.deepcopy(base_rebuild_v257)
base_rebuild_v263['properties']['rebuild']['properties'][
    'trusted_image_certificates'] = parameter_types.trusted_certs
# Request-body schema for the resize server action.
resize = {
    'type': 'object',
    'properties': {
        'resize': {
            'type': 'object',
            'properties': {
                'flavorRef': parameter_types.flavor_ref,
                'OS-DCF:diskConfig': parameter_types.disk_config,
            },
            'required': ['flavorRef'],
            'additionalProperties': False,
        },
    },
    'required': ['resize'],
    'additionalProperties': False,
}
# Request-body schema for the createImage (snapshot) server action.
create_image = {
    'type': 'object',
    'properties': {
        'createImage': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'metadata': parameter_types.metadata
            },
            'required': ['name'],
            'additionalProperties': False
        }
    },
    'required': ['createImage'],
    'additionalProperties': False
}
# v2.0 legacy API allows leading/trailing spaces in the image name.
create_image_v20 = copy.deepcopy(create_image)
create_image_v20['properties']['createImage'][
    'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
# Request-body schema for the reboot server action; the reboot type is
# case-insensitive, hence the explicit enum of casings.
reboot = {
    'type': 'object',
    'properties': {
        'reboot': {
            'type': 'object',
            'properties': {
                'type': {
                    'type': 'string',
                    'enum': ['HARD', 'Hard', 'hard', 'SOFT', 'Soft', 'soft']
                }
            },
            'required': ['type'],
            'additionalProperties': False
        }
    },
    'required': ['reboot'],
    'additionalProperties': False
}
# Request-body schema for trigger_crash_dump (2.17+): the action key must
# be present with a null value.
trigger_crash_dump = {
    'type': 'object',
    'properties': {
        'trigger_crash_dump': {
            'type': 'null'
        }
    },
    'required': ['trigger_crash_dump'],
    'additionalProperties': False
}
# Query parameters that correspond to joined tables; accepted in the list
# request only so they can be rejected with HTTP 400 later on.
JOINED_TABLE_QUERY_PARAMS_SERVERS = {
    'block_device_mapping': parameter_types.common_query_param,
    'services': parameter_types.common_query_param,
    'metadata': parameter_types.common_query_param,
    'system_metadata': parameter_types.common_query_param,
    'info_cache': parameter_types.common_query_param,
    'security_groups': parameter_types.common_query_param,
    'pci_devices': parameter_types.common_query_param
}
# These fields are valid values for sort_keys before we start
# using schema validation, but are considered to be bad values
# and disabled to use. In order to avoid backward incompatibility,
# they are ignored instead of return HTTP 400.
SERVER_LIST_IGNORE_SORT_KEY = [
    'architecture', 'cell_name', 'cleaned', 'default_ephemeral_device',
    'default_swap_device', 'deleted', 'deleted_at', 'disable_terminate',
    'ephemeral_gb', 'ephemeral_key_uuid', 'id', 'key_data', 'launched_on',
    'locked', 'memory_mb', 'os_type', 'reservation_id', 'root_gb',
    'shutdown_terminate', 'user_data', 'vcpus', 'vm_mode'
]
# Schema for a single sort_key value: the supported keys plus the ignored
# (but still accepted) legacy keys above.
VALID_SORT_KEYS = {
    "type": "string",
    "enum": ['access_ip_v4', 'access_ip_v6', 'auto_disk_config',
             'availability_zone', 'config_drive', 'created_at',
             'display_description', 'display_name', 'host', 'hostname',
             'image_ref', 'instance_type_id', 'kernel_id', 'key_name',
             'launch_index', 'launched_at', 'locked_by', 'node', 'power_state',
             'progress', 'project_id', 'ramdisk_id', 'root_device_name',
             'task_state', 'terminated_at', 'updated_at', 'user_id', 'uuid',
             'vm_state'] +
            SERVER_LIST_IGNORE_SORT_KEY
}
# Query-string schema for listing servers (GET /servers) in v2.1;
# later microversions copy and extend it.
query_params_v21 = {
    'type': 'object',
    'properties': {
        'user_id': parameter_types.common_query_param,
        'project_id': parameter_types.common_query_param,
        # The alias of project_id. It should be removed in the
        # future with microversion bump.
        'tenant_id': parameter_types.common_query_param,
        'launch_index': parameter_types.common_query_param,
        # The alias of image. It should be removed in the
        # future with microversion bump.
        'image_ref': parameter_types.common_query_param,
        'image': parameter_types.common_query_param,
        'kernel_id': parameter_types.common_query_regex_param,
        'ramdisk_id': parameter_types.common_query_regex_param,
        'hostname': parameter_types.common_query_regex_param,
        'key_name': parameter_types.common_query_regex_param,
        'power_state': parameter_types.common_query_regex_param,
        'vm_state': parameter_types.common_query_param,
        'task_state': parameter_types.common_query_param,
        'host': parameter_types.common_query_param,
        'node': parameter_types.common_query_regex_param,
        'flavor': parameter_types.common_query_regex_param,
        'reservation_id': parameter_types.common_query_regex_param,
        'launched_at': parameter_types.common_query_regex_param,
        'terminated_at': parameter_types.common_query_regex_param,
        'availability_zone': parameter_types.common_query_regex_param,
        # NOTE(alex_xu): This is pattern matching, it didn't get any benefit
        # from DB index.
        'name': parameter_types.common_query_regex_param,
        # The alias of name. It should be removed in the future
        # with microversion bump.
        'display_name': parameter_types.common_query_regex_param,
        'description': parameter_types.common_query_regex_param,
        # The alias of description. It should be removed in the
        # future with microversion bump.
        'display_description': parameter_types.common_query_regex_param,
        'locked_by': parameter_types.common_query_regex_param,
        'uuid': parameter_types.common_query_param,
        'root_device_name': parameter_types.common_query_regex_param,
        'config_drive': parameter_types.common_query_regex_param,
        'access_ip_v4': parameter_types.common_query_regex_param,
        'access_ip_v6': parameter_types.common_query_regex_param,
        'auto_disk_config': parameter_types.common_query_regex_param,
        'progress': parameter_types.common_query_regex_param,
        'sort_key': multi_params(VALID_SORT_KEYS),
        'sort_dir': parameter_types.common_query_param,
        'all_tenants': parameter_types.common_query_param,
        'soft_deleted': parameter_types.common_query_param,
        'deleted': parameter_types.common_query_param,
        'status': parameter_types.common_query_param,
        'changes-since': multi_params({'type': 'string',
                                       'format': 'date-time'}),
        # NOTE(alex_xu): The ip and ip6 are implemented in the python.
        'ip': parameter_types.common_query_regex_param,
        'ip6': parameter_types.common_query_regex_param,
        'created_at': parameter_types.common_query_regex_param,
    },
    # For backward-compatible additionalProperties is set to be True here.
    # And we will either strip the extra params out or raise HTTP 400
    # according to the params' value in the later process.
    'additionalProperties': True,
    # Prevent internal-attributes that are started with underscore from
    # being striped out in schema validation, and raise HTTP 400 in API.
    'patternProperties': {"^_": parameter_types.common_query_param}
}
# Update the joined-table fields to the list so it will not be
# stripped in later process, thus can be handled later in api
# to raise HTTP 400.
query_params_v21['properties'].update(
    JOINED_TABLE_QUERY_PARAMS_SERVERS)
query_params_v21['properties'].update(
    parameter_types.pagination_parameters)
# 2.26 adds the four tag-filtering parameters.
query_params_v226 = copy.deepcopy(query_params_v21)
query_params_v226['properties'].update({
    'tags': parameter_types.common_query_regex_param,
    'tags-any': parameter_types.common_query_regex_param,
    'not-tags': parameter_types.common_query_regex_param,
    'not-tags-any': parameter_types.common_query_regex_param,
})
# 2.66 adds filtering on changes-before.
query_params_v266 = copy.deepcopy(query_params_v226)
query_params_v266['properties'].update({
    'changes-before': multi_params({'type': 'string',
                                    'format': 'date-time'}),
})
| |
# Copyright (c) 2003-2012 CORE Security Technologies:
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: ntlm.py 910 2013-11-08 19:13:10Z bethus $
#
import base64
import array
import struct
import calendar
import time
import hashlib
import random
import string
import binascii
from impacket.structure import Structure
# This is important. NTLMv2 is not negotiated by the client or server.
# It is used if set locally on both sides. Change this item if you don't want to use
# NTLMv2 by default and fall back to NTLMv1 (with EXTENDED_SESSION_SECURITY or not)
# Check the following links:
# http://davenport.sourceforge.net/ntlm.html
# http://blogs.msdn.com/b/openspecification/archive/2010/04/20/ntlm-keys-and-sundry-stuff.aspx
# http://social.msdn.microsoft.com/Forums/en-US/os_interopscenarios/thread/c8f488ed-1b96-4e06-bd65-390aa41138d1/
# So I'm setting a global variable to control this, this can also be set programmatically
# (computeResponse() and the getNTLMSSPType* helpers take it as a keyword default).
USE_NTLMv2 = True # if false will fall back to NTLMv1 (or NTLMv1 with ESS a.k.a NTLM2)
def computeResponse(flags, serverChallenge, clientChallenge, serverName, domain, user, password, lmhash = '', nthash = '', use_ntlmv2 = USE_NTLMv2):
    """Compute the challenge response, delegating to the NTLMv2 or NTLMv1
    implementation according to use_ntlmv2."""
    worker = computeResponseNTLMv2 if use_ntlmv2 else computeResponseNTLMv1
    return worker(flags, serverChallenge, clientChallenge, serverName,
                  domain, user, password, lmhash, nthash,
                  use_ntlmv2 = use_ntlmv2)
# Crypto backend selection: prefer PyCrypto; fall back to POW. POW stays
# None when PyCrypto imports successfully, and __DES_block() uses that to
# pick the backend at call time.
try:
    POW = None
    from Crypto.Cipher import ARC4
    from Crypto.Cipher import DES
    from Crypto.Hash import MD4
except Exception:
    try:
        import POW
    except Exception:
        # Neither backend available: warn but keep importing so the
        # non-crypto parts of the module remain usable.
        print "Warning: You don't have any crypto installed. You need either POW or PyCrypto"
        print "We suggest PyCrypto. See http://www.pycrypto.org/"
# DCE/RPC authentication levels used with NTLM.
NTLM_AUTH_NONE = 1
NTLM_AUTH_CONNECT = 2
NTLM_AUTH_CALL = 3
NTLM_AUTH_PKT = 4
NTLM_AUTH_PKT_INTEGRITY = 5
NTLM_AUTH_PKT_PRIVACY = 6
# NTLMSSP negotiate flags; unnamed bits are kept as placeholders.
NTLMSSP_KEY_56 = 0x80000000
NTLMSSP_KEY_EXCHANGE = 0x40000000
NTLMSSP_KEY_128 = 0x20000000
# NTLMSSP_ = 0x10000000
# NTLMSSP_ = 0x08000000
# NTLMSSP_ = 0x04000000
NTLMSSP_VERSION = 0x02000000
# NTLMSSP_ = 0x01000000
NTLMSSP_TARGET_INFO = 0x00800000
# NTLMSSP_ = 0x00200000
# NTLMSSP_ = 0x00100000
NTLMSSP_NTLM2_KEY = 0x00080000
NTLMSSP_NOT_NT_KEY = 0x00400000
NTLMSSP_CHALL_NOT_NT = 0x00040000
NTLMSSP_TARGET_TYPE_SERVER = 0x00020000
NTLMSSP_CHALL_INIT = 0x00010000
NTLMSSP_ALWAYS_SIGN = 0x00008000 # forces the other end to sign packets
NTLMSSP_LOCAL_CALL = 0x00004000
NTLMSSP_WORKSTATION = 0x00002000
NTLMSSP_DOMAIN = 0x00001000
# NTLMSSP_ = 0x00000800
# NTLMSSP_ = 0x00000400
NTLMSSP_NTLM_KEY = 0x00000200
NTLMSSP_NETWARE = 0x00000100
NTLMSSP_LM_KEY = 0x00000080
NTLMSSP_DATAGRAM = 0x00000040
NTLMSSP_SEAL = 0x00000020
NTLMSSP_SIGN = 0x00000010 # means packet is signed, if verifier is wrong it fails
# NTLMSSP_ = 0x00000008
NTLMSSP_TARGET = 0x00000004
NTLMSSP_OEM = 0x00000002
NTLMSSP_UNICODE = 0x00000001
# AV_PAIR constants: attribute ids carried in the TargetInfo blob of the
# challenge message.
NTLMSSP_AV_EOL = 0x00
NTLMSSP_AV_HOSTNAME = 0x01
NTLMSSP_AV_DOMAINNAME = 0x02
NTLMSSP_AV_DNS_HOSTNAME = 0x03
NTLMSSP_AV_DNS_DOMAINNAME = 0x04
NTLMSSP_AV_DNS_TREENAME = 0x05
NTLMSSP_AV_FLAGS = 0x06
NTLMSSP_AV_TIME = 0x07
NTLMSSP_AV_RESTRICTIONS = 0x08
NTLMSSP_AV_TARGET_NAME = 0x09
NTLMSSP_AV_CHANNEL_BINDINGS = 0x0a
class AV_PAIRS():
    """(De)serializer for the AV_PAIR list of the TargetInfo field.

    Entries live in ``self.fields`` as ``id -> (length, value)``.
    """
    def __init__(self, data = None):
        self.fields = {}
        if data is not None:
            self.fromString(data)
    def __setitem__(self,key,value):
        self.fields[key] = (len(value),value)
    def __getitem__(self, key):
        # Return the (length, value) tuple, or None when the id is absent.
        # ('in' replaces the Python-2-only dict.has_key()).
        if key in self.fields:
            return self.fields[key]
        return None
    def __delitem__(self, key):
        del self.fields[key]
    def __len__(self):
        return len(self.getData())
    def __str__(self):
        # BUGFIX: this used to return len(self.getData()) — an int — which
        # made str(av_pairs) raise TypeError; return the packed data instead.
        return self.getData()
    def fromString(self, data):
        # Walk the TLV stream until the EOL (0x00) attribute id is parsed.
        tInfo = data
        fType = 0xff
        # BUGFIX: compare with '!=' rather than 'is not'; identity of small
        # ints is an implementation detail and must not be relied upon.
        while fType != NTLMSSP_AV_EOL:
            fType = struct.unpack('<H',tInfo[:struct.calcsize('<H')])[0]
            tInfo = tInfo[struct.calcsize('<H'):]
            length = struct.unpack('<H',tInfo[:struct.calcsize('<H')])[0]
            tInfo = tInfo[struct.calcsize('<H'):]
            content = tInfo[:length]
            self.fields[fType]=(length,content)
            tInfo = tInfo[length:]
    def dump(self):
        # Parenthesized print works identically on Python 2 and 3 for a
        # single argument.
        for i in self.fields.keys():
            print("%s: {%r}" % (i,self[i]))
    def getData(self):
        # A single EOL terminator is always appended below, so drop any
        # stored one first to avoid emitting it twice.
        if NTLMSSP_AV_EOL in self.fields:
            del self.fields[NTLMSSP_AV_EOL]
        # b'' is a plain str on Python 2, so this stays py2-compatible.
        ans = b''
        for i in self.fields.keys():
            ans+= struct.pack('<HH', i, self[i][0])
            ans+= self[i][1]
        # end with a NTLMSSP_AV_EOL
        ans += struct.pack('<HH', NTLMSSP_AV_EOL, 0)
        return ans
class NTLMAuthMixin:
    """Mixin adding os_version parsing to the NTLM message structures."""
    def get_os_version(self):
        # Return (major, minor, build) decoded from the raw os_version
        # blob, or None when the message carried no version field.
        raw = self['os_version']
        if raw == '':
            return None
        major = struct.unpack('B', raw[0])[0]
        minor = struct.unpack('B', raw[1])[0]
        build = struct.unpack('H', raw[2:4])
        return (major, minor, build)
class NTLMAuthNegotiate(Structure, NTLMAuthMixin):
    """NTLMSSP Type 1 (NEGOTIATE) message.

    Fixed header followed by optional os_version, host_name and
    domain_name payloads whose offsets are fixed up in getData().
    """
    # Wire layout consumed by impacket's Structure machinery.
    structure = (
        ('','"NTLMSSP\x00'),
        ('message_type','<L=1'),
        ('flags','<L'),
        ('domain_len','<H-domain_name'),
        ('domain_max_len','<H-domain_name'),
        ('domain_offset','<L=0'),
        ('host_len','<H-host_name'),
        ('host_maxlen','<H-host_name'),
        ('host_offset','<L=0'),
        ('os_version',':'),
        ('host_name',':'),
        ('domain_name',':'))
    def __init__(self):
        Structure.__init__(self)
        # Default negotiate flags; commented-out flags kept for reference.
        self['flags']= (
               NTLMSSP_KEY_128     |
               NTLMSSP_KEY_EXCHANGE|
               # NTLMSSP_LM_KEY      |
               NTLMSSP_NTLM_KEY    |
               NTLMSSP_UNICODE     |
               # NTLMSSP_ALWAYS_SIGN |
               NTLMSSP_SIGN        |
               NTLMSSP_SEAL        |
               # NTLMSSP_TARGET      |
               0)
        self['host_name']=''
        self['domain_name']=''
        self['os_version']=''
    def getData(self):
        """Serialize, setting presence flags and payload offsets first."""
        if len(self.fields['host_name']) > 0:
            self['flags'] |= NTLMSSP_WORKSTATION
        if len(self.fields['domain_name']) > 0:
            self['flags'] |= NTLMSSP_DOMAIN
        if len(self.fields['os_version']) > 0:
            self['flags'] |= NTLMSSP_VERSION
        if (self['flags'] & NTLMSSP_VERSION) == NTLMSSP_VERSION:
            version_len = 8
        else:
            version_len = 0
        # Payloads start after the 32-byte fixed header (+ version blob).
        if (self['flags'] & NTLMSSP_WORKSTATION) == NTLMSSP_WORKSTATION:
            self['host_offset']=32 + version_len
        if (self['flags'] & NTLMSSP_DOMAIN) == NTLMSSP_DOMAIN:
            self['domain_offset']=32+len(self['host_name']) + version_len
        return Structure.getData(self)
    def fromString(self,data):
        """Parse a raw Type 1 message, resolving the payload offsets."""
        Structure.fromString(self,data)
        domain_offset = self['domain_offset']
        domain_end = self['domain_len'] + domain_offset
        self['domain_name'] = data[ domain_offset : domain_end ]
        host_offset = self['host_offset']
        host_end = self['host_len'] + host_offset
        self['host_name'] = data[ host_offset : host_end ]
        hasOsInfo = self['flags'] & NTLMSSP_VERSION
        # NOTE(review): the guard checks len(data) >= 36 but slices
        # data[32:40]; a 36-39 byte message yields a short os_version —
        # confirm whether >= 40 was intended.
        if len(data) >= 36 and hasOsInfo:
            self['os_version'] = data[32:40]
        else:
            self['os_version'] = ''
class NTLMAuthChallenge(Structure):
    """NTLMSSP Type 2 (CHALLENGE) message sent by the server.

    Carries the 8-byte server challenge plus the TargetInfo AV_PAIR blob.
    """
    structure = (
        ('','"NTLMSSP\x00'),
        ('message_type','<L=2'),
        ('domain_len','<H-domain_name'),
        ('domain_max_len','<H-domain_name'),
        ('domain_offset','<L=40'),
        ('flags','<L=0'),
        ('challenge','8s'),
        ('reserved','8s=""'),
        ('TargetInfoFields_len','<H-TargetInfoFields'),
        ('TargetInfoFields_max_len','<H-TargetInfoFields'),
        ('TargetInfoFields_offset','<L'),
        # Version blob is present only when NTLMSSP_VERSION is negotiated.
        ('VersionLen','_-Version','self.checkVersion(self["flags"])'),
        ('Version',':'),
        ('domain_name',':'),
        ('TargetInfoFields',':'))
    def checkVersion(self, flags):
        # Length of the Version field: 8 bytes when the flag is set, else 0.
        if flags is not None:
           if flags & NTLMSSP_VERSION == 0:
              return 0
        return 8
    def getData(self):
        # Allow TargetInfoFields to be an AV_PAIRS instance; serialize it
        # before handing off to Structure.
        if self['TargetInfoFields'] is not None and type(self['TargetInfoFields']) is not str:
            raw_av_fields = self['TargetInfoFields'].getData()
            self['TargetInfoFields'] = raw_av_fields
        return Structure.getData(self)
    def fromString(self,data):
        Structure.fromString(self,data)
        # Just in case there's more data after the TargetInfoFields
        self['TargetInfoFields'] = self['TargetInfoFields'][:self['TargetInfoFields_len']]
        # We gotta process the TargetInfoFields
        #if self['TargetInfoFields_len'] > 0:
        #    av_pairs = AV_PAIRS(self['TargetInfoFields'][:self['TargetInfoFields_len']])
        #    self['TargetInfoFields'] = av_pairs
        return self
class NTLMAuthChallengeResponse(Structure, NTLMAuthMixin):
    """NTLMSSP Type 3 (AUTHENTICATE) message sent by the client.

    Holds the LM/NTLM responses plus domain/user/host payloads; offsets
    into the payload area are recomputed in getData().
    """
    structure = (
        ('','"NTLMSSP\x00'),
        ('message_type','<L=3'),
        ('lanman_len','<H-lanman'),
        ('lanman_max_len','<H-lanman'),
        ('lanman_offset','<L'),
        ('ntlm_len','<H-ntlm'),
        ('ntlm_max_len','<H-ntlm'),
        ('ntlm_offset','<L'),
        ('domain_len','<H-domain_name'),
        ('domain_max_len','<H-domain_name'),
        ('domain_offset','<L'),
        ('user_len','<H-user_name'),
        ('user_max_len','<H-user_name'),
        ('user_offset','<L'),
        ('host_len','<H-host_name'),
        ('host_max_len','<H-host_name'),
        ('host_offset','<L'),
        ('session_key_len','<H-session_key'),
        ('session_key_max_len','<H-session_key'),
        ('session_key_offset','<L'),
        ('flags','<L'),
        # Version and MIC are conditional on the negotiated flags.
        ('VersionLen','_-Version','self.checkVersion(self["flags"])'),
        ('Version',':=""'),
        ('MICLen','_-MIC','self.checkMIC(self["flags"])'),
        ('MIC',':=""'),
        ('domain_name',':'),
        ('user_name',':'),
        ('host_name',':'),
        ('lanman',':'),
        ('ntlm',':'),
        ('session_key',':'))
    def __init__(self, username = '', password = '', challenge = '', lmhash = '', nthash = '', flags = 0):
        Structure.__init__(self)
        self['session_key']=''
        self['user_name']=username.encode('utf-16le')
        self['domain_name']='' #"CLON".encode('utf-16le')
        self['host_name']='' #"BETS".encode('utf-16le')
        self['flags'] = (   #authResp['flags']
                # we think (beto & gera) that these flags force a memory content
                # leakage when a windows 2000 answers using uninitialized verifiers
           NTLMSSP_KEY_128     |
           NTLMSSP_KEY_EXCHANGE|
           # NTLMSSP_LM_KEY      |
           NTLMSSP_NTLM_KEY    |
           NTLMSSP_UNICODE     |
           # NTLMSSP_ALWAYS_SIGN |
           NTLMSSP_SIGN        |
           NTLMSSP_SEAL        |
           # NTLMSSP_TARGET      |
           0)
        # Here we do the stuff
        # Precomputed hashes take precedence; otherwise derive them from
        # the password; with no credentials the responses stay empty.
        if username and ( lmhash != '' or nthash != ''):
            self['lanman'] = get_ntlmv1_response(lmhash, challenge)
            self['ntlm'] = get_ntlmv1_response(nthash, challenge)
        elif (username and password):
            lmhash = compute_lmhash(password)
            nthash = compute_nthash(password)
            self['lanman']=get_ntlmv1_response(lmhash, challenge)
            self['ntlm']=get_ntlmv1_response(nthash, challenge)    # This is not used for LM_KEY nor NTLM_KEY
        else:
            self['lanman'] = ''
            self['ntlm'] = ''
            if not self['host_name']:
                self['host_name'] = 'NULL'.encode('utf-16le')      # for NULL session there must be a hostname
    def checkVersion(self, flags):
        # Length of the Version field: 8 bytes when the flag is set, else 0.
        if flags is not None:
           if flags & NTLMSSP_VERSION == 0:
              return 0
        return 8
    def checkMIC(self, flags):
        # TODO: Find a proper way to check the MIC is in there
        if flags is not None:
           if flags & NTLMSSP_VERSION == 0:
              return 0
        return 16
    def getData(self):
        # Payload area starts at offset 64; lay the fields out back to back.
        self['domain_offset']=64
        self['user_offset']=64+len(self['domain_name'])
        self['host_offset']=self['user_offset']+len(self['user_name'])
        self['lanman_offset']=self['host_offset']+len(self['host_name'])
        self['ntlm_offset']=self['lanman_offset']+len(self['lanman'])
        self['session_key_offset']=self['ntlm_offset']+len(self['ntlm'])
        return Structure.getData(self)
    def fromString(self,data):
        Structure.fromString(self,data)
        # [MS-NLMP] page 27
        # Payload data can be present in any order within the Payload field,
        # with variable-length padding before or after the data
        domain_offset = self['domain_offset']
        domain_end    = self['domain_len'] + domain_offset
        self['domain_name'] = data[ domain_offset : domain_end ]
        host_offset = self['host_offset']
        host_end    = self['host_len'] + host_offset
        self['host_name'] = data[ host_offset: host_end ]
        user_offset = self['user_offset']
        user_end    = self['user_len'] + user_offset
        self['user_name'] = data[ user_offset: user_end ]
        ntlm_offset = self['ntlm_offset']
        ntlm_end    = self['ntlm_len'] + ntlm_offset
        self['ntlm'] = data[ ntlm_offset : ntlm_end ]
        lanman_offset = self['lanman_offset']
        lanman_end    = self['lanman_len'] + lanman_offset
        self['lanman'] = data[ lanman_offset : lanman_end]
        #if len(data) >= 36:
        #    self['os_version'] = data[32:36]
        #else:
        #    self['os_version'] = ''
class ImpacketStructure(Structure):
    """Structure variant exposing the packet-style helper interface."""

    def set_parent(self, other):
        # Remember the enclosing structure/packet.
        self.parent = other

    def get_packet(self):
        # Serialized form of this structure.
        return str(self)

    def get_size(self):
        # Total serialized length in bytes.
        return len(self)
class ExtendedOrNotMessageSignature(Structure):
    """Signature whose wire layout depends on extended session security."""
    def __init__(self, flags = 0, **kargs):
        # NTLM2 (extended session security) selects the extended layout.
        use_extended = flags & NTLMSSP_NTLM2_KEY
        self.structure = (self.extendedMessageSignature if use_extended
                          else self.MessageSignature)
        return Structure.__init__(self, **kargs)
class NTLMMessageSignature(ExtendedOrNotMessageSignature):
    # Layout when NTLM2 (extended session security) was negotiated.
    extendedMessageSignature = (
        ('Version','<L=1'),
        ('Checksum','<q'),
        ('SeqNum','<i'),
    )
    # Classic (non-extended) signature layout.
    MessageSignature = (
        ('Version','<L=1'),
        ('RandomPad','<i=0'),
        ('Checksum','<i'),
        ('SeqNum','<i'),
    )
# Well-known plaintext DES-encrypted with the password halves to build the
# LM hash.
KNOWN_DES_INPUT = "KGS!@#$%"
def __expand_DES_key( key):
    """Expand a 7-byte key into an 8-byte DES key.

    Each output byte takes 7 consecutive key bits, shifted left once to
    leave room for the (unset) DES parity bit.
    """
    octets = [ord(c) for c in (key[:7] + '\x00' * 7)[:7]]
    sevens = [(octets[0] >> 1) & 0x7f]
    for i in range(1, 7):
        # 7 bits straddling octets i-1 and i.
        sevens.append(((octets[i - 1] << (7 - i)) | (octets[i] >> (i + 1))) & 0x7f)
    sevens.append(octets[6] & 0x7f)
    return ''.join(chr(b << 1) for b in sevens)
def __DES_block(key, msg):
if POW:
cipher = POW.Symmetric(POW.DES_ECB)
cipher.encryptInit(__expand_DES_key(key))
return cipher.update(msg)
else:
cipher = DES.new(__expand_DES_key(key),DES.MODE_ECB)
return cipher.encrypt(msg)
def ntlmssp_DES_encrypt(key, challenge):
answer = __DES_block(key[:7], challenge)
answer += __DES_block(key[7:14], challenge)
answer += __DES_block(key[14:], challenge)
return answer
# High level functions to use NTLMSSP
def getNTLMSSPType1(workstation='', domain='', signingRequired = False, use_ntlmv2 = USE_NTLMv2):
    """Build an NTLMSSP NEGOTIATE (Type 1) token.

    The returned token always advertises unicode, NTLM/NTLM2 keys and
    128/56-bit key support; signing/sealing flags are added only when
    signingRequired is set, and target-info when NTLMv2 is in use.
    """
    negotiate = NTLMAuthNegotiate()
    flags = 0
    if signingRequired:
        flags = NTLMSSP_KEY_EXCHANGE | NTLMSSP_SIGN | NTLMSSP_ALWAYS_SIGN | NTLMSSP_SEAL
    if use_ntlmv2:
        flags |= NTLMSSP_TARGET_INFO
    flags |= (NTLMSSP_NTLM_KEY | NTLMSSP_NTLM2_KEY | NTLMSSP_UNICODE |
              NTLMSSP_TARGET | NTLMSSP_KEY_128 | NTLMSSP_KEY_56)
    negotiate['flags'] = flags
    negotiate['domain_name'] = domain
    return negotiate
def getNTLMSSPType3(type1, type2, user, password, domain, lmhash = '', nthash = '', use_ntlmv2 = USE_NTLMv2):
    # Build the NTLMSSP AUTHENTICATE (Type 3) token answering the server
    # CHALLENGE (Type 2) token carried in `type2`.  Returns the token and
    # the exported session key used for subsequent signing/sealing.
    ntlmChallenge = NTLMAuthChallenge(type2)
    # Let's start with the original flags sent in the type1 message
    responseFlags = type1['flags']
    # Token received and parsed. Depending on the authentication
    # method we will create a valid ChallengeResponse
    ntlmChallengeResponse = NTLMAuthChallengeResponse(user, password, ntlmChallenge['challenge'])
    # NOTE(review): random.choice is not a cryptographically secure RNG,
    # so this client challenge is predictable.
    clientChallenge = "".join([random.choice(string.digits+string.letters) for i in xrange(8)])
    serverName = ntlmChallenge['TargetInfoFields']
    ntResponse, lmResponse, sessionBaseKey = computeResponse(ntlmChallenge['flags'], ntlmChallenge['challenge'], clientChallenge, serverName, domain, user, password, lmhash, nthash, use_ntlmv2 )
    # Let's check the return flags: strip every capability the server did
    # not acknowledge in its challenge.
    if (ntlmChallenge['flags'] & NTLMSSP_NTLM2_KEY) == 0:
        # No extended session security, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_NTLM2_KEY
    if (ntlmChallenge['flags'] & NTLMSSP_KEY_128 ) == 0:
        # No support for 128 key len, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_KEY_128
    if (ntlmChallenge['flags'] & NTLMSSP_KEY_EXCHANGE) == 0:
        # No key exchange supported, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_KEY_EXCHANGE
    if (ntlmChallenge['flags'] & NTLMSSP_SEAL) == 0:
        # No sign available, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_SEAL
    if (ntlmChallenge['flags'] & NTLMSSP_SIGN) == 0:
        # No sign available, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_SIGN
    if (ntlmChallenge['flags'] & NTLMSSP_ALWAYS_SIGN) == 0:
        # No sign available, taking it out
        responseFlags &= 0xffffffff ^ NTLMSSP_ALWAYS_SIGN
    keyExchangeKey = KXKEY(ntlmChallenge['flags'],sessionBaseKey, lmResponse, ntlmChallenge['challenge'], password, lmhash, nthash,use_ntlmv2)
    # Special case for anonymous login
    if user == '' and password == '' and lmhash == '' and nthash == '':
        keyExchangeKey = '\x00'*16
    # If we set up key exchange, let's fill the right variables
    if ntlmChallenge['flags'] & NTLMSSP_KEY_EXCHANGE:
        # not exactly what I call random tho :\
        # exportedSessionKey = this is the key we should use to sign
        exportedSessionKey = "".join([random.choice(string.digits+string.letters) for i in xrange(16)])
        #exportedSessionKey = "A"*16
        #print "keyExchangeKey %r" % keyExchangeKey
        # Let's generate the right session key based on the challenge flags
        #if responseFlags & NTLMSSP_NTLM2_KEY:
            # Extended session security enabled
        #    if responseFlags & NTLMSSP_KEY_128:
                # Full key
        #        exportedSessionKey = exportedSessionKey
        #    elif responseFlags & NTLMSSP_KEY_56:
                # Only 56-bit key
        #        exportedSessionKey = exportedSessionKey[:7]
        #    else:
        #        exportedSessionKey = exportedSessionKey[:5]
        #elif responseFlags & NTLMSSP_KEY_56:
            # No extended session security, just 56 bits key
        #    exportedSessionKey = exportedSessionKey[:7] + '\xa0'
        #else:
        #    exportedSessionKey = exportedSessionKey[:5] + '\xe5\x38\xb0'
        encryptedRandomSessionKey = generateEncryptedSessionKey(keyExchangeKey, exportedSessionKey)
    else:
        encryptedRandomSessionKey = None
        # [MS-NLMP] page 46
        exportedSessionKey = keyExchangeKey
    ntlmChallengeResponse['flags'] = responseFlags
    ntlmChallengeResponse['domain_name'] = domain.encode('utf-16le')
    ntlmChallengeResponse['lanman'] = lmResponse
    ntlmChallengeResponse['ntlm'] = ntResponse
    if encryptedRandomSessionKey is not None:
        ntlmChallengeResponse['session_key'] = encryptedRandomSessionKey
    return ntlmChallengeResponse, exportedSessionKey
# NTLMv1 Algorithm
def generateSessionKeyV1(password, lmhash, nthash):
    """NTLMv1 session base key: MD4 of the NT one-way function output."""
    if POW:
        digest = POW.Digest(POW.MD4_DIGEST)
    else:
        digest = MD4.new()
    digest.update(NTOWFv1(password, lmhash, nthash))
    return digest.digest()
def computeResponseNTLMv1(flags, serverChallenge, clientChallenge, serverName, domain, user, password, lmhash='', nthash='', use_ntlmv2 = USE_NTLMv2):
    # NTLMv1 challenge-response: returns (ntResponse, lmResponse,
    # sessionBaseKey) for the given server/client challenges.
    if (user == '' and password == ''):
        # Special case for anonymous authentication
        lmResponse = ''
        ntResponse = ''
    else:
        # NOTE: lmhash is rebound before nthash is computed; NTOWFv1 only
        # inspects its nthash argument, so this does not change the result.
        lmhash = LMOWFv1(password, lmhash, nthash)
        nthash = NTOWFv1(password, lmhash, nthash)
        if flags & NTLMSSP_LM_KEY:
            # LM-key mode: only the LM response is sent.
            ntResponse = ''
            lmResponse = get_ntlmv1_response(lmhash, serverChallenge)
        elif flags & NTLMSSP_NTLM2_KEY:
            # NTLM2 session security: DESL over the first 8 bytes of
            # MD5(serverChallenge + clientChallenge).
            md5 = hashlib.new('md5')
            chall = (serverChallenge + clientChallenge)
            md5.update(chall)
            ntResponse = ntlmssp_DES_encrypt(nthash, md5.digest()[:8])
            lmResponse = clientChallenge + '\x00'*16
        else:
            ntResponse = get_ntlmv1_response(nthash,serverChallenge)
            lmResponse = get_ntlmv1_response(lmhash, serverChallenge)
    sessionBaseKey = generateSessionKeyV1(password, lmhash, nthash)
    return ntResponse, lmResponse, sessionBaseKey
def compute_lmhash(password):
    """LM hash per Samba's encryption spec (docs/html/ENCRYPTION.html):
    DES-encrypt the magic constant with each 7-byte half of the
    upper-cased password and concatenate both blocks."""
    password = password.upper()
    return (__DES_block(password[:7], KNOWN_DES_INPUT) +
            __DES_block(password[7:14], KNOWN_DES_INPUT))
def NTOWFv1(password, lmhash = '', nthash=''):
    """NT one-way function v1: a precomputed nthash wins, otherwise the
    MD4 hash of the password is computed."""
    return nthash if nthash != '' else compute_nthash(password)
def LMOWFv1(password, lmhash = '', nthash=''):
    """LM one-way function v1: a precomputed lmhash wins, otherwise the
    LM hash of the password is computed."""
    return lmhash if lmhash != '' else compute_lmhash(password)
def compute_nthash(password):
    # This is done according to Samba's encryption specification (docs/html/ENCRYPTION.html)
    # NT hash: MD4 over the UTF-16LE encoded password (Python 2 unicode).
    password = unicode(password).encode('utf_16le')
    if POW:
        hash = POW.Digest(POW.MD4_DIGEST)
    else:
        hash = MD4.new()
    hash.update(password)
    return hash.digest()


def get_ntlmv1_response(key, challenge):
    # DESL of the server challenge under the 16-byte hash (padded to 21
    # bytes inside ntlmssp_DES_encrypt).
    return ntlmssp_DES_encrypt(key, challenge)
# NTLMv2 Algorithm - as described in MS-NLMP Section 3.3.2
# Crypto Stuff
def MAC(flags, handle, signingKey, seqNum, message):
    # [MS-NLMP] Section 3.4.4
    # Returns the right messageSignature depending on the flags
    # NOTE(review): the `seqNum += 1` statements below only rebind the
    # local; callers must advance their own sequence counter.
    messageSignature = NTLMMessageSignature(flags)
    if flags & NTLMSSP_NTLM2_KEY:
        if flags & NTLMSSP_KEY_EXCHANGE:
            # Key exchange negotiated: the HMAC prefix is additionally
            # passed through the supplied cipher handle.
            messageSignature['Version'] = 1
            messageSignature['Checksum'] = struct.unpack('<q',handle(hmac_md5(signingKey, struct.pack('<i',seqNum)+message)[:8]))[0]
            messageSignature['SeqNum'] = seqNum
            seqNum += 1
        else:
            messageSignature['Version'] = 1
            messageSignature['Checksum'] = struct.unpack('<q',hmac_md5(signingKey, struct.pack('<i',seqNum)+message)[:8])[0]
            messageSignature['SeqNum'] = seqNum
            seqNum += 1
    else:
        # Classic (non-extended) signature: CRC32 checksum; pad, checksum
        # and sequence number are all run through the cipher handle.
        messageSignature['Version'] = 1
        messageSignature['Checksum'] = struct.pack('<i',binascii.crc32(message))
        messageSignature['RandomPad'] = 0
        messageSignature['RandomPad'] = handle(struct.pack('<i',messageSignature['RandomPad']))
        messageSignature['Checksum'] = struct.unpack('<i',handle(messageSignature['Checksum']))[0]
        messageSignature['SeqNum'] = handle('\x00\x00\x00\x00')
        messageSignature['SeqNum'] = struct.unpack('<i',messageSignature['SeqNum'])[0] ^ seqNum
        messageSignature['RandomPad'] = 0
    return messageSignature
def SEAL(flags, signingKey, sealingKey, messageToSign, messageToEncrypt, seqNum, handle):
    # Encrypt the payload with the cipher handle and compute its
    # signature; returns (sealedMessage, signature).
    sealedMessage = handle(messageToEncrypt)
    signature = MAC(flags, handle, signingKey, seqNum, messageToSign)
    return sealedMessage, signature


def SIGN(flags, signingKey, message, seqNum, handle):
    # Signature only (no sealing) -- see MAC above.
    return MAC(flags, handle, signingKey, seqNum, message)
def SIGNKEY(flags, randomSessionKey, mode = 'Client'):
    # Signing key derivation: only defined under extended session
    # security (NTLMSSP_NTLM2_KEY); MD5 of the session key plus a
    # direction-specific magic constant, else None.
    if flags & NTLMSSP_NTLM2_KEY:
        if mode == 'Client':
            md5 = hashlib.new('md5')
            md5.update(randomSessionKey + "session key to client-to-server signing key magic constant\x00")
            signKey = md5.digest()
        else:
            md5 = hashlib.new('md5')
            md5.update(randomSessionKey + "session key to server-to-client signing key magic constant\x00")
            signKey = md5.digest()
    else:
        signKey = None
    return signKey


def SEALKEY(flags, randomSessionKey, mode = 'Client'):
    # Sealing key derivation: key length depends on the negotiated
    # 128/56-bit flags; under extended session security the truncated
    # key is additionally mixed with a direction-specific magic constant
    # via MD5.
    if flags & NTLMSSP_NTLM2_KEY:
        if flags & NTLMSSP_KEY_128:
            sealKey = randomSessionKey
        elif flags & NTLMSSP_KEY_56:
            sealKey = randomSessionKey[:7]
        else:
            sealKey = randomSessionKey[:5]
        if mode == 'Client':
            md5 = hashlib.new('md5')
            md5.update(sealKey + 'session key to client-to-server sealing key magic constant\x00')
            sealKey = md5.digest()
        else:
            md5 = hashlib.new('md5')
            md5.update(sealKey + 'session key to server-to-client sealing key magic constant\x00')
            sealKey = md5.digest()
    elif flags & NTLMSSP_KEY_56:
        sealKey = randomSessionKey[:7] + '\xa0'
    else:
        sealKey = randomSessionKey[:5] + '\xe5\x38\xb0'
    return sealKey
def generateEncryptedSessionKey(keyExchangeKey, exportedSessionKey):
    """RC4-encrypt the exported session key under the key-exchange key."""
    if POW:
        rc4 = POW.Symmetric(POW.RC4)
        rc4.encryptInit(keyExchangeKey)
        return rc4.update(exportedSessionKey)
    return ARC4.new(keyExchangeKey).encrypt(exportedSessionKey)
def KXKEY(flags, sessionBaseKey, lmChallengeResponse, serverChallenge, password, lmhash, nthash, use_ntlmv2 = USE_NTLMv2):
if use_ntlmv2:
return sessionBaseKey
if flags & NTLMSSP_NTLM2_KEY:
if flags & NTLMSSP_NTLM_KEY:
keyExchangeKey = hmac_md5(sessionBaseKey, serverChallenge + lmChallengeResponse[:8])
else:
keyExchangeKey = sessionBaseKey
elif flags & NTLMSSP_NTLM_KEY:
if flags & NTLMSSP_LM_KEY:
keyExchangeKey = __DES_block(LMOWFv1(password,lmhash)[:7], lmChallengeResponse[:8]) + __DES_block(LMOWFv1(password,lmhash)[7] + '\xBD\xBD\xBD\xBD\xBD\xBD', lmChallengeResponse[:8])
elif flags & NTLMSSP_NOT_NT_KEY:
keyExchangeKey = LMOWFv1(password,lmhash)[:8] + '\x00'*8
else:
keyExchangeKey = sessionBaseKey
else:
raise "Can't create a valid KXKEY!"
return keyExchangeKey
def hmac_md5(key, data):
    """HMAC-MD5 of data under key, via POW when available, else the
    stdlib hmac module (which defaults to MD5 on Python 2)."""
    if POW:
        mac = POW.Hmac(POW.MD5_DIGEST, key)
        mac.update(data)
        return mac.mac()
    import hmac
    mac = hmac.new(key)
    mac.update(data)
    return mac.digest()
def NTOWFv2( user, password, domain, hash = ''):
    """NTLMv2 one-way function: HMAC-MD5 keyed with the NT hash over
    UPPER(user) + domain, both UTF-16LE encoded.  A precomputed hash
    short-circuits the MD4 computation."""
    key = hash if hash != '' else compute_nthash(password)
    return hmac_md5(key, user.upper().encode('utf-16le') + domain.encode('utf-16le'))


def LMOWFv2( user, password, domain, lmhash = ''):
    """LMOWFv2 is defined identically to NTOWFv2."""
    return NTOWFv2( user, password, domain, lmhash)
def computeResponseNTLMv2(flags, serverChallenge, clientChallenge, serverName, domain, user, password, lmhash = '', nthash = '', use_ntlmv2 = USE_NTLMv2):
    # NTLMv2 challenge-response: builds the temp blob from the server's
    # target-info AV_PAIRS, then derives NTProofStr and the session base
    # key via HMAC-MD5.
    responseServerVersion = '\x01'
    hiResponseServerVersion = '\x01'
    responseKeyNT = NTOWFv2(user, password, domain, nthash)
    responseKeyLM = LMOWFv2(user, password, domain, lmhash)
    # If you're running test-ntlm, comment the following lines and uncoment the ones that are commented. Don't forget to turn it back after the tests!
    ######################
    av_pairs = AV_PAIRS(serverName)
    # In order to support SPN target name validation, we have to add this to the serverName av_pairs. Otherwise we will get access denied
    # This is set at Local Security Policy -> Local Policies -> Security Options -> Server SPN target name validation level
    av_pairs[NTLMSSP_AV_TARGET_NAME] = 'cifs/'.encode('utf-16le') + av_pairs[NTLMSSP_AV_HOSTNAME][1]
    if av_pairs[NTLMSSP_AV_TIME] is not None:
        aTime = av_pairs[NTLMSSP_AV_TIME][1]
    else:
        # Windows FILETIME: 100ns intervals since 1601-01-01 UTC.
        aTime = struct.pack('<q', (116444736000000000 + calendar.timegm(time.gmtime()) * 10000000) )
        #aTime = '\x00'*8
    av_pairs[NTLMSSP_AV_TIME] = aTime
    serverName = av_pairs.getData()
    ######################
    #aTime = '\x00'*8
    ######################
    temp = responseServerVersion + hiResponseServerVersion + '\x00' * 6 + aTime + clientChallenge + '\x00' * 4 + serverName + '\x00' * 4
    ntProofStr = hmac_md5(responseKeyNT, serverChallenge + temp)
    ntChallengeResponse = ntProofStr + temp
    # NOTE(review): the LM response is keyed with responseKeyNT rather
    # than responseKeyLM; equal when lmhash == nthash -- confirm intended.
    lmChallengeResponse = hmac_md5(responseKeyNT, serverChallenge + clientChallenge) + clientChallenge
    sessionBaseKey = hmac_md5(responseKeyNT, ntProofStr)
    if (user == '' and password == ''):
        # Special case for anonymous authentication
        ntChallengeResponse = ''
        lmChallengeResponse = ''
    return ntChallengeResponse, lmChallengeResponse, sessionBaseKey
class NTLM_HTTP(object):
    '''Parent class for NTLM HTTP classes.

    Note: ``get_instace`` keeps its historical (misspelled) name for
    backward compatibility with existing callers.
    '''
    MSG_TYPE = None

    @classmethod
    def get_instace(cls, msg_64):
        """Decode an ``NTLM <base64>`` header value and return an
        instance of the subclass whose MSG_TYPE matches, parsed from the
        decoded bytes; returns None when no subclass matches."""
        if msg_64 != '':
            raw = base64.b64decode(msg_64[5:])  # Remove the 'NTLM ' prefix
            kind = ord(raw[8])
        else:
            raw = None
            kind = 0
        for subclass in NTLM_HTTP.__subclasses__():
            if subclass.MSG_TYPE == kind:
                parsed = subclass()
                parsed.fromString(raw)
                return parsed
class NTLM_HTTP_AuthRequired(NTLM_HTTP):
    commonHdr = ()
    # Message 0 means the first HTTP request e.g. 'GET /bla.png'
    MSG_TYPE = 0

    def fromString(self,data):
        # Nothing to parse for the initial (unauthenticated) request.
        pass


class NTLM_HTTP_AuthNegotiate(NTLM_HTTP, NTLMAuthNegotiate):
    # NTLMSSP Type 1 (NEGOTIATE) message carried over HTTP.
    commonHdr = ()
    MSG_TYPE = 1

    def __init__(self):
        NTLMAuthNegotiate.__init__(self)


class NTLM_HTTP_AuthChallengeResponse(NTLM_HTTP, NTLMAuthChallengeResponse):
    # NTLMSSP Type 3 (AUTHENTICATE) message carried over HTTP.
    commonHdr = ()
    MSG_TYPE = 3

    def __init__(self):
        NTLMAuthChallengeResponse.__init__(self)
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.utils import ismount
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge
constraints_conf = ConfigParser()
constraints_conf.read('/etc/swift/swift.conf')


def constraints_conf_int(name, default):
    """Fetch an integer constraint from the [swift-constraints] section
    of /etc/swift/swift.conf.

    :param name: option name to look up
    :param default: value returned when the section/option is missing
                    or the configured value is not a valid integer
    :returns: the configured int, or *default*
    """
    try:
        return int(constraints_conf.get('swift-constraints', name))
    # BUG FIX: a malformed (non-integer) value used to raise an uncaught
    # ValueError at import time; fall back to the default instead.
    except (NoSectionError, NoOptionError, ValueError):
        return default
# All limits below may be overridden in the [swift-constraints] section
# of /etc/swift/swift.conf; the literals are the shipped defaults.
#: Max file size allowed for objects
MAX_FILE_SIZE = constraints_conf_int('max_file_size',
                                     5368709122)  # 5 * 1024 * 1024 * 1024 + 2
#: Max length of the name of a key for metadata
MAX_META_NAME_LENGTH = constraints_conf_int('max_meta_name_length', 128)
#: Max length of the value of a key for metadata
MAX_META_VALUE_LENGTH = constraints_conf_int('max_meta_value_length', 256)
#: Max number of metadata items
MAX_META_COUNT = constraints_conf_int('max_meta_count', 90)
#: Max overall size of metadata
MAX_META_OVERALL_SIZE = constraints_conf_int('max_meta_overall_size', 4096)
#: Max size of any header
MAX_HEADER_SIZE = constraints_conf_int('max_header_size', 8192)
#: Max object name length
MAX_OBJECT_NAME_LENGTH = constraints_conf_int('max_object_name_length', 1024)
#: Max object list length of a get request for a container
CONTAINER_LISTING_LIMIT = constraints_conf_int('container_listing_limit',
                                               10000)
#: Max container list length of a get request for an account
ACCOUNT_LISTING_LIMIT = constraints_conf_int('account_listing_limit', 10000)
#: Max account name length
MAX_ACCOUNT_NAME_LENGTH = constraints_conf_int('max_account_name_length', 256)
#: Max container name length
MAX_CONTAINER_NAME_LENGTH = constraints_conf_int('max_container_name_length',
                                                 256)
# Maximum slo segments in buffer (not configurable)
MAX_BUFFERED_SLO_SEGMENTS = 10000
#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
                       'xml': 'application/xml'}
def check_metadata(req, target_type):
    """
    Check metadata sent in the request headers.

    :param req: request object
    :param target_type: str: one of: object, container, or account: indicates
                        which type the target storage for the metadata is
    :returns: HTTPBadRequest with bad metadata otherwise None
    """
    prefix = 'x-%s-meta-' % target_type.lower()
    total_items = 0
    total_bytes = 0
    for header, value in req.headers.iteritems():
        # Individual header lines are capped, metadata or not.
        if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
            return HTTPBadRequest('Header Line Too Long')
        if not header.lower().startswith(prefix):
            continue
        name = header[len(prefix):]
        if not name:
            return HTTPBadRequest(body='Metadata name cannot be empty',
                                  request=req, content_type='text/plain')
        total_items += 1
        total_bytes += len(name) + len(value)
        if len(name) > MAX_META_NAME_LENGTH:
            return HTTPBadRequest(
                body='Metadata name too long; max %d' % MAX_META_NAME_LENGTH,
                request=req, content_type='text/plain')
        if len(value) > MAX_META_VALUE_LENGTH:
            return HTTPBadRequest(
                body='Metadata value too long; max %d' % MAX_META_VALUE_LENGTH,
                request=req, content_type='text/plain')
        if total_items > MAX_META_COUNT:
            return HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        if total_bytes > MAX_META_OVERALL_SIZE:
            return HTTPBadRequest(
                body='Total metadata too large; max %d'
                     % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
    return None
def check_object_creation(req, object_name):
    """
    Check to ensure that everything is alright about an object to be created.

    :param req: HTTP request object
    :param object_name: name of object to be created
    :returns HTTPRequestEntityTooLarge: the object is too large
    :returns HTTPLengthRequired: missing content-length header and not
                                 a chunked request
    :returns HTTPBadRequest: missing or bad content-type header, or
                             bad metadata
    """
    # Size checks first: too large, then missing length (when not chunked).
    if req.content_length and req.content_length > MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(body='Your request is too large.',
                                         request=req,
                                         content_type='text/plain')
    if req.content_length is None and \
            req.headers.get('transfer-encoding') != 'chunked':
        return HTTPLengthRequired(request=req)
    # Server-side copies must not carry a body of their own.
    if 'X-Copy-From' in req.headers and req.content_length:
        return HTTPBadRequest(body='Copy requests require a zero byte body',
                              request=req, content_type='text/plain')
    if len(object_name) > MAX_OBJECT_NAME_LENGTH:
        return HTTPBadRequest(body='Object name length of %d longer than %d' %
                              (len(object_name), MAX_OBJECT_NAME_LENGTH),
                              request=req, content_type='text/plain')
    if 'Content-Type' not in req.headers:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='No content type')
    if not check_utf8(req.headers['Content-Type']):
        return HTTPBadRequest(request=req, body='Invalid Content-Type',
                              content_type='text/plain')
    if 'x-object-manifest' in req.headers:
        # Manifest value must be 'container/prefix' with no query chars.
        value = req.headers['x-object-manifest']
        try:
            container, prefix = value.split('/', 1)
        except ValueError:
            container = prefix = None
        if not container or not prefix or '?' in value or '&' in value or \
                prefix[0] == '/':
            return HTTPBadRequest(
                request=req,
                body='X-Object-Manifest must in the format container/prefix')
    return check_metadata(req, 'object')
def check_mount(root, drive):
    """
    Verify that the path to the device is a mount point and mounted. This
    allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us for accidentally filling up the root
    partition.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :returns: True if it is a valid mounted device, False otherwise
    """
    # Reject drive names containing any character that would be escaped
    # by URL quoting (spaces, slashes, ...).
    if urllib.quote_plus(drive) != drive:
        return False
    return ismount(os.path.join(root, drive))
def check_float(string):
    """
    Helper function for checking if a value can be converted to a float.

    :param string: value to be verified as a float
    :returns: True if the value can be converted to a float, False otherwise
    """
    try:
        float(string)
        return True
    # BUG FIX: also catch TypeError so non-string input such as None
    # returns False instead of raising.
    except (ValueError, TypeError):
        return False
def check_utf8(string):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any null character.

    :param string: string to be validated
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    # Empty (or otherwise falsy) input is rejected outright.
    if not string:
        return False
    try:
        if isinstance(string, unicode):
            # Python 2 unicode: prove it is UTF-8 encodable.
            string.encode('utf-8')
        else:
            # Python 2 str (bytes): prove it decodes as UTF-8.
            string.decode('UTF-8')
        return '\x00' not in string
    # If string is unicode, decode() will raise UnicodeEncodeError
    # So, we should catch both UnicodeDecodeError & UnicodeEncodeError
    except UnicodeError:
        return False
| |
# Build a graph for an individual video frame
from nltk.corpus import wordnet as wn
import utilities.paths as paths
from os import listdir
from os.path import isfile, join
import string
DRIVE = paths.get_drive()
def add_to_dic(dict, k, v):
    """Insert k -> v only when k is not already present; return the dict.

    (The parameter name shadows the builtin ``dict``; kept unchanged for
    interface compatibility.)
    """
    dict.setdefault(k, v)
    return dict
def remDashNum(s):
    """Return the part of s before the first '-' (e.g. 'dog-3' -> 'dog')."""
    return s.partition('-')[0]
def remPunc(s):
    """Return s with every ASCII punctuation character removed."""
    banned = set(string.punctuation)
    return ''.join(ch for ch in s if ch not in banned)
test_set = 'test_msvd'
captions = {}
nouns = {x.name().split('.', 1)[0] for x in wn.all_synsets('n')}
verbs = {x.name().split('.', 1)[0] for x in wn.all_synsets('v')}
# Build sent dict
for vid in range(1,1971):
parsed_path = DRIVE + 'densecap-master/' + test_set + '/vis/vid' + str(vid) + '/results_parsed/'
unparsed_path = DRIVE + 'densecap-master/' + test_set + '/vis/vid' + str(vid) + '/results/'
p_files = [f for f in listdir(parsed_path) if isfile(join(parsed_path, f))]
for p_file in p_files:
if int(p_file[6:-4]) > 30:
continue
print "\n\n"+p_file+"\n"
f = open(unparsed_path+p_file, 'r')
f2 = open(parsed_path+p_file,'r')
parsed = []
count = 0
graph = []
rels = {}
for line in f2.readlines():
if count is 0:
if line == "\n":
count = 1
else:
graph.append(line.rstrip())
elif count is 1:
if line == "\n":
parsed.append([graph, rels])
graph = []
rels = {}
count = 0
else:
# rels.append(line.rstrip())
type_ = line.rstrip().split('(')[0]
a = line.rstrip().split('(')[1].split(',')[0]
b = line.rstrip().split('(')[1].split(',')[1][1:-1]
if type_[:5] == 'nmod:':
if 'nmod' in rels.keys():
rels['nmod'].append([type_[5:], a, b])
else:
rels['nmod'] = [[type_[5:], a, b]]
elif type_[:5] == 'conj:':
if 'conj' in rels.keys():
rels['conj'].append([type_[5:], a, b])
else:
rels['conj'] = [[type_[5:], a, b]]
else:
if type_ in rels.keys():
rels[type_].append([a,b])
else:
rels[type_] = [[a,b]]
c = 0
for line in f.readlines():
captions[line.rstrip()[:-1]] = parsed[c]
c += 1
f.close()
f2.close()
objects = {}
attributes = {}
relationships = {}

# Convert each caption's dependency relations into scene-graph style
# objects / attributes / relationships dicts.
for k,v in captions.items():
    print '\n ------------------ \n'
    # NOTE(review): 'str' shadows the builtin str() for the rest of this
    # loop body; it accumulates a human-readable trace of the decisions.
    str = '\n'
    for relk,relv in v[1].items():
        # if relk == 'compound':
        print relk
        print relv
        for relvi in relv:
            if relk == 'nmod':
                # nmod combined with an acl on the same head becomes a
                # compound relationship; otherwise a plain relationship.
                if 'acl' in v[1].keys():
                    tf = False
                    for relvip in v[1]['acl']:
                        if relvi[1] == relvip[1]:
                            tf = True
                            str += 'REL: ' + relvi[1] + ' ' + relvi[0] + '(' + relvip[0] + ',' + relvi[2] + ')\n'
                            add_to_dic(relationships, (remDashNum(relvi[1]) + ' ' + remDashNum(relvi[0])), [remDashNum(relvip[0]),remDashNum(relvi[2])])
                            add_to_dic(objects, remDashNum(relvip[0]), None)
                            add_to_dic(objects, remDashNum(relvi[2]), None)
                    if not tf:
                        str += 'REL: ' + relvi[0] + '(' + relvi[1] + ',' + relvi[2] + ')\n'
                        add_to_dic(relationships, remDashNum(relvi[0]), [remDashNum(relvi[1]), remDashNum(relvi[2])])
                        add_to_dic(objects, remDashNum(relvi[1]), None)
                        add_to_dic(objects, remDashNum(relvi[2]), None)
                # elif 'nsubj' in v[1].keys():
                #     tf = False
                #     for relvip in v[1]['nsubj']:
                #         if relvi[1] == relvip[0]:
                #             tf = True
                #             str += 'REL: ' + relvi[1] + ' ' + relvi[0] + '(' + relvip[1] + ',' + relvi[2] + ')\n'
                #     if not tf:
                #         str += 'REL: ' + relvi[0] + '(' + relvi[1] + ',' + relvi[2] + ')\n'
                else:
                    str += 'REL: ' + relvi[0] + '(' + relvi[1] + ',' + relvi[2] + ')\n'
                    add_to_dic(relationships, remDashNum(relvi[0]), [remDashNum(relvi[1]), remDashNum(relvi[2])])
                    add_to_dic(objects, remDashNum(relvi[1]), None)
                    add_to_dic(objects, remDashNum(relvi[2]), None)
            if relk == 'amod':
                # Adjectival modifier -> attribute of the modified noun.
                str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                add_to_dic(objects, remDashNum(relvi[0]), None)
            if relk == 'compound':
                # If the modifier word appears in a WordNet definition of
                # the head, record a part_of relationship as well.
                syns = wn.synsets(remDashNum(relvi[0]))
                for x in range(len(syns)):
                    if remDashNum(relvi[1]) in remPunc(syns[x].definition()).split():
                        print 'DEF REL: part_of(' + remDashNum(relvi[1]) + ',' + syns[x].unicode_repr() + ')\n'
                        add_to_dic(relationships, 'part_of', [remDashNum(relvi[1]), syns[x]])
                str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                add_to_dic(objects, remDashNum(relvi[0]), None)
            # if relk == 'dep':
            #     str += 'ATT: ' + relvi[0] + '(' + relvi[1] + ')\n'
            if relk == 'acl':
                # Clausal modifier: with a matching dobj it becomes a
                # relationship, otherwise an attribute.
                if 'dobj' in v[1].keys():
                    tf = False
                    for relvip in v[1]['dobj']:
                        if relvi[1] == relvip[0]:
                            tf = True
                            str += 'REL: ' + relvi[1] + '(' + relvi[0] + ',' + relvip[1] + ')\n'
                            add_to_dic(relationships, remDashNum(relvi[1]), [remDashNum(relvi[0]), remDashNum(relvip[1])])
                            add_to_dic(objects, remDashNum(relvi[0]), None)
                            add_to_dic(objects, remDashNum(relvip[1]), None)
                    if not tf:
                        str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                        add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                        add_to_dic(objects, relvi[0], None)
                else:
                    str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                    add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                    add_to_dic(objects, remDashNum(relvi[0]), None)
            if relk == 'nsubj':
                # Subject relation: direction of the attribute depends on
                # whether a copula/auxiliary shares the same head.
                if 'cop' in v[1].keys():
                    tf = False
                    for relvip in v[1]['cop']:
                        if relvi[0] == relvip[0]:
                            tf = True
                            str += 'ATT: ' + relvi[0] + '(' + relvi[1] + ')\n'
                            add_to_dic(attributes,remDashNum(relvi[0]),remDashNum(relvi[1]))
                            add_to_dic(objects, remDashNum(relvi[1]), None)
                    if not tf:
                        str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                        add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                        add_to_dic(objects, remDashNum(relvi[0]), None)
                elif 'aux' in v[1].keys():
                    tf = False
                    for relvip in v[1]['aux']:
                        if relvi[0] == relvip[0]:
                            tf = True
                            str += 'ATT: ' + relvi[0] + '(' + relvi[1] + ')\n'
                            add_to_dic(attributes,remDashNum(relvi[0]),remDashNum(relvi[1]))
                            add_to_dic(objects,remDashNum(relvi[1]),None)
                    if not tf:
                        str += 'ATT: ' + relvi[1] + '(' + relvi[0] + ')\n'
                        add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                        add_to_dic(objects,remDashNum(relvi[0]),None)
                else:
                    str += 'ATT: '+relvi[1]+'('+relvi[0]+')\n'
                    add_to_dic(attributes,remDashNum(relvi[1]),remDashNum(relvi[0]))
                    add_to_dic(objects,remDashNum(relvi[0]),None)
    print objects
    print relationships
    print attributes
    # break
    break
| |
#!/usr/bin/env python
"""
This module is for working with Micron Optics interrogators.
"""
from __future__ import print_function, division
import datetime
import socket
import struct
import time
import json
import sys
import numpy as np
__version__ = "0.0.2"
class Interrogator(object):
    def __init__(self, ip_address="192.168.1.166", port=1852, fbg_props=None):
        self.ip_address = ip_address
        self.port = port
        # TCP command socket; opened later by connect().
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Raw bytes of the most recent command response.
        self.latest_response = ""
        self.sensors = []
        if fbg_props:
            self.create_sensors(fbg_props)
        # Acquisition sample rate in Hz; kept in sync by the
        # data_interleave / data_rate_divider setters.
        self.sample_rate = 1000
        self.append_data = False
        self.stream_data = False
        self.data = {}
        self.acq_counter = 0
    def connect(self):
        # Open the TCP connection to the instrument.
        self.socket.connect((self.ip_address, self.port))
def send_command(self, command, receive=True):
if command[0] != "#":
command = "#" + command
if command[-1] != "\n":
command += "\n"
self.socket.send(command.encode("ascii"))
if receive:
respsize = int(self.socket.recv(10))
self.latest_response = self.socket.recv(respsize)
    @property
    def idn(self):
        # Instrument identification string.
        self.send_command("IDN?")
        return self.latest_response.decode()

    @property
    def serial_no(self):
        # Instrument serial number.
        self.send_command("GET_SN")
        return self.latest_response.decode()

    @property
    def operating_mode(self):
        self.send_command("GET_OPERATING_MODE")
        return int(self.latest_response)

    @operating_mode.setter
    def operating_mode(self, mode):
        self.send_command("SET_OPERATING_MODE {}".format(mode))
        # The instrument echoes a confirmation; anything else means the
        # requested mode was rejected.
        if self.latest_response.decode() != "Setting Operating mode to {}.\n".format(mode):
            raise ValueError("Invalid value for operating mode.")

    @property
    def trig_mode(self):
        # Triggering mode as a human-readable string.
        self.send_command("GET_TRIG_MODE")
        vals = {0: "untriggered",
                1: "software",
                3: "hardware"}
        return vals[int(self.latest_response)]
    @trig_mode.setter
    def trig_mode(self, mode):
        """
        Mode=0 for untriggered, 1 for S/W triggered, and 3 for H/W triggered.
        Alternatively, this may be set as "untriggered", "software", or
        "hardware".
        """
        # Map the string aliases onto the numeric wire values.
        if mode == "untriggered":
            mode = 0
        elif mode == "software":
            mode = 1
        elif mode == "hardware":
            mode = 3
        self.send_command("SET_TRIG_MODE {}".format(mode))
        # The instrument echoes a confirmation; anything else means the
        # requested mode was rejected.
        if self.latest_response.decode() != "Setting triggering mode to {}.\n".format(mode):
            raise ValueError("Invalid value for triggering mode.")

    @property
    def trig_start_edge(self):
        # Which edge of the trigger signal starts acquisition.
        self.send_command("GET_TRIG_START_EDGE")
        vals = {0: "rising",
                1: "falling"}
        return vals[int(self.latest_response)]

    @trig_start_edge.setter
    def trig_start_edge(self, value):
        """0 for rising, 1 for falling."""
        if value == "rising":
            value = 0
        elif value == "falling":
            value = 1
        self.send_command("SET_TRIG_START_EDGE {}".format(value))
    @property
    def trig_stop_type(self):
        """
        This command returns the configured trigger stop event for the
        connected x30 core. Either edge stop triggering or fixed number of
        acquisitions will be indicated.
        """
        self.send_command("GET_TRIG_STOP_TYPE")
        vals = {0: "num_acq",
                1: "edge"}
        return vals[int(self.latest_response)]

    @trig_stop_type.setter
    def trig_stop_type(self, value):
        """
        This command sets the trigger stop event from among two choices. The
        first choice, indicated with a parameter value of zero, is a stop
        trigger upon reaching a number of acquisitions following the start
        trigger. The second choice, indicated with a parameter value of 1, is a
        stop trigger on presence of another edge signal, be it rising or
        falling, as determined by the #SET_TRIG_STOP_EDGE command.
        """
        if value == "num_acq":
            value = 0
        elif value == "edge":
            value = 1
        self.send_command("SET_TRIG_STOP_TYPE {}".format(value))

    @property
    def trig_stop_edge(self):
        # Which edge of the trigger signal stops acquisition.
        self.send_command("GET_TRIG_STOP_EDGE")
        vals = {0: "rising",
                1: "falling"}
        return vals[int(self.latest_response)]

    @trig_stop_edge.setter
    def trig_stop_edge(self, value):
        """0 for rising and 1 for falling."""
        if value == "rising":
            value = 0
        elif value == "falling":
            value = 1
        self.send_command("SET_TRIG_STOP_EDGE {}".format(value))
    @property
    def trig_num_acq(self):
        # Number of acquisitions taken after a start trigger.
        self.send_command("GET_TRIG_NUM_ACQ")
        return int(self.latest_response)

    @trig_num_acq.setter
    def trig_num_acq(self, value):
        """Sets the number of acquisitions following a trigger."""
        self.send_command("SET_TRIG_NUM_ACQ {}".format(value))

    @property
    def auto_retrig(self):
        self.send_command("GET_AUTO_RETRIG")
        return bool(int(self.latest_response))

    @auto_retrig.setter
    def auto_retrig(self, value):
        """
        This command configures whether or not the x30 core will automatically
        retrigger. If #SET_AUTO_RETRIG is set to zero, then only one start
        trigger event will be observed, and only a single data acquisition
        event will occur. If #SET_AUTO_RETRIG is set to 1, then the module will
        continually retrigger on subsequent start trigger events, following
        each successful stop trigger.
        """
        # Map booleans onto the 0/1 wire values; other values pass through.
        if value == True:
            value = 1
        elif value == False:
            value = 0
        self.send_command("SET_AUTO_RETRIG {}".format(value))
    def sw_trig_start(self):
        """
        This command initiates a software start trigger to the x30 core in
        S/W triggering mode. The command can also be used to simulate a
        hardware trigger start when the module is set to hardware triggering
        mode.
        """
        self.send_command("SW_TRIG_START")

    def sw_trig_stop(self):
        """
        This command initiates a software stop trigger to the x30 core in
        S/W triggering mode. The command can also be used to simulate a
        hardware trigger stop when the module is set to hardware triggering
        mode.
        """
        self.send_command("SW_TRIG_STOP")

    def set_trigger_defaults(self, on=True):
        """Sets default trigger settings:
          * Hardware triggered by falling edge
          * Stop after rising edge
          * Automatic retriggering on."""
        if on:
            self.trig_mode = "hardware"
            self.trig_start_edge = "falling"
            self.trig_stop_type = "edge"
            self.trig_stop_edge = "rising"
            self.auto_retrig = True
        else:
            # Revert to an untriggered, single-shot configuration.
            self.trig_mode = "untriggered"
            self.trig_start_edge = "rising"
            self.trig_stop_edge = "falling"
            self.trig_stop_type = "edge"
            self.auto_retrig = False
@property
def capabilities(self):
    """Return (spec_diag_view, sensor_distance) capability bits as '0'/'1'.

    Bit 0 of the response maps to the spectral/diagnostic view capability
    and bit 1 to the sensor-distance capability -- TODO confirm the bit
    assignments against the x30 command reference.
    """
    self.send_command("GET_CAPABILITIES")
    resp = int(self.latest_response)
    # Bug fix: bin(resp)[-2] returned the character 'b' for responses
    # smaller than 2 (e.g. bin(1) == '0b1').  Zero-pad to two binary
    # digits so both extracted characters are always real bits.
    bits = format(resp, "02b")
    spec_diag_view = bits[-1]
    sensor_distance = bits[-2]
    return (spec_diag_view, sensor_distance)
@property
def ch1_gain(self):
    """Returns channel 1 gain in decibels."""
    self.send_command("GET_CH_GAIN_DB 1")
    return float(self.latest_response)

@ch1_gain.setter
def ch1_gain(self, gain):
    """Sets channel 1 gain in decibels."""
    command = "SET_CH_GAIN_DB 1 {}".format(gain)
    self.send_command(command)

@property
def ch1_noise_thresh(self):
    """Returns the channel 1 noise threshold."""
    self.send_command("GET_CH_NOISE_THRESH 1")
    return float(self.latest_response)

@ch1_noise_thresh.setter
def ch1_noise_thresh(self, val):
    """Sets the channel 1 noise threshold."""
    command = "SET_CH_NOISE_THRESH 1 {}".format(val)
    self.send_command(command)
@property
def data_interleave(self):
    """Returns the current data interleave setting."""
    self.send_command("GET_DATA_INTERLEAVE")
    return int(self.latest_response)

@data_interleave.setter
def data_interleave(self, value):
    """Sets the data interleave; refreshes sample_rate on confirmation."""
    self.send_command("SET_DATA_INTERLEAVE {}".format(value))
    confirmation = "Data interleave set to {}".format(value)
    if self.latest_response.decode() == confirmation:
        self.sample_rate = 1000/value

@property
def data_rate_divider(self):
    """Returns the current data rate divider."""
    self.send_command("GET_DATA_RATE_DIVIDER")
    return int(self.latest_response)

@data_rate_divider.setter
def data_rate_divider(self, value):
    """Sets the data rate divider; refreshes sample_rate on confirmation."""
    self.send_command("SET_DATA_RATE_DIVIDER {}".format(value))
    confirmation = "Data rate divider set to {}".format(value)
    if self.latest_response.decode() == confirmation:
        self.sample_rate = 1000/value
@property
def num_averages(self):
    """Gets number of averages for the first sensor on the first channel."""
    return self.get_num_averages(1, 1)

@num_averages.setter
def num_averages(self, value):
    """Sets num averages for all sensors on all channels."""
    self.set_num_averages(value)

def get_num_averages(self, channel_no, sensor_no):
    # Query the averaging count for a single sensor on a single channel.
    self.send_command("GET_NUM_AVERAGES {} {}".format(channel_no, sensor_no))
    return(int(self.latest_response))
def set_num_averages(self, avgs, channel_no="", sensor_no=""):
    """Set the number of averages.

    With no channel/sensor given the setting applies broadly (device
    default scope); otherwise it is restricted to the given channel
    and/or sensor.  `channel_no`/`sensor_no` may now be ints as well as
    strings -- the previous `channel_no += " "` raised TypeError for the
    int arguments that get_num_averages() already accepted.
    """
    parts = ["SET_NUM_AVERAGES"]
    if channel_no:
        parts.append(str(channel_no))
    if sensor_no:
        parts.append(str(sensor_no))
    parts.append(str(avgs))
    self.send_command(" ".join(parts))
    # Device acknowledges with a reply starting "Setting ..."; keep the
    # original best-effort warning rather than raising.
    if self.latest_response.decode().split()[0] != "Setting":
        print("Failed to set number of averages")
def get_data(self):
    """Fetch one acquisition, parse the 88-byte status header and update
    every sensor's wavelength.

    In streaming mode the record is read from the dedicated streaming
    socket: a 10-byte ASCII size prefix, then the payload; an 8-byte
    token is received separately on the first iteration and trails the
    payload afterwards.  Otherwise a single GET_DATA command is issued.
    If self.append_data is set, the parsed values are appended to
    self.data (skipped when error code 9 is reported).
    """
    if self.stream_data:
        # Streaming frames arrive with a 10-byte length prefix.
        respsize = int(self.streaming_socket.recv(10))
        response = self.streaming_socket.recv(respsize)
        if self.stream_iteration < 1:
            # First frame: the 8-byte token follows as a separate read.
            token = self.streaming_socket.recv(8)
        else:
            # Later frames carry the token at the tail of the payload.
            token = response[-8:]
            response = response[:-8]
        self.stream_iteration += 1
    else:
        self.send_command("GET_DATA")
        response = self.latest_response
    # First 88 bytes: status header; remainder: packed peak wavelengths.
    status_header = response[:88]
    data = response[88:]
    # unpack the struct into variables
    (
        fs_radix, cur_layer, fw_ver, abcde, # 0 fixed
        fbg_thermistor, knpl, fghij, # 1 fixed
        reserved2, tx_ambient_temp, # 2 fixed
        num_fbg_peaks, num_ffpi_peaks, # 3 fixed
        num_dut1_peaks, num_dut2_peaks, # 4 fixed
        num_dut3_peaks, num_dut4_peaks, # 5 fixed
        acq_counter, qr, reserved7, # 6 fixed
        serial_number, # 7
        kernel_timestamp_microseconds, # 8
        kernel_timestamp_seconds, # 9
        kernel_buffers, kernel_src_buffer, # 10 fixed
        error_and_kernel_rt_loc0, # 11 needs parse
        buffer, header_ver, header_length, # 12 fixed
        dut1_gain, dut2_gain, # 13 fixed
        dut3_gain, dut4_gain, # 14 fixed
        dut1_noise_thresh, dut2_noise_thresh, # 15 fixed
        dut3_noise_thresh, dut4_noise_thresh, # 16 fixed
        peak_data_rate_div, hw_clk_div, # 17 fixed
        granularity, # 18
        reserved4, # 19
        starting_lambda, # 20
        ending_lambda # 21
    ) = struct.unpack(
        '<' # little-endian (the old comment said big endian; '<' is little)
        'BBBB' # 0 needs parse
        'HBB' # 1 needs parse
        'HH' # 2
        'HH' # 3
        'HH' # 4
        'HH' # 5
        'HBB' # 6 needs parse
        'I' # 7
        'I' # 8
        'I' # 9
        'HH' # 10
        'I' # 11 needs parse
        'BBH' # 12
        'HH' # 13
        'HH' # 14
        'HH' # 15
        'HH' # 16
        'HH' # 17
        'I' # 18
        'I' # 19
        'I' # 20
        'I', # 21
        status_header
    )
    # 0 parse abcde -- single status bits, most significant first
    acq_triggered = bool(abcde & 0x80)
    calibration_fault = bool(abcde & 0x40)
    start_of_frame = bool(abcde & 0x20)
    primary_fan_state = bool(abcde & 0x10)
    secondary_fan_state = bool(abcde & 0x08)
    s0_mux_state = bool(abcde & 0x04)
    s1_mux_state = bool(abcde & 0x02)
    s2_mux_state = bool(abcde & 0x01)
    # 1 parse fghij -- transfer type in the high nibble, limit flags below
    xfer_type = fghij >> 4
    soa_therm_limit = bool(fghij & 0x08)
    soa_current_limit = bool(fghij & 0x04)
    tec_over_temp = bool(fghij & 0x02)
    tec_under_temp = bool(fghij & 0x01)
    # 1 parse knpl -- four two-bit fields packed high to low
    operating_mode = knpl >> 6
    triggering_mode = (knpl & 0x30) >> 4
    sm041_mux_level = (knpl & 0x0c) >> 2
    sw_position = knpl & 0x03
    # 6 parse qr -- NRZ command in the top 3 bits, rest reserved
    nrz_command = qr >> 5
    reserved6 = qr & 0x1f
    # 11 parse -- error code in the top byte, kernel RT loc0 in low 24 bits
    error = error_and_kernel_rt_loc0 >> 24
    kernel_rt_loc0 = error_and_kernel_rt_loc0 & 0xffffff
    # Keep the human-readable subset of the header for callers.
    self.data_header = {"Serial number": serial_number,
                        "FBG thermistor": fbg_thermistor,
                        "FS radix": fs_radix,
                        "Firmware version": fw_ver,
                        "Acquisition triggered": acq_triggered,
                        "Calibration fault": calibration_fault,
                        "Start of frame": start_of_frame,
                        "Primary fan state": primary_fan_state,
                        "Secondary fan state": secondary_fan_state,
                        "S0 mux state": s0_mux_state,
                        "Percent buffer": buffer,
                        "Header length": header_length,
                        "Header version": header_ver,
                        "Tx ambient temp": tx_ambient_temp,
                        "SM041 mux level": sm041_mux_level,
                        "HW clock div": hw_clk_div,
                        "Granularity": granularity,
                        "Operating mode": operating_mode,
                        "Starting lambda": starting_lambda,
                        "Ending lambda": ending_lambda,
                        "Kernel timestamp (seconds)": kernel_timestamp_seconds,
                        "Kernel timestamp (microseconds)": kernel_timestamp_microseconds,
                        "Kernel timestamp": datetime.datetime.fromtimestamp(kernel_timestamp_seconds),
                        "Triggering mode": triggering_mode,
                        "Error": error,
                        "Acquisition counter": acq_counter}
    self.data_serial_no = serial_number
    # Combine the two timestamp fields into one float in seconds.
    self.kernel_timestamp = float(kernel_timestamp_seconds) \
        + float(kernel_timestamp_microseconds)*1e-6
    # Each sensor's wavelength is a little-endian uint32 scaled down by
    # `granularity`.
    for n, sensor in enumerate(self.sensors):
        try:
            (sensor.wavelength,) = struct.unpack("<I", data[n*4:(n+1)*4])
            sensor.wavelength /= granularity
        except:
            # NOTE(review): bare except -- any failure (short payload,
            # missing peak) marks the sensor NaN; consider narrowing to
            # struct.error.
            sensor.wavelength = np.nan
    if self.append_data and error != 9:
        self.do_append_data()
    self.acq_counter = acq_counter
def flush_buffer(self, receive=True, verbose=False):
    """
    This command flushes out the contents of the data buffer for the
    present socket connection, clearing all data and resetting the buffer
    count to zero.
    """
    self.send_command("FLUSH_BUFFER", receive=receive)
    # Only print when a reply was actually read.
    if verbose and receive:
        print(self.latest_response)

def enable_buffer(self):
    # Turn on the module's internal data buffering for this connection.
    self.send_command("SET_BUFFER_ENABLE 1")

def disable_buffer(self):
    # Turn off the module's internal data buffering for this connection.
    self.send_command("SET_BUFFER_ENABLE 0")
@property
def buffer_count(self):
    """
    This command returns the number of entries stored in the internal data
    buffer for the present socket connection. This value can range from 0
    to 60,000 entries.
    """
    self.send_command("GET_BUFFER_COUNT")
    return int(self.latest_response)
def create_sensors_from_file(self, properties_file="Config/fbg_properties.json"):
    """Load FBG properties from a JSON file and (re)build the sensor list."""
    with open(properties_file) as json_file:
        self.fbg_properties = json.load(json_file)
    self.create_sensors()
def create_sensors(self, fbg_props=None):
    """Build self.sensors from self.fbg_properties, ordered by "position"."""
    if fbg_props:
        self.fbg_properties = fbg_props
    # Pre-size the list, then slot each sensor at its declared position.
    ordered = [None for _ in self.fbg_properties]
    for sensor_name, sensor_props in self.fbg_properties.items():
        ordered[sensor_props["position"]] = Sensor(sensor_name,
                                                   properties=sensor_props)
    self.sensors = ordered
def setup_streaming(self, verbose=False):
    """Open a dedicated socket and put the module into streaming-data mode."""
    self.create_data_dict() if False else None  # (placeholder removed)
    self.setup_append_data()
    # Separate socket so streaming frames don't mix with command replies.
    self.streaming_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.streaming_socket.connect((self.ip_address, self.port))
    command = "#SET_STREAMING_DATA 1\n"
    self.streaming_socket.send(command.encode("ascii"))
    # The acknowledgement arrives as a 10-byte size prefix plus payload.
    respsize = int(self.streaming_socket.recv(10))
    response = self.streaming_socket.recv(respsize)
    if verbose:
        print(response)
    # Flags consumed by get_data() to select the streaming read path.
    self.stream_data = True
    self.stream_iteration = 0
def disable_streaming(self):
    """Take the module out of streaming-data mode.

    Bug fixes: the command was sent as "SET STREAMING DATA 0" (spaces),
    which does not match the "#SET_STREAMING_DATA 1" command used by
    setup_streaming(); and stream_data was never cleared, so get_data()
    kept reading from the streaming socket.
    """
    self.send_command("SET_STREAMING_DATA 0")
    self.stream_data = False
def setup_append_data(self):
    """Reset self.data and enable per-acquisition appending in get_data()."""
    self.create_data_dict()
    self.append_data = True
def create_data_dict(self):
    """Technically this just creates new items rather than a new dict.

    One empty array per sensor channel (wavelength plus either strain or
    the sensor's own type), plus timestamp/time/serial_no bookkeeping.
    """
    self.data.clear()
    for sensor in self.sensors:
        self.data[sensor.name + "_wavelength"] = np.array([])
        # Both "strain" and "bare strain" sensors log under "_strain".
        suffix = "strain" if "strain" in sensor.type else sensor.type
        self.data[sensor.name + "_" + suffix] = np.array([])
    for key in ("timestamp", "time", "serial_no"):
        self.data[key] = np.array([])
def do_append_data(self):
    """Append the latest reading of every sensor to the data dictionary."""
    data = self.data
    data["timestamp"] = np.append(data["timestamp"], self.kernel_timestamp)
    data["serial_no"] = np.append(data["serial_no"], self.data_serial_no)
    # Elapsed time starts at 0.0 and advances by the timestamp delta.
    if len(data["time"]):
        step = data["timestamp"][-1] - data["timestamp"][-2]
        next_time = data["time"][-1] + step
    else:
        next_time = 0.0
    data["time"] = np.append(data["time"], next_time)
    for sensor in self.sensors:
        key = sensor.name + "_wavelength"
        data[key] = np.append(data[key], sensor.wavelength)
        if sensor.type in ("strain", "bare strain"):
            data[sensor.name + "_strain"] = np.append(
                data[sensor.name + "_strain"], sensor.strain)
        elif sensor.type == "temperature":
            data[sensor.name + "_temperature"] = np.append(
                data[sensor.name + "_temperature"], sensor.temperature)
def sleep(self):
    # Pause for half a sample period (sample_rate is in Hz).
    time.sleep(1/self.sample_rate/2)
def zero_strain_sensors(self):
    """Take one reading and use it as each strain sensor's zero reference.

    Sensors whose current wavelength is NaN (no peak detected) fall back
    to their nominal wavelength.  Bug fix: the fallback previously
    assigned the misspelled attribute `initial_wavelenth`, so sensors
    with no peak were never actually zeroed.
    """
    self.get_data()
    for sensor in self.sensors:
        if "strain" in sensor.type:
            if not np.isnan(sensor.wavelength):
                sensor.initial_wavelength = sensor.wavelength
            else:
                sensor.initial_wavelength = sensor.nominal_wavelength
def save_settings(self):
    """Persist the current settings on the module and report the outcome."""
    self.send_command("SAVE_SETTINGS")
    # The module confirms with the exact reply "Settings Saved.\n".
    if self.latest_response.decode() == "Settings Saved.\n":
        print("Settings saved")
    else:
        print("Saving settings unsuccessful")
def who(self):
    """Returns a list of IP addresses connected to the interrogator."""
    self.send_command("WHO?")
    return self.latest_response.decode()

def whoami(self):
    """Returns the IP address of the remote PC that sent the command."""
    self.send_command("WHOAMI?")
    return self.latest_response.decode()

def set_date(self, datestring):
    """Set the module's date; `datestring` format is device-defined -- TODO confirm."""
    self.send_command("SET_DATE {}".format(datestring))

def restart_network(self):
    """Restart the module's network stack (receive=False: no reply is read)."""
    self.send_command("RESTART_NETWORK", receive=False)

def reboot(self):
    """Reboot the module."""
    self.send_command("REBOOT")

def disconnect(self):
    """Close the command socket."""
    self.socket.close()
class Sensor(object):
    """A single FBG sensor of type "strain", "bare strain" or "temperature".

    The interrogator driver writes the latest reading into `wavelength`;
    the `strain` and `temperature` properties convert that reading using
    the calibration data loaded from the properties dict (usually parsed
    from Config/fbg_properties.json).
    """

    def __init__(self, name, properties=None):
        self.name = name
        self.properties = properties        # raw properties dict (JSON)
        self.position = None                # index in the driver's sensor list
        self.type = None                    # "strain", "bare strain" or "temperature"
        self.part_no = None
        self.serial_no = None
        self.nominal_wavelength = None
        self.gage_factor = None
        self.gage_constant_1 = None
        self.gage_constant_2 = None
        self.temperature_change = 0.0       # used in the thermal-output correction
        self.cte_specimen = None
        self.wavelength_shift = None
        self.wavelength = None              # latest measured wavelength
        self.initial_wavelength = None      # zero-strain reference wavelength
        self.wavelength_offset = None
        self.cal_coeff_1 = None
        self.cal_coeff_2 = None
        self.cal_coeff_3 = None
        self.cal_coeff_0 = None
        self.temp_sens = None
        if self.properties:
            self.load_properties()

    def load_properties(self, properties=None):
        """Populate the sensor attributes from the properties dict."""
        if properties:
            self.properties = properties
        self.type = self.properties["sensor type"]
        self.position = self.properties["position"]
        try:
            self.part_no = self.properties["part number"]
        except KeyError:
            pass
        try:
            self.serial_no = self.properties["serial number"]
        except KeyError:
            # Narrowed from a bare `except:` -- only a missing key is an
            # acceptable reason to skip the serial number; anything else
            # should surface.
            pass
        self.nominal_wavelength = self.properties["nominal wavelength"]
        if self.type == "strain":
            self.gage_factor = self.properties["gage factor"]
            self.gage_constant_1 = self.properties["gage constant 1"]
            self.gage_constant_2 = self.properties["gage constant 2"]
            self.cte_specimen = self.properties["CTE of test specimen"]
            self.initial_wavelength = self.nominal_wavelength
        elif self.type == "bare strain":
            self.ke = self.properties["ke"]
            self.initial_wavelength = self.nominal_wavelength
        elif self.type == "temperature":
            self.temp_at_nom_wavelength = self.properties["temperature at nominal wavelength"]
            self.wavelength_offset = self.properties["wavelength offset"]
            self.cal_coeff_0 = self.properties["calibration coeff. 0"]
            self.cal_coeff_1 = self.properties["calibration coeff. 1"]
            self.cal_coeff_2 = self.properties["calibration coeff. 2"]
            self.cal_coeff_3 = self.properties["calibration coeff. 3"]
            self.temp_sens = self.properties["temp. sensitivity"]

    def load_properties_from_file(self, filename="Config/fbg_properties.json"):
        """Reads the properties in JSON format from the given file."""
        with open(filename) as f:
            self.properties = json.load(f)[self.name]

    @property
    def strain(self):
        """Strain derived from the current wavelength, or None.

        "strain" sensors return microstrain with a thermal-output
        correction; "bare strain" sensors return the raw ratio divided by
        ke (NOTE(review): not scaled by 1e6 -- preserved as-is, confirm
        the intended units).
        """
        if self.type.lower() == "strain":
            self.wavelength_shift = self.wavelength - self.initial_wavelength
            self.thermal_output = self.temperature_change*\
                (self.gage_constant_1/self.gage_factor + self.cte_specimen\
                - self.gage_constant_2)
            return (self.wavelength_shift/self.initial_wavelength)\
                *1e6/self.gage_factor - self.thermal_output
        elif self.type.lower() == "bare strain":
            self.wavelength_shift = self.wavelength - self.initial_wavelength
            return self.wavelength_shift/self.initial_wavelength/self.ke
        else:
            return None

    @property
    def temperature(self):
        """Temperature from the cubic calibration polynomial, or None."""
        if self.type.lower() == "temperature":
            w = self.wavelength + self.wavelength_offset
            return self.cal_coeff_3*w**3 + self.cal_coeff_2*w**2 \
                + self.cal_coeff_1*w + self.cal_coeff_0
        else:
            return None
def terminal(ip_address="192.168.1.166", port=1852):
    """Creates a communication terminal to send commands.

    Type commands without the leading '#'; type "exit" to quit.  The
    socket is now closed even if input is interrupted or a recv fails
    (previously an exception leaked the connection).
    """
    if sys.version_info >= (3, 0):
        input_function = input
    else:
        input_function = raw_input  # noqa: F821 -- Python 2 builtin
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip_address, port))
    try:
        while True:
            message = input_function("#")
            if message == "exit":
                break
            s.send(b"#" + message.encode("ascii") + b"\n")
            # Replies arrive as a 10-byte size prefix followed by payload.
            respsize = int(s.recv(10))
            response = s.recv(respsize)
            print(response)
    finally:
        s.close()
if __name__ == "__main__":
    # Module is meant to be imported; nothing runs when executed directly.
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.