# ronenabr/lyx | lib/scripts/fen2ascii.py | copies: 6 | size: 1042 | license: gpl-2.0

#! /usr/bin/env python
# file fen2ascii.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
# author Kayvan A. Sylvan
# Full author contact details are available in file CREDITS.
# This script will convert a chess position in the FEN
# format to an ascii representation of the position.
import sys, string, os

# Reopen stdin/stdout on the files given on the command line.
os.close(0)
os.close(1)
sys.stdin = open(sys.argv[1], "r")
sys.stdout = open(sys.argv[2], "w")

line = sys.stdin.readline()
if line[-1] == '\n':
    line = line[:-1]

# Keep only the piece-placement field of the FEN record and split
# it into its eight ranks.
line = string.split(line, ' ')[0]
comp = string.split(line, '/')

cont = 1
margin = " " * 6

print margin + ' +' + "-" * 15 + '+'
for i in range(8):
    cont = cont + 1
    tmp = ""
    for j in comp[i]:
        if j >= '0' and j <= '9':
            # A digit denotes a run of empty squares; shade them
            # alternately using the parity of the running counter.
            for k in range(int(j)):
                cont = cont + 1
                x, mod = divmod(cont, 2)
                if mod:
                    tmp = tmp + '| '
                else:
                    tmp = tmp + '|*'
        else:
            # A letter is a piece occupying the square.
            tmp = tmp + '|' + j
            cont = cont + 1
    row = 8 - i
    print margin, row, tmp + "|"
print margin + ' +' + "-" * 15 + '+'
print margin + ' a b c d e f g h '
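#
# A doctest-style sketch (added for illustration; expand_rank is a
# hypothetical helper, not part of LyX) of the expansion rule implemented
# by the nested loops above: each FEN digit n becomes n empty squares
# rendered '| ' or '|*' according to the running parity counter `cont`,
# while letters keep their square as '|<piece>' and still advance the
# parity.
#
#   >>> expand_rank('r2qk2r', first_square_dark=False)
#   '|r|*| |q|k|*| |r'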
# oeeagle/quantum | neutron/plugins/nicira/NeutronPlugin.py | copies: 2 | size: 110337 | license: apache-2.0 (per file header)

# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import logging
import os
from oslo.config import cfg
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
import webob.exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import constants
from neutron.common import exceptions as q_exc
from neutron.common import utils
from neutron import context as q_context
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.db import portsecurity_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net as ext_net_extn
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings as pbin
from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.plugins.common import constants as plugin_const
from neutron.plugins.nicira.common import config # noqa
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import nsx_utils
from neutron.plugins.nicira.common import securitygroups as nvp_sec
from neutron.plugins.nicira.common import sync
from neutron.plugins.nicira.dbexts import distributedrouter as dist_rtr
from neutron.plugins.nicira.dbexts import maclearning as mac_db
from neutron.plugins.nicira.dbexts import nicira_db
from neutron.plugins.nicira.dbexts import nicira_networkgw_db as networkgw_db
from neutron.plugins.nicira.dbexts import nicira_qos_db as qos_db
from neutron.plugins.nicira import dhcpmeta_modes
from neutron.plugins.nicira.extensions import maclearning as mac_ext
from neutron.plugins.nicira.extensions import nvp_networkgw as networkgw
from neutron.plugins.nicira.extensions import nvp_qos as ext_qos
from neutron.plugins.nicira import nvp_cluster
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
LOG = logging.getLogger("NeutronPlugin")
NVP_NOSNAT_RULES_ORDER = 10
NVP_FLOATINGIP_NAT_RULES_ORDER = 224
NVP_EXTGW_NAT_RULES_ORDER = 255
NVP_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions')
NVP_DEFAULT_NEXTHOP = '1.1.1.1'
# Provider network extension - allowed network types for the NVP Plugin
class NetworkTypes:
"""Allowed provider network types for the NVP Plugin."""
L3_EXT = 'l3_ext'
STT = 'stt'
GRE = 'gre'
FLAT = 'flat'
VLAN = 'vlan'
BRIDGE = 'bridge'
def create_nvp_cluster(cluster_opts, concurrent_connections,
nsx_gen_timeout):
cluster = nvp_cluster.NVPCluster(**cluster_opts)
def _ctrl_split(x, y):
return (x, int(y), True)
api_providers = [_ctrl_split(*ctrl.split(':'))
for ctrl in cluster.nsx_controllers]
cluster.api_client = NvpApiClient.NVPApiHelper(
api_providers, cluster.nsx_user, cluster.nsx_password,
request_timeout=cluster.req_timeout,
http_timeout=cluster.http_timeout,
retries=cluster.retries,
redirects=cluster.redirects,
concurrent_connections=concurrent_connections,
nvp_gen_timeout=nsx_gen_timeout)
return cluster
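# Illustrative note (not part of the original module): each entry of
# cluster.nsx_controllers is expected to be a "host:port" string, which
# _ctrl_split expands into a (host, port, flag) tuple, e.g.
#
#   >>> _ctrl_split(*'10.0.0.2:443'.split(':'))
#   ('10.0.0.2', 443, True)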
class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
db_base_plugin_v2.NeutronDbPluginV2,
dhcpmeta_modes.DhcpMetadataAccess,
dist_rtr.DistributedRouter_mixin,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
mac_db.MacLearningDbMixin,
networkgw_db.NetworkGatewayMixin,
nvp_sec.NVPSecurityGroups,
portbindings_db.PortBindingMixin,
portsecurity_db.PortSecurityDbMixin,
qos_db.NVPQoSDbMixin,
securitygroups_db.SecurityGroupDbMixin):
"""L2 Virtual network plugin.
NvpPluginV2 is a Neutron plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["agent",
"allowed-address-pairs",
"binding",
"dhcp_agent_scheduler",
"dist-router",
"ext-gw-mode",
"extraroute",
"mac-learning",
"multi-provider",
"network-gateway",
"nvp-qos",
"port-security",
"provider",
"quotas",
"external-net",
"router",
"security-group"]
__native_bulk_support = True
# Map nova zones to cluster for easy retrieval
novazone_cluster_map = {}
def __init__(self):
        # TODO(salv-orlando): Replace these dicts with
        # collections.defaultdict for better handling of default values
# Routines for managing logical ports in NVP
self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW,
l3_db.DEVICE_OWNER_ROUTER_INTF]
self._port_drivers = {
'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_create_ext_gw_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_create_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_router_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_create_l2_gw_port,
'default': self._nvp_create_port},
'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_delete_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_router_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_delete_fip_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_delete_port,
'default': self._nvp_delete_port}
}
neutron_extensions.append_api_extensions_path([NVP_EXT_PATH])
self.nvp_opts = cfg.CONF.NSX
self.nvp_sync_opts = cfg.CONF.NSX_SYNC
self.cluster = create_nvp_cluster(cfg.CONF,
self.nvp_opts.concurrent_connections,
self.nvp_opts.nsx_gen_timeout)
self.base_binding_dict = {
pbin.VIF_TYPE: pbin.VIF_TYPE_OVS,
pbin.CAPABILITIES: {
pbin.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
db.configure_db()
self._extend_fault_map()
self.setup_dhcpmeta_access()
# Set this flag to false as the default gateway has not
# been yet updated from the config file
self._is_default_net_gw_in_sync = False
# Create a synchronizer instance for backend sync
self._synchronizer = sync.NvpSynchronizer(
self, self.cluster,
self.nvp_sync_opts.state_sync_interval,
self.nvp_sync_opts.min_sync_req_delay,
self.nvp_sync_opts.min_chunk_size,
self.nvp_sync_opts.max_random_sync_delay)
def _ensure_default_network_gateway(self):
if self._is_default_net_gw_in_sync:
return
# Add the gw in the db as default, and unset any previous default
def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
try:
ctx = q_context.get_admin_context()
self._unset_default_network_gateways(ctx)
if not def_l2_gw_uuid:
return
try:
def_network_gw = self._get_network_gateway(ctx,
def_l2_gw_uuid)
except networkgw_db.GatewayNotFound:
                # Create the gateway in the DB only - don't go to NVP
def_gw_data = {'id': def_l2_gw_uuid,
'name': 'default L2 gateway service',
'devices': []}
gw_res_name = networkgw.RESOURCE_NAME.replace('-', '_')
def_network_gw = super(
NvpPluginV2, self).create_network_gateway(
ctx, {gw_res_name: def_gw_data})
            # In any case set it as the default
self._set_default_network_gateway(ctx, def_network_gw['id'])
# Ensure this method is executed only once
self._is_default_net_gw_in_sync = True
except Exception:
LOG.exception(_("Unable to process default l2 gw service:%s"),
def_l2_gw_uuid)
raise
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
"""Build ip_addresses data structure for logical router port.
No need to perform validation on IPs - this has already been
done in the l3_db mixin class.
"""
ip_addresses = []
for ip in fixed_ips:
if not subnet_ids or (ip['subnet_id'] in subnet_ids):
subnet = self._get_subnet(context, ip['subnet_id'])
ip_prefix = '%s/%s' % (ip['ip_address'],
subnet['cidr'].split('/')[1])
ip_addresses.append(ip_prefix)
return ip_addresses
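    # Illustrative sketch (not part of the original module): with a single
    # fixed IP on a /24 subnet the method returns CIDR-suffixed addresses,
    # e.g.
    #
    #   >>> plugin._build_ip_address_list(
    #   ...     ctx, [{'subnet_id': subnet_id, 'ip_address': '10.0.0.2'}])
    #   ['10.0.0.2/24']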
def _create_and_attach_router_port(self, cluster, context,
router_id, port_data,
attachment_type, attachment,
attachment_vlan=None,
subnet_ids=None):
# Use a fake IP address if gateway port is not 'real'
ip_addresses = (port_data.get('fake_ext_gw') and
['0.0.0.0/31'] or
self._build_ip_address_list(context,
port_data['fixed_ips'],
subnet_ids))
try:
lrouter_port = nvplib.create_router_lport(
cluster, router_id, port_data.get('tenant_id', 'fake'),
port_data.get('id', 'fake'), port_data.get('name', 'fake'),
port_data.get('admin_state_up', True), ip_addresses,
port_data.get('mac_address'))
LOG.debug(_("Created NVP router port:%s"), lrouter_port['uuid'])
except NvpApiClient.NvpApiException:
LOG.exception(_("Unable to create port on NVP logical router %s"),
router_id)
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to create logical router port for neutron "
"port id %(port_id)s on router %(router_id)s") %
{'port_id': port_data.get('id'), 'router_id': router_id})
self._update_router_port_attachment(cluster, context, router_id,
port_data, lrouter_port['uuid'],
attachment_type, attachment,
attachment_vlan)
return lrouter_port
def _update_router_gw_info(self, context, router_id, info):
# NOTE(salvatore-orlando): We need to worry about rollback of NVP
# configuration in case of failures in the process
# Ref. LP bug 1102301
router = self._get_router(context, router_id)
# Check whether SNAT rule update should be triggered
# NVP also supports multiple external networks so there is also
# the possibility that NAT rules should be replaced
current_ext_net_id = router.gw_port_id and router.gw_port.network_id
new_ext_net_id = info and info.get('network_id')
# SNAT should be enabled unless info['enable_snat'] is
# explicitly set to false
enable_snat = new_ext_net_id and info.get('enable_snat', True)
# Remove if ext net removed, changed, or if snat disabled
remove_snat_rules = (current_ext_net_id and
new_ext_net_id != current_ext_net_id or
router.enable_snat and not enable_snat)
# Add rules if snat is enabled, and if either the external network
# changed or snat was previously disabled
# NOTE: enable_snat == True implies new_ext_net_id != None
add_snat_rules = (enable_snat and
(new_ext_net_id != current_ext_net_id or
not router.enable_snat))
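        # Worked example (added for illustration, not from the original
        # source), assuming the router previously had SNAT enabled:
        #
        #   current ext net  new ext net  enable_snat -> remove , add
        #   A                B            True        -> True   , True
        #   A                A            False       -> True   , False
        #   None             A            True        -> False  , True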
router = super(NvpPluginV2, self)._update_router_gw_info(
context, router_id, info, router=router)
# Add/Remove SNAT rules as needed
# Create an elevated context for dealing with metadata access
# cidrs which are created within admin context
ctx_elevated = context.elevated()
if remove_snat_rules or add_snat_rules:
cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id)
if remove_snat_rules:
# Be safe and concede NAT rules might not exist.
# Therefore use min_num_expected=0
for cidr in cidrs:
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=0,
source_ip_addresses=cidr)
if add_snat_rules:
ip_addresses = self._build_ip_address_list(
ctx_elevated, router.gw_port['fixed_ips'])
# Set the SNAT rule for each subnet (only first IP)
for cidr in cidrs:
cidr_prefix = int(cidr.split('/')[1])
nvplib.create_lrouter_snat_rule(
self.cluster, router_id,
ip_addresses[0].split('/')[0],
ip_addresses[0].split('/')[0],
order=NVP_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': cidr})
def _update_router_port_attachment(self, cluster, context,
router_id, port_data,
nvp_router_port_id,
attachment_type,
attachment,
attachment_vlan=None):
if not nvp_router_port_id:
nvp_router_port_id = self._find_router_gw_port(context, port_data)
try:
nvplib.plug_router_port_attachment(cluster, router_id,
nvp_router_port_id,
attachment,
attachment_type,
attachment_vlan)
LOG.debug(_("Attached %(att)s to NVP router port %(port)s"),
{'att': attachment, 'port': nvp_router_port_id})
except NvpApiClient.NvpApiException:
# Must remove NVP logical port
nvplib.delete_router_lport(cluster, router_id,
nvp_router_port_id)
LOG.exception(_("Unable to plug attachment in NVP logical "
"router port %(r_port_id)s, associated with "
"Neutron %(q_port_id)s"),
{'r_port_id': nvp_router_port_id,
'q_port_id': port_data.get('id')})
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to plug attachment in router port "
"%(r_port_id)s for neutron port id %(q_port_id)s "
"on router %(router_id)s") %
{'r_port_id': nvp_router_port_id,
'q_port_id': port_data.get('id'),
'router_id': router_id}))
def _get_port_by_device_id(self, context, device_id, device_owner):
"""Retrieve ports associated with a specific device id.
Used for retrieving all neutron ports attached to a given router.
"""
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(
device_id=device_id,
device_owner=device_owner,).all()
def _find_router_subnets_cidrs(self, context, router_id):
"""Retrieve subnets attached to the specified router."""
ports = self._get_port_by_device_id(context, router_id,
l3_db.DEVICE_OWNER_ROUTER_INTF)
# No need to check for overlapping CIDRs
cidrs = []
for port in ports:
for ip in port.get('fixed_ips', []):
cidrs.append(self._get_subnet(context,
ip.subnet_id).cidr)
return cidrs
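    # Illustrative sketch (not part of the original module): a router with
    # interfaces on 10.0.0.0/24 and 10.0.1.0/24 yields
    #
    #   >>> plugin._find_router_subnets_cidrs(ctx, router_id)
    #   ['10.0.0.0/24', '10.0.1.0/24']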
def _nvp_find_lswitch_for_port(self, context, port_data):
network = self._get_network(context, port_data['network_id'])
network_bindings = nicira_db.get_network_bindings(
context.session, port_data['network_id'])
max_ports = self.nvp_opts.max_lp_per_overlay_ls
allow_extra_lswitches = False
for network_binding in network_bindings:
if network_binding.binding_type in (NetworkTypes.FLAT,
NetworkTypes.VLAN):
max_ports = self.nvp_opts.max_lp_per_bridged_ls
allow_extra_lswitches = True
break
try:
return self._handle_lswitch_selection(self.cluster, network,
network_bindings, max_ports,
allow_extra_lswitches)
except NvpApiClient.NvpApiException:
err_desc = _("An exception occurred while selecting logical "
"switch for the port")
LOG.exception(err_desc)
raise nvp_exc.NvpPluginException(err_msg=err_desc)
def _nvp_create_port_helper(self, cluster, ls_uuid, port_data,
do_port_security=True):
return nvplib.create_lport(cluster, ls_uuid, port_data['tenant_id'],
port_data['id'], port_data['name'],
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'],
port_data[psec.PORTSECURITY],
port_data[ext_sg.SECURITYGROUPS],
port_data.get(ext_qos.QUEUE),
port_data.get(mac_ext.MAC_LEARNING),
port_data.get(addr_pair.ADDRESS_PAIRS))
def _handle_create_port_exception(self, context, port_id,
ls_uuid, lp_uuid):
with excutils.save_and_reraise_exception():
# rollback nvp logical port only if it was successfully
# created on NVP. Should this command fail the original
# exception will be raised.
if lp_uuid:
# Remove orphaned port from NVP
nvplib.delete_port(self.cluster, ls_uuid, lp_uuid)
# rollback the neutron-nvp port mapping
nicira_db.delete_neutron_nsx_port_mapping(context.session,
port_id)
msg = (_("An exception occurred while creating the "
"quantum port %s on the NVP plaform") % port_id)
LOG.exception(msg)
def _nvp_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NVP platform."""
        # FIXME(salvatore-orlando): On the NVP platform we do not really have
        # external networks. So if a user tries to create a "regular" VIF
        # port on an external network we are unable to actually create it.
        # However, in order to not break unit tests, we need to still create
        # the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
lport = None
selected_lswitch = None
try:
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
lport = self._nvp_create_port_helper(self.cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
if port_data['device_owner'] not in self.port_special_owners:
nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
port_data['id'])
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s."), port_data)
except (NvpApiClient.NvpApiException, q_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
lport and lport['uuid'])
except db_exc.DBError as e:
if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
isinstance(e.inner_exception, sql_exc.IntegrityError)):
msg = (_("Concurrent network deletion detected; Back-end Port "
"%(nsx_id)s creation to be rolled back for Neutron "
"port: %(neutron_id)s")
% {'nsx_id': lport['uuid'],
'neutron_id': port_data['id']})
LOG.warning(msg)
if selected_lswitch and lport:
try:
nvplib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
except q_exc.NotFound:
LOG.debug(_("NSX Port %s already gone"), lport['uuid'])
def _nvp_delete_port(self, context, port_data):
# FIXME(salvatore-orlando): On the NVP platform we do not really have
# external networks. So deleting regular ports from external networks
# does not make sense. However we cannot raise as this would break
# unit tests.
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
return
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nvp_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
nvplib.delete_port(self.cluster,
nvp_switch_id,
nvp_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except q_exc.NotFound:
LOG.warning(_("Port %s not found in NVP"), port_data['id'])
def _nvp_delete_router_port(self, context, port_data):
# Delete logical router port
lrouter_id = port_data['device_id']
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nvp_port_id:
LOG.warn(_("Neutron port %(port_id)s not found on NVP backend. "
"Terminating delete operation. A dangling router port "
"might have been left on router %(router_id)s"),
{'port_id': port_data['id'],
'router_id': lrouter_id})
return
try:
nvplib.delete_peer_router_lport(self.cluster,
lrouter_id,
nvp_switch_id,
nvp_port_id)
except NvpApiClient.NvpApiException:
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.exception(_("Ignoring exception as this means the peer "
"for port '%s' has already been deleted."),
nvp_port_id)
# Delete logical switch port
self._nvp_delete_port(context, port_data)
def _nvp_create_router_port(self, context, port_data):
"""Driver for creating a switch port to be connected to a router."""
# No router ports on external networks!
if self._network_is_external(context, port_data['network_id']):
            raise nvp_exc.NvpPluginException(
                err_msg=(_("It is not allowed to create router interface "
                           "ports on the external network '%s'") %
                         port_data['network_id']))
ls_port = None
selected_lswitch = None
try:
selected_lswitch = self._nvp_find_lswitch_for_port(
context, port_data)
# Do not apply port security here!
ls_port = self._nvp_create_port_helper(
self.cluster, selected_lswitch['uuid'],
port_data, False)
# Assuming subnet being attached is on first fixed ip
# element in port data
subnet_id = port_data['fixed_ips'][0]['subnet_id']
router_id = port_data['device_id']
# Create peer port on logical router
self._create_and_attach_router_port(
self.cluster, context, router_id, port_data,
"PatchAttachment", ls_port['uuid'],
subnet_ids=[subnet_id])
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], ls_port['uuid'])
LOG.debug(_("_nvp_create_router_port completed for port "
"%(name)s on network %(network_id)s. The new "
"port id is %(id)s."),
port_data)
except (NvpApiClient.NvpApiException, q_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
ls_port and ls_port['uuid'])
def _find_router_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not router_id:
raise q_exc.BadRequest(_("device_id field must be populated in "
"order to create an external gateway "
"port for network %s"),
port_data['network_id'])
lr_port = nvplib.find_router_gw_port(context, self.cluster, router_id)
if not lr_port:
raise nvp_exc.NvpPluginException(
err_msg=(_("The gateway port for the router %s "
"was not found on the NVP backend")
% router_id))
return lr_port
@lockutils.synchronized('nicira', 'neutron-')
def _nvp_create_ext_gw_port(self, context, port_data):
"""Driver for creating an external gateway port on NVP platform."""
# TODO(salvatore-orlando): Handle NVP resource
# rollback when something goes not quite as expected
lr_port = self._find_router_gw_port(context, port_data)
ip_addresses = self._build_ip_address_list(context,
port_data['fixed_ips'])
# This operation actually always updates a NVP logical port
# instead of creating one. This is because the gateway port
# is created at the same time as the NVP logical router, otherwise
# the fabric status of the NVP router will be down.
# admin_status should always be up for the gateway port
# regardless of what the user specifies in neutron
router_id = port_data['device_id']
nvplib.update_router_lport(self.cluster,
router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
ip_addresses)
ext_network = self.get_network(context, port_data['network_id'])
if ext_network.get(pnet.NETWORK_TYPE) == NetworkTypes.L3_EXT:
# Update attachment
physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or
self.cluster.default_l3_gw_service_uuid)
self._update_router_port_attachment(
self.cluster, context, router_id, port_data,
lr_port['uuid'],
"L3GatewayAttachment",
physical_network,
ext_network[pnet.SEGMENTATION_ID])
LOG.debug(_("_nvp_create_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s. "
"NVP port id is %(nvp_port_id)s"),
{'ext_net_id': port_data['network_id'],
'router_id': router_id,
'nvp_port_id': lr_port['uuid']})
@lockutils.synchronized('nicira', 'neutron-')
def _nvp_delete_ext_gw_port(self, context, port_data):
lr_port = self._find_router_gw_port(context, port_data)
# TODO(salvatore-orlando): Handle NVP resource
# rollback when something goes not quite as expected
try:
# Delete is actually never a real delete, otherwise the NVP
# logical router will stop working
router_id = port_data['device_id']
nvplib.update_router_lport(self.cluster,
router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
['0.0.0.0/31'])
# Reset attachment
self._update_router_port_attachment(
self.cluster, context, router_id, port_data,
lr_port['uuid'],
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=_("Logical router resource %s not found "
"on NVP platform") % router_id)
except NvpApiClient.NvpApiException:
            raise nvp_exc.NvpPluginException(
                err_msg=_("Unable to update logical router "
                          "on NVP Platform"))
LOG.debug(_("_nvp_delete_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s"),
{'ext_net_id': port_data['network_id'],
'router_id': router_id})
def _nvp_create_l2_gw_port(self, context, port_data):
"""Create a switch port, and attach it to a L2 gateway attachment."""
        # FIXME(salvatore-orlando): On the NVP platform we do not really have
        # external networks. So if a user tries to create a "regular" VIF
        # port on an external network we are unable to actually create it.
        # However, in order to not break unit tests, we need to still create
        # the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
lport = None
try:
selected_lswitch = self._nvp_find_lswitch_for_port(
context, port_data)
lport = self._nvp_create_port_helper(
self.cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
nvplib.plug_l2_gw_service(
self.cluster,
port_data['network_id'],
lport['uuid'],
port_data['device_id'],
int(port_data.get('gw:segmentation_id') or 0))
except Exception:
with excutils.save_and_reraise_exception():
if lport:
nvplib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
LOG.debug(_("_nvp_create_l2_gw_port completed for port %(name)s "
"on network %(network_id)s. The new port id "
"is %(id)s."), port_data)
def _nvp_create_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _nvp_delete_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _extend_fault_map(self):
"""Extends the Neutron Fault Map.
Exceptions specific to the NVP Plugin are mapped to standard
HTTP Exceptions.
"""
base.FAULT_MAP.update({nvp_exc.NvpInvalidNovaZone:
webob.exc.HTTPBadRequest,
nvp_exc.NvpNoMorePortsException:
webob.exc.HTTPBadRequest,
nvp_exc.MaintenanceInProgress:
webob.exc.HTTPServiceUnavailable})
def _validate_provider_create(self, context, network):
if not attr.is_attr_set(network.get(mpnet.SEGMENTS)):
return
for segment in network[mpnet.SEGMENTS]:
network_type = segment.get(pnet.NETWORK_TYPE)
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
network_type_set = attr.is_attr_set(network_type)
segmentation_id_set = attr.is_attr_set(segmentation_id)
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
NetworkTypes.FLAT):
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be specified with "
"flat network type")
elif network_type == NetworkTypes.VLAN:
if not segmentation_id_set:
err_msg = _("Segmentation ID must be specified with "
"vlan network type")
elif (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
# Verify segment is not already allocated
bindings = nicira_db.get_network_bindings_by_vlanid(
context.session, segmentation_id)
if bindings:
raise q_exc.VlanIdInUse(
vlan_id=segmentation_id,
physical_network=physical_network)
elif network_type == NetworkTypes.L3_EXT:
if (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
err_msg = (_("%(net_type_param)s %(net_type_value)s not "
"supported") %
{'net_type_param': pnet.NETWORK_TYPE,
'net_type_value': network_type})
if err_msg:
raise q_exc.InvalidInput(error_message=err_msg)
            # TODO(salvatore-orlando): Validate transport zone uuid
            # which should be specified in physical_network
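        # Illustrative segments (added for clarity, not from the original
        # source) and how the checks above treat them:
        #
        #   {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100} -> accepted
        #   {pnet.NETWORK_TYPE: 'flat', pnet.SEGMENTATION_ID: 100} -> InvalidInput
        #   {pnet.NETWORK_TYPE: 'vlan'}                            -> InvalidInput
        #   {pnet.NETWORK_TYPE: 'foo'}                             -> InvalidInput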
def _extend_network_dict_provider(self, context, network,
multiprovider=None, bindings=None):
if not bindings:
bindings = nicira_db.get_network_bindings(context.session,
network['id'])
if not multiprovider:
multiprovider = nicira_db.is_multiprovider_network(context.session,
network['id'])
# With NVP plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# phy_uuid as 'provider network' for STT net type
if bindings:
if not multiprovider:
# network came in through provider networks api
network[pnet.NETWORK_TYPE] = bindings[0].binding_type
network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
else:
                # network came in through the multiprovider networks api
network[mpnet.SEGMENTS] = [
{pnet.NETWORK_TYPE: binding.binding_type,
pnet.PHYSICAL_NETWORK: binding.phy_uuid,
pnet.SEGMENTATION_ID: binding.vlan_id}
for binding in bindings]
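    # Illustrative result shapes (not part of the original module): a
    # provider network gains flat keys, e.g.
    #
    #   net[pnet.NETWORK_TYPE] == 'vlan'
    #   net[pnet.SEGMENTATION_ID] == 100
    #
    # while a multiprovider network gains a list of per-segment dicts:
    #
    #   net[mpnet.SEGMENTS] == [{pnet.NETWORK_TYPE: 'vlan', ...}, ...]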
def _handle_lswitch_selection(self, cluster, network,
network_bindings, max_ports,
allow_extra_lswitches):
lswitches = nvplib.get_lswitches(cluster, network.id)
try:
# TODO(salvatore-orlando) find main_ls too!
return [ls for ls in lswitches
if (ls['_relations']['LogicalSwitchStatus']
['lport_count'] < max_ports)].pop(0)
except IndexError:
# Too bad, no switch available
LOG.debug(_("No switch has available ports (%d checked)"),
len(lswitches))
if allow_extra_lswitches:
main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
if 'multi_lswitch' not in tag_dict:
tags = main_ls[0]['tags']
tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
nvplib.update_lswitch(cluster,
main_ls[0]['uuid'],
main_ls[0]['display_name'],
network['tenant_id'],
tags=tags)
transport_zone_config = self._convert_to_nvp_transport_zones(
cluster, network, bindings=network_bindings)
selected_lswitch = nvplib.create_lswitch(
cluster, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
transport_zone_config,
network.id)
return selected_lswitch
else:
LOG.error(_("Maximum number of logical ports reached for "
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)
def _convert_to_nvp_transport_zones(self, cluster, network=None,
bindings=None):
nvp_transport_zones_config = []
# Convert fields from provider request to nvp format
if (network and not attr.is_attr_set(
network.get(mpnet.SEGMENTS))):
return [{"zone_uuid": cluster.default_tz_uuid,
"transport_type": cfg.CONF.NSX.default_transport_type}]
# Convert fields from db to nvp format
if bindings:
transport_entry = {}
for binding in bindings:
if binding.binding_type in [NetworkTypes.FLAT,
NetworkTypes.VLAN]:
transport_entry['transport_type'] = NetworkTypes.BRIDGE
transport_entry['binding_config'] = {}
vlan_id = binding.vlan_id
if vlan_id:
transport_entry['binding_config'] = (
{'vlan_translation': [{'transport': vlan_id}]})
else:
transport_entry['transport_type'] = binding.binding_type
transport_entry['zone_uuid'] = binding.phy_uuid
nvp_transport_zones_config.append(transport_entry)
return nvp_transport_zones_config
for transport_zone in network.get(mpnet.SEGMENTS):
for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED:
transport_zone[value] = None
transport_entry = {}
transport_type = transport_zone.get(pnet.NETWORK_TYPE)
if transport_type in [NetworkTypes.FLAT, NetworkTypes.VLAN]:
transport_entry['transport_type'] = NetworkTypes.BRIDGE
transport_entry['binding_config'] = {}
vlan_id = transport_zone.get(pnet.SEGMENTATION_ID)
if vlan_id:
transport_entry['binding_config'] = (
{'vlan_translation': [{'transport': vlan_id}]})
else:
transport_entry['transport_type'] = transport_type
transport_entry['zone_uuid'] = (
transport_zone[pnet.PHYSICAL_NETWORK] or
cluster.default_tz_uuid)
nvp_transport_zones_config.append(transport_entry)
return nvp_transport_zones_config
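    # Illustrative mapping (not part of the original module): a VLAN segment
    #
    #   {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'tz-uuid',
    #    pnet.SEGMENTATION_ID: 100}
    #
    # becomes the NVP transport-zone entry
    #
    #   {'transport_type': 'bridge', 'zone_uuid': 'tz-uuid',
    #    'binding_config': {'vlan_translation': [{'transport': 100}]}}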
def _convert_to_transport_zones_dict(self, network):
"""Converts the provider request body to multiprovider.
        Returns: True if the request is multiprovider, False if it is
        provider, and None if neither.
"""
if any(attr.is_attr_set(network.get(f))
for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID)):
if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
raise mpnet.SegmentsSetInConjunctionWithProviders()
# convert to transport zone list
network[mpnet.SEGMENTS] = [
{pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
del network[pnet.NETWORK_TYPE]
del network[pnet.PHYSICAL_NETWORK]
del network[pnet.SEGMENTATION_ID]
return False
        if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
return True
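    # Illustrative behaviour (not part of the original module):
    #
    #   >>> net = {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'tz-1',
    #   ...        pnet.SEGMENTATION_ID: 100}
    #   >>> plugin._convert_to_transport_zones_dict(net)
    #   False
    #   >>> net[mpnet.SEGMENTS]
    #   [{'provider:network_type': 'vlan',
    #     'provider:physical_network': 'tz-1',
    #     'provider:segmentation_id': 100}]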
def create_network(self, context, network):
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
self._ensure_default_security_group(context, tenant_id)
# Process the provider network extension
provider_type = self._convert_to_transport_zones_dict(net_data)
self._validate_provider_create(context, net_data)
# Replace ATTR_NOT_SPECIFIED with None before sending to NVP
for key, value in network['network'].iteritems():
if value is attr.ATTR_NOT_SPECIFIED:
net_data[key] = None
# FIXME(arosen) implement admin_state_up = False in NVP
if net_data['admin_state_up'] is False:
LOG.warning(_("Network with admin_state_up=False are not yet "
"supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>'))
transport_zone_config = self._convert_to_nvp_transport_zones(
self.cluster, net_data)
external = net_data.get(ext_net_extn.EXTERNAL)
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
lswitch = nvplib.create_lswitch(
self.cluster, tenant_id, net_data.get('name'),
transport_zone_config,
shared=net_data.get(attr.SHARED))
net_data['id'] = lswitch['uuid']
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
# Ensure there's an id in net_data
net_data['id'] = new_net['id']
# Process port security extension
self._process_network_port_security_create(
context, net_data, new_net)
# DB Operations for setting the network as external
self._process_l3_create(context, new_net, net_data)
# Process QoS queue extension
net_queue_id = net_data.get(ext_qos.QUEUE)
if net_queue_id:
# Raises if not found
self.get_qos_queue(context, net_queue_id)
self._process_network_queue_mapping(
context, new_net, net_queue_id)
if (net_data.get(mpnet.SEGMENTS) and
isinstance(provider_type, bool)):
net_bindings = []
for tz in net_data[mpnet.SEGMENTS]:
net_bindings.append(nicira_db.add_network_binding(
context.session, new_net['id'],
tz.get(pnet.NETWORK_TYPE),
tz.get(pnet.PHYSICAL_NETWORK),
tz.get(pnet.SEGMENTATION_ID, 0)))
if provider_type:
nicira_db.set_multiprovider_network(context.session,
new_net['id'])
self._extend_network_dict_provider(context, new_net,
provider_type,
net_bindings)
self.handle_network_dhcp_access(context, new_net,
action='create_network')
return new_net
def delete_network(self, context, id):
external = self._network_is_external(context, id)
# Before deleting ports, ensure the peer of a NVP logical
# port with a patch attachment is removed too
port_filter = {'network_id': [id],
'device_owner': ['network:router_interface']}
router_iface_ports = self.get_ports(context, filters=port_filter)
for port in router_iface_ports:
            nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
                context.session, self.cluster, port['id'])
super(NvpPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
try:
if nvp_port_id:
nvplib.delete_peer_router_lport(self.cluster,
port['device_id'],
nvp_switch_id,
nvp_port_id)
else:
LOG.warning(_("A nvp lport identifier was not found for "
"neutron port '%s'. Unable to remove "
"the peer router port for this switch port"),
port['id'])
except (TypeError, KeyError,
NvpApiClient.NvpApiException,
NvpApiClient.ResourceNotFound):
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.warning(_("Ignoring exception as this means the peer for "
"port '%s' has already been deleted."),
nvp_port_id)
# Do not go to NVP for external networks
if not external:
try:
lswitch_ids = [ls['uuid'] for ls in
nvplib.get_lswitches(self.cluster, id)]
nvplib.delete_networks(self.cluster, id, lswitch_ids)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
except q_exc.NotFound:
LOG.warning(_("Did not found lswitch %s in NVP"), id)
self.handle_network_dhcp_access(context, id, action='delete_network')
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
            # go to the plugin DB and fetch the network
network = self._get_network(context, id)
if (self.nvp_sync_opts.always_read_status or
fields and 'status' in fields):
# External networks are not backed by nvp lswitches
if not network.external:
# Perform explicit state synchronization
self._synchronizer.synchronize_network(context, network)
# Don't do field selection here otherwise we won't be able
# to add provider networks fields
net_result = self._make_network_dict(network)
self._extend_network_dict_provider(context, net_result)
return self._fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None):
filters = filters or {}
with context.session.begin(subtransactions=True):
networks = super(NvpPluginV2, self).get_networks(context, filters)
for net in networks:
self._extend_network_dict_provider(context, net)
return [self._fields(network, fields) for network in networks]
def update_network(self, context, id, network):
pnet._raise_if_updates_provider_attributes(network['network'])
if network["network"].get("admin_state_up") is False:
raise NotImplementedError(_("admin_state_up=False networks "
"are not supported."))
with context.session.begin(subtransactions=True):
net = super(NvpPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], net)
net_queue_id = network['network'].get(ext_qos.QUEUE)
if net_queue_id:
self._delete_network_queue_mapping(context, id)
self._process_network_queue_mapping(context, net, net_queue_id)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def create_port(self, context, port):
# If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
# then we pass the port to the policy engine. The reason why we don't
# pass the value to the policy engine when the port is
# ATTR_NOT_SPECIFIED is for the case where a port is created on a
# shared network that is not owned by the tenant.
port_data = port['port']
with context.session.begin(subtransactions=True):
# First we allocate port in neutron database
neutron_db = super(NvpPluginV2, self).create_port(context, port)
neutron_port_id = neutron_db['id']
# Update fields obtained from neutron db (eg: MAC address)
port["port"].update(neutron_db)
self.handle_port_metadata_access(context, neutron_db)
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
port_data[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(
context, port_data, neutron_db)
# allowed address pair checks
if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)):
if not port_security:
raise addr_pair.AddressPairAndPortSecurityRequired()
else:
self._process_create_allowed_address_pairs(
context, neutron_db,
port_data[addr_pair.ADDRESS_PAIRS])
else:
# remove ATTR_NOT_SPECIFIED
port_data[addr_pair.ADDRESS_PAIRS] = None
# security group extension checks
if port_security and has_ip:
self._ensure_default_security_group_on_port(context, port)
elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
port_data[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._process_port_create_security_group(
context, port_data, port_data[ext_sg.SECURITYGROUPS])
# QoS extension checks
port_queue_id = self._check_for_queue_and_create(
context, port_data)
self._process_port_queue_mapping(
context, port_data, port_queue_id)
if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)):
self._create_mac_learning_state(context, port_data)
elif mac_ext.MAC_LEARNING in port_data:
port_data.pop(mac_ext.MAC_LEARNING)
LOG.debug(_("create_port completed on NVP for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
self._process_portbindings_create_and_update(context,
port['port'],
port_data)
# DB Operation is complete, perform NVP operation
try:
port_data = port['port'].copy()
port_create_func = self._port_drivers['create'].get(
port_data['device_owner'],
self._port_drivers['create']['default'])
port_create_func(context, port_data)
except q_exc.NotFound:
LOG.warning(_("Logical switch for network %s was not "
"found in NVP."), port_data['network_id'])
# Put port in error on quantum DB
with context.session.begin(subtransactions=True):
port = self._get_port(context, neutron_port_id)
port_data['status'] = constants.PORT_STATUS_ERROR
port['status'] = port_data['status']
context.session.add(port)
except Exception:
# Port must be removed from Quantum DB
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to create port or set port "
"attachment in NVP."))
with context.session.begin(subtransactions=True):
self._delete_port(context, neutron_port_id)
self.handle_port_dhcp_access(context, port_data, action='create_port')
return port_data
def update_port(self, context, id, port):
changed_fixed_ips = 'fixed_ips' in port['port']
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
port)
has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
with context.session.begin(subtransactions=True):
ret_port = super(NvpPluginV2, self).update_port(
context, id, port)
# Save current mac learning state to check whether it's
# being updated or not
old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING)
# copy values over - except fixed_ips as
# they've already been processed
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
tenant_id = self._get_tenant_id_for_create(context, ret_port)
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
# validate port security and allowed address pairs
if not ret_port[psec.PORTSECURITY]:
# has address pairs in request
if has_addr_pairs:
raise addr_pair.AddressPairAndPortSecurityRequired()
elif not delete_addr_pairs:
# check if address pairs are in db
ret_port[addr_pair.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id))
if ret_port[addr_pair.ADDRESS_PAIRS]:
raise addr_pair.AddressPairAndPortSecurityRequired()
if (delete_addr_pairs or has_addr_pairs):
# delete address pairs and read them in
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS])
elif changed_fixed_ips:
self._check_fixed_ips_and_address_pairs_no_overlap(context,
ret_port)
# checks if security groups were updated adding/modifying
# security groups, port security is set and port has ip
if not (has_ip and ret_port[psec.PORTSECURITY]):
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(NvpPluginV2, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
# delete the port binding and read it with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, ret_port,
sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
port_queue_id = self._check_for_queue_and_create(
context, ret_port)
# Populate the mac learning attribute
new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING)
if (new_mac_learning_state is not None and
old_mac_learning_state != new_mac_learning_state):
self._update_mac_learning_state(context, id,
new_mac_learning_state)
ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state
self._delete_port_queue_mapping(context, ret_port['id'])
self._process_port_queue_mapping(context, ret_port,
port_queue_id)
LOG.warn(_("Update port request: %s"), port)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
if nvp_port_id:
try:
nvplib.update_port(self.cluster,
nvp_switch_id,
nvp_port_id, id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
ret_port[ext_sg.SECURITYGROUPS],
ret_port[ext_qos.QUEUE],
ret_port.get(mac_ext.MAC_LEARNING),
ret_port.get(addr_pair.ADDRESS_PAIRS))
# Update the port status from nvp. If we fail here hide it
# since the port was successfully updated but we were not
# able to retrieve the status.
ret_port['status'] = nvplib.get_port_status(
self.cluster, ret_port['network_id'],
nvp_port_id)
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_("Unable to update port id: %s."),
nvp_port_id)
# If nvp_port_id is not in database or in nvp put in error state.
else:
ret_port['status'] = constants.PORT_STATUS_ERROR
self._process_portbindings_create_and_update(context,
port['port'],
ret_port)
return ret_port
def delete_port(self, context, id, l3_port_check=True,
nw_gw_port_check=True):
"""Deletes a port on a specified Virtual Network.
If the port contains a remote interface attachment, the remote
interface is first un-plugged and then the port is deleted.
:returns: None
:raises: exception.PortInUse
:raises: exception.PortNotFound
:raises: exception.NetworkNotFound
"""
# if needed, check to see if this is a port owned by
# a l3 router. If so, we should prevent deletion here
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
neutron_db_port = self.get_port(context, id)
# Perform the same check for ports owned by layer-2 gateways
if nw_gw_port_check:
self.prevent_network_gateway_port_deletion(context,
neutron_db_port)
port_delete_func = self._port_drivers['delete'].get(
neutron_db_port['device_owner'],
self._port_drivers['delete']['default'])
port_delete_func(context, neutron_db_port)
self.disassociate_floatingips(context, id)
with context.session.begin(subtransactions=True):
queue = self._get_port_queue_bindings(context, {'port_id': [id]})
# metadata_dhcp_host_route
self.handle_port_metadata_access(
context, neutron_db_port, is_delete=True)
super(NvpPluginV2, self).delete_port(context, id)
# Delete qos queue if possible
if queue:
self.delete_qos_queue(context, queue[0]['queue_id'], False)
self.handle_port_dhcp_access(
context, neutron_db_port, action='delete_port')
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
if (self.nvp_sync_opts.always_read_status or
fields and 'status' in fields):
# Perform explicit state synchronization
db_port = self._get_port(context, id)
self._synchronizer.synchronize_port(
context, db_port)
return self._make_port_dict(db_port, fields)
else:
return super(NvpPluginV2, self).get_port(context, id, fields)
def get_router(self, context, id, fields=None):
if (self.nvp_sync_opts.always_read_status or
fields and 'status' in fields):
db_router = self._get_router(context, id)
# Perform explicit state synchronization
self._synchronizer.synchronize_router(
context, db_router)
return self._make_router_dict(db_router, fields)
else:
return super(NvpPluginV2, self).get_router(context, id, fields)
def _create_lrouter(self, context, router, nexthop):
tenant_id = self._get_tenant_id_for_create(context, router)
name = router['name']
distributed = router.get('distributed')
try:
lrouter = nvplib.create_lrouter(
self.cluster, tenant_id, name, nexthop,
distributed=attr.is_attr_set(distributed) and distributed)
except nvp_exc.NvpInvalidVersion:
msg = _("Cannot create a distributed router with the NVP "
"platform currently in execution. Please, try "
"without specifying the 'distributed' attribute.")
LOG.exception(msg)
raise q_exc.BadRequest(resource='router', msg=msg)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to create logical router on NVP Platform"))
# Create the port here - and update it later if we have gw_info
try:
self._create_and_attach_router_port(
self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except nvp_exc.NvpPluginException:
LOG.exception(_("Unable to create L3GW port on logical router "
"%(router_uuid)s. Verify Default Layer-3 Gateway "
"service %(def_l3_gw_svc)s id is correct"),
{'router_uuid': lrouter['uuid'],
'def_l3_gw_svc':
self.cluster.default_l3_gw_service_uuid})
# Try and remove logical router from NVP
nvplib.delete_lrouter(self.cluster, lrouter['uuid'])
            # Return the user a 500 with a more apt message
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to create router %s") % router['name'])
lrouter['status'] = plugin_const.ACTIVE
return lrouter
def create_router(self, context, router):
# NOTE(salvatore-orlando): We completely override this method in
# order to be able to use the NVP ID as Neutron ID
# TODO(salvatore-orlando): Propose upstream patch for allowing
# 3rd parties to specify IDs as we do with l2 plugin
r = router['router']
has_gw_info = False
tenant_id = self._get_tenant_id_for_create(context, r)
# default value to set - nvp wants it (even if we don't have it)
nexthop = NVP_DEFAULT_NEXTHOP
# if external gateway info are set, then configure nexthop to
# default external gateway
if 'external_gateway_info' in r and r.get('external_gateway_info'):
has_gw_info = True
gw_info = r['external_gateway_info']
del r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NVP router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise q_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
lrouter = self._create_lrouter(context, r, nexthop)
        # Use the NVP identifier for the Neutron resource
r['id'] = lrouter['uuid']
# Update 'distributed' with value returned from NVP
# This will be useful for setting the value if the API request
# did not specify any value for the 'distributed' attribute
# Platforms older than 3.x do not support the attribute
r['distributed'] = lrouter.get('distributed', False)
# TODO(salv-orlando): Deal with backend object removal in case
# of db failures
with context.session.begin(subtransactions=True):
# Transaction nesting is needed to avoid foreign key violations
# when processing the distributed router binding
with context.session.begin(subtransactions=True):
router_db = l3_db.Router(id=lrouter['uuid'],
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status=lrouter['status'])
self._process_nsx_router_create(context, router_db, r)
context.session.add(router_db)
if has_gw_info:
            # NOTE(salv-orlando): This operation has been moved out of the
            # database transaction since it performs several NVP queries,
            # thus increasing the risk of deadlocks between eventlet and
            # sqlalchemy operations.
# Set external gateway and remove router in case of failure
try:
self._update_router_gw_info(context, router_db['id'], gw_info)
except (q_exc.NeutronException, NvpApiClient.NvpApiException):
with excutils.save_and_reraise_exception():
# As setting gateway failed, the router must be deleted
# in order to ensure atomicity
router_id = router_db['id']
LOG.warn(_("Failed to set gateway info for router being "
"created:%s - removing router"), router_id)
self.delete_router(context, router_id)
LOG.info(_("Create router failed while setting external "
"gateway. Router:%s has been removed from "
"DB and backend"),
router_id)
router = self._make_router_dict(router_db)
return router
def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
return nvplib.update_lrouter(
self.cluster, router_id, name,
nexthop, routes=routes)
def _update_lrouter_routes(self, router_id, routes):
nvplib.update_explicit_routes_lrouter(
self.cluster, router_id, routes)
def update_router(self, context, router_id, router):
# Either nexthop is updated or should be kept as it was before
r = router['router']
nexthop = None
if 'external_gateway_info' in r and r.get('external_gateway_info'):
gw_info = r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NVP router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise q_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
try:
for route in r.get('routes', []):
if route['destination'] == '0.0.0.0/0':
msg = _("'routes' cannot contain route '0.0.0.0/0', "
"this must be updated through the default "
"gateway attribute")
raise q_exc.BadRequest(resource='router', msg=msg)
previous_routes = self._update_lrouter(
context, router_id, r.get('name'),
nexthop, routes=r.get('routes'))
# NOTE(salv-orlando): The exception handling below is not correct, but
# unfortunately nvplib raises a neutron notfound exception when an
# object is not found in the underlying backend
except q_exc.NotFound:
# Put the router in ERROR status
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
router_db['status'] = constants.NET_STATUS_ERROR
raise nvp_exc.NvpPluginException(
err_msg=_("Logical router %s not found "
"on NVP Platform") % router_id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to update logical router on NVP Platform"))
except nvp_exc.NvpInvalidVersion:
msg = _("Request cannot contain 'routes' with the NVP "
"platform currently in execution. Please, try "
"without specifying the static routes.")
LOG.exception(msg)
raise q_exc.BadRequest(resource='router', msg=msg)
try:
return super(NvpPluginV2, self).update_router(context,
router_id, router)
except (extraroute.InvalidRoutes,
extraroute.RouterInterfaceInUseByRoute,
extraroute.RoutesExhausted):
with excutils.save_and_reraise_exception():
# revert changes made to NVP
self._update_lrouter_routes(
router_id, previous_routes)
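    # Illustrative sketch (not part of the plugin): a typical update_router
    # payload with static routes; '0.0.0.0/0' destinations are rejected above
    # because the default route must be managed via external_gateway_info.
    #
    #   plugin.update_router(context, router_id,
    #                        {'router': {'name': 'r1',
    #                                    'routes': [{'destination': '10.0.0.0/24',
    #                                                'nexthop': '10.1.0.1'}]}})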
def _delete_lrouter(self, context, id):
nvplib.delete_lrouter(self.cluster, id)
def delete_router(self, context, router_id):
with context.session.begin(subtransactions=True):
# TODO(salv-orlando): This call should have no effect on delete
# router, but if it does, it should not happen within a
# transaction, and it should be restored on rollback
self.handle_router_metadata_access(
context, router_id, interface=None)
# Pre-delete checks
# NOTE(salv-orlando): These checks will be repeated anyway when
# calling the superclass. This is wasteful, but is the simplest
# way of ensuring a consistent removal of the router both in
# the neutron Database and in the NVP backend.
# TODO(salv-orlando): split pre-delete checks and actual
# deletion in superclass.
# Ensure that the router is not used
fips = self.get_floatingips_count(
context.elevated(), filters={'router_id': [router_id]})
if fips:
raise l3.RouterInUse(router_id=router_id)
device_filter = {'device_id': [router_id],
'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
ports = self._core_plugin.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
# It is safe to remove the router from the database, so remove it
# from the backend
try:
self._delete_lrouter(context, router_id)
except q_exc.NotFound:
# This is not a fatal error, but needs to be logged
LOG.warning(_("Logical router '%s' not found "
"on NVP Platform"), router_id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to delete logical router '%s' "
"on NVP Platform") % router_id))
# Perform the actual delete on the Neutron DB
try:
super(NvpPluginV2, self).delete_router(context, router_id)
except Exception:
# NOTE(salv-orlando): Broad catching as the following action
# needs to be performed for every exception.
# Put the router in ERROR status
LOG.exception(_("Failure while removing router:%s from database. "
"The router will be put in ERROR status"),
router_id)
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
router_db['status'] = constants.NET_STATUS_ERROR
def _add_subnet_snat_rule(self, router, subnet):
gw_port = router.gw_port
if gw_port and router.enable_snat:
        # There is a chance gw_port might have multiple IPs
# In that case we will consider only the first one
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
cidr_prefix = int(subnet['cidr'].split('/')[1])
nvplib.create_lrouter_snat_rule(
self.cluster, router['id'], snat_ip, snat_ip,
order=NVP_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': subnet['cidr']})
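    # Worked example (illustrative; assumes NVP matches NAT rules in ascending
    # 'order' and that NVP_EXTGW_NAT_RULES_ORDER is 255): subtracting the
    # prefix length gives more specific subnets a lower order value, so they
    # are evaluated first:
    #   10.0.0.0/24 -> order 255 - 24 = 231
    #   10.0.0.0/16 -> order 255 - 16 = 239 (checked after the /24 rule)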
def _delete_subnet_snat_rule(self, router, subnet):
# Remove SNAT rule if external gateway is configured
if router.gw_port:
nvplib.delete_nat_rules_by_match(
self.cluster, router['id'], "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=subnet['cidr'])
def add_router_interface(self, context, router_id, interface_info):
# When adding interface by port_id we need to create the
# peer port on the nvp logical router in this routine
port_id = interface_info.get('port_id')
router_iface_info = super(NvpPluginV2, self).add_router_interface(
context, router_id, interface_info)
# router_iface_info will always have a subnet_id attribute
subnet_id = router_iface_info['subnet_id']
if port_id:
port_data = self._get_port(context, port_id)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_id)
# Unplug current attachment from lswitch port
nvplib.plug_interface(self.cluster, nvp_switch_id,
nvp_port_id, "NoAttachment")
# Create logical router port and plug patch attachment
self._create_and_attach_router_port(
self.cluster, context, router_id, port_data,
"PatchAttachment", nvp_port_id, subnet_ids=[subnet_id])
subnet = self._get_subnet(context, subnet_id)
# If there is an external gateway we need to configure the SNAT rule.
# Fetch router from DB
router = self._get_router(context, router_id)
self._add_subnet_snat_rule(router, subnet)
nvplib.create_lrouter_nosnat_rule(
self.cluster, router_id,
order=NVP_NOSNAT_RULES_ORDER,
match_criteria={'destination_ip_addresses': subnet['cidr']})
# Ensure the NVP logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self.handle_router_metadata_access(
context, router_id, interface=router_iface_info)
LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s"),
{'subnet_id': subnet_id, 'router_id': router_id})
return router_iface_info
def remove_router_interface(self, context, router_id, interface_info):
        # The code below is duplicated from base class, but comes in handy
# as we need to retrieve the router port id before removing the port
subnet = None
subnet_id = None
if 'port_id' in interface_info:
port_id = interface_info['port_id']
            # find subnet_id - it is needed for removing the SNAT rule
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
port['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
break
else:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
# Finally remove the data from the Neutron DB
# This will also destroy the port on the logical switch
info = super(NvpPluginV2, self).remove_router_interface(
context, router_id, interface_info)
# Ensure the connection to the 'metadata access network'
        # is removed (with the network) if this is the last subnet
# on the router
self.handle_router_metadata_access(
context, router_id, interface=info)
try:
if not subnet:
subnet = self._get_subnet(context, subnet_id)
router = self._get_router(context, router_id)
# If router is enabled_snat = False there are no snat rules to
# delete.
if router.enable_snat:
self._delete_subnet_snat_rule(router, subnet)
# Relax the minimum expected number as the nosnat rules
# do not exist in 2.x deployments
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "NoSourceNatRule",
max_num_expected=1, min_num_expected=0,
destination_ip_addresses=subnet['cidr'])
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=(_("Logical router resource %s not found "
"on NVP platform") % router_id))
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to update logical router"
"on NVP Platform")))
return info
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, router_id,
min_num_rules_expected=0):
try:
# Remove DNAT rule for the floating IP
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "DestinationNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=floating_ip_address)
# Remove SNAT rules for the floating IP
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
source_ip_addresses=internal_ip)
nvplib.delete_nat_rules_by_match(
self.cluster, router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=internal_ip)
except NvpApiClient.NvpApiException:
LOG.exception(_("An error occurred while removing NAT rules "
"on the NVP platform for floating ip:%s"),
floating_ip_address)
raise
except nvp_exc.NvpNatRuleMismatch:
# Do not surface to the user
LOG.warning(_("An incorrect number of matching NAT rules "
"was found on the NVP platform"))
def _remove_floatingip_address(self, context, fip_db):
# Remove floating IP address from logical router port
# Fetch logical port of router's external gateway
router_id = fip_db.router_id
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.cluster, router_id)['uuid']
ext_neutron_port_db = self._get_port(context.elevated(),
fip_db.floating_port_id)
nvp_floating_ips = self._build_ip_address_list(
context.elevated(), ext_neutron_port_db['fixed_ips'])
nvplib.update_lrouter_port_ips(self.cluster,
router_id,
nvp_gw_port_id,
ips_to_add=[],
ips_to_remove=nvp_floating_ips)
def _get_fip_assoc_data(self, context, fip, floatingip_db):
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise q_exc.BadRequest(resource='floatingip', msg=msg)
port_id = internal_ip = router_id = None
if 'port_id' in fip and fip['port_id']:
port_qry = context.session.query(l3_db.FloatingIP)
try:
port_qry.filter_by(fixed_port_id=fip['port_id']).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=floatingip_db['fixed_ip_address'],
net_id=floatingip_db['floating_network_id'])
except sa_exc.NoResultFound:
pass
port_id, internal_ip, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
return (port_id, internal_ip, router_id)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Update floating IP association data.
Overrides method from base class.
The method is augmented for creating NAT rules in the process.
"""
# Store router currently serving the floating IP
old_router_id = floatingip_db.router_id
port_id, internal_ip, router_id = self._get_fip_assoc_data(
context, fip, floatingip_db)
floating_ip = floatingip_db['floating_ip_address']
# If there's no association router_id will be None
if router_id:
self._retrieve_and_delete_nat_rules(context,
floating_ip,
internal_ip,
router_id)
# Fetch logical port of router's external gateway
nvp_floating_ips = self._build_ip_address_list(
context.elevated(), external_port['fixed_ips'])
floating_ip = floatingip_db['floating_ip_address']
# Retrieve and delete existing NAT rules, if any
if old_router_id:
# Retrieve the current internal ip
_p, _s, old_internal_ip = self._internal_fip_assoc_data(
context, {'id': floatingip_db.id,
'port_id': floatingip_db.fixed_port_id,
'fixed_ip_address': floatingip_db.fixed_ip_address,
'tenant_id': floatingip_db.tenant_id})
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.cluster, old_router_id)['uuid']
self._retrieve_and_delete_nat_rules(
context, floating_ip, old_internal_ip, old_router_id)
nvplib.update_lrouter_port_ips(
self.cluster, old_router_id, nvp_gw_port_id,
ips_to_add=[], ips_to_remove=nvp_floating_ips)
if router_id:
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.cluster, router_id)['uuid']
# Re-create NAT rules only if a port id is specified
if fip.get('port_id'):
try:
# Setup DNAT rules for the floating IP
nvplib.create_lrouter_dnat_rule(
self.cluster, router_id, internal_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'destination_ip_addresses':
floating_ip})
# Setup SNAT rules for the floating IP
# Create a SNAT rule for enabling connectivity to the
# floating IP from the same network as the internal port
# Find subnet id for internal_ip from fixed_ips
internal_port = self._get_port(context, port_id)
                # Checks not needed on statements below since otherwise
# _internal_fip_assoc_data would have raised
subnet_ids = [ip['subnet_id'] for ip in
internal_port['fixed_ips'] if
ip['ip_address'] == internal_ip]
internal_subnet_cidr = self._build_ip_address_list(
context, internal_port['fixed_ips'],
subnet_ids=subnet_ids)[0]
nvplib.create_lrouter_snat_rule(
self.cluster, router_id, floating_ip, floating_ip,
order=NVP_NOSNAT_RULES_ORDER - 1,
match_criteria={'source_ip_addresses':
internal_subnet_cidr,
'destination_ip_addresses':
internal_ip})
                # Set up a SNAT rule so that the source IP of a packet
                # using the floating IP is the floating IP itself.
nvplib.create_lrouter_snat_rule(
self.cluster, router_id, floating_ip, floating_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': internal_ip})
# Add Floating IP address to router_port
nvplib.update_lrouter_port_ips(self.cluster,
router_id,
nvp_gw_port_id,
ips_to_add=nvp_floating_ips,
ips_to_remove=[])
except NvpApiClient.NvpApiException:
LOG.exception(_("An error occurred while creating NAT "
"rules on the NVP platform for floating "
"ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
msg = _("Failed to update NAT rules for floatingip update")
raise nvp_exc.NvpPluginException(err_msg=msg)
floatingip_db.update({'fixed_ip_address': internal_ip,
'fixed_port_id': port_id,
'router_id': router_id})
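    # Summary example (illustrative): associating floating IP 172.24.4.10
    # with internal IP 10.0.0.5 on subnet 10.0.0.0/24 leaves three NAT rules
    # on the logical router, per the code above:
    #   DNAT: dst 172.24.4.10               -> dst becomes 10.0.0.5
    #   SNAT: src 10.0.0.0/24, dst 10.0.0.5 -> src becomes 172.24.4.10 (hairpin)
    #   SNAT: src 10.0.0.5                  -> src becomes 172.24.4.10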
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
# Check whether the floating ip is associated or not
if fip_db.fixed_port_id:
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
# Remove floating IP address from logical router port
self._remove_floatingip_address(context, fip_db)
return super(NvpPluginV2, self).delete_floatingip(context, id)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
self._remove_floatingip_address(context, fip_db)
except sa_exc.NoResultFound:
LOG.debug(_("The port '%s' is not associated with floating IPs"),
port_id)
except q_exc.NotFound:
LOG.warning(_("Nat rules not found in nvp for port: %s"), id)
super(NvpPluginV2, self).disassociate_floatingips(context, port_id)
def create_network_gateway(self, context, network_gateway):
"""Create a layer-2 network gateway.
        Create the gateway service on the NVP platform and the corresponding
        data structures in the Neutron database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Need to re-do authZ checks here in order to avoid creation on NVP
gw_data = network_gateway[networkgw.RESOURCE_NAME.replace('-', '_')]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
devices = gw_data['devices']
# Populate default physical network where not specified
for device in devices:
if not device.get('interface_name'):
device['interface_name'] = self.cluster.default_interface_name
try:
nvp_res = nvplib.create_l2_gw_service(self.cluster, tenant_id,
gw_data['name'], devices)
nvp_uuid = nvp_res.get('uuid')
except NvpApiClient.Conflict:
raise nvp_exc.NvpL2GatewayAlreadyInUse(gateway=gw_data['name'])
except NvpApiClient.NvpApiException:
err_msg = _("Unable to create l2_gw_service for: %s") % gw_data
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
gw_data['id'] = nvp_uuid
return super(NvpPluginV2, self).create_network_gateway(context,
network_gateway)
def delete_network_gateway(self, context, id):
"""Remove a layer-2 network gateway.
        Remove the gateway service from the NVP platform and the corresponding
        data structures from the Neutron database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
with context.session.begin(subtransactions=True):
try:
super(NvpPluginV2, self).delete_network_gateway(context, id)
nvplib.delete_l2_gw_service(self.cluster, id)
except NvpApiClient.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NVP resource does not exist
LOG.exception(_("Unable to remove gateway service from "
"NVP plaform - the resource was not found"))
def get_network_gateway(self, context, id, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).get_network_gateway(context,
id, fields)
def get_network_gateways(self, context, filters=None, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Ensure the tenant_id attribute is populated on returned gateways
net_gateways = super(NvpPluginV2,
self).get_network_gateways(context,
filters,
fields)
return net_gateways
def update_network_gateway(self, context, id, network_gateway):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Update gateway on backend when there's a name change
name = network_gateway[networkgw.RESOURCE_NAME].get('name')
if name:
try:
nvplib.update_l2_gw_service(self.cluster, id, name)
except NvpApiClient.NvpApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on
LOG.warn(_("Unable to update name on NVP backend "
"for network gateway: %s"), id)
return super(NvpPluginV2, self).update_network_gateway(
context, id, network_gateway)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).connect_network(
context, network_gateway_id, network_mapping_info)
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NvpPluginV2, self).disconnect_network(
context, network_gateway_id, network_mapping_info)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
        If default_sg is true it means we are creating a default security
group and we don't need to check if one exists.
"""
s = security_group.get('security_group')
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
nvp_secgroup = nvplib.create_security_profile(self.cluster,
tenant_id, s)
security_group['security_group']['id'] = nvp_secgroup['uuid']
return super(NvpPluginV2, self).create_security_group(
context, security_group, default_sg)
def delete_security_group(self, context, security_group_id):
"""Delete a security group.
        :param security_group_id: security group to remove.
"""
with context.session.begin(subtransactions=True):
security_group = super(NvpPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
if security_group['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
filters = {'security_group_id': [security_group['id']]}
if super(NvpPluginV2, self)._get_port_security_group_bindings(
context, filters):
raise ext_sg.SecurityGroupInUse(id=security_group['id'])
nvplib.delete_security_profile(self.cluster,
security_group['id'])
return super(NvpPluginV2, self).delete_security_group(
context, security_group_id)
def _validate_security_group_rules(self, context, rules):
for rule in rules['security_group_rules']:
r = rule.get('security_group_rule')
port_based_proto = (self._get_ip_proto_number(r['protocol'])
in securitygroups_db.IP_PROTOCOL_MAP.values())
if (not port_based_proto and
(r['port_range_min'] is not None or
r['port_range_max'] is not None)):
msg = (_("Port values not valid for "
"protocol: %s") % r['protocol'])
raise q_exc.BadRequest(resource='security_group_rule',
msg=msg)
return super(NvpPluginV2, self)._validate_security_group_rules(context,
rules)
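    # Example (illustrative): since IP_PROTOCOL_MAP covers tcp/udp/icmp,
    # {'protocol': 'tcp', 'port_range_min': 80, 'port_range_max': 80} is
    # accepted, while a non port-based protocol such as '47' (GRE) combined
    # with port values is rejected with BadRequest above.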
def create_security_group_rule(self, context, security_group_rule):
"""Create a single security group rule."""
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk(context, bulk_rule)[0]
def create_security_group_rule_bulk(self, context, security_group_rule):
"""Create security group rules.
:param security_group_rule: list of rules to create
"""
s = security_group_rule.get('security_group_rules')
tenant_id = self._get_tenant_id_for_create(context, s)
        # TODO(arosen) is there any way we could avoid having the update of
# the security group rules in nvp outside of this transaction?
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
# Check to make sure security group exists
security_group = super(NvpPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
# Check for duplicate rules
self._check_for_duplicate_rules(context, s)
# gather all the existing security group rules since we need all
# of them to PUT to NVP.
combined_rules = self._merge_security_group_rules_with_current(
context, s, security_group['id'])
nvplib.update_security_group_rules(self.cluster,
security_group['id'],
combined_rules)
return super(
NvpPluginV2, self).create_security_group_rule_bulk_native(
context, security_group_rule)
def delete_security_group_rule(self, context, sgrid):
"""Delete a security group rule
:param sgrid: security group id to remove.
"""
with context.session.begin(subtransactions=True):
# determine security profile id
security_group_rule = (
super(NvpPluginV2, self).get_security_group_rule(
context, sgrid))
if not security_group_rule:
raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
sgid = security_group_rule['security_group_id']
current_rules = self._get_security_group_rules_nvp_format(
context, sgid, True)
self._remove_security_group_with_id_and_id_field(
current_rules, sgrid)
nvplib.update_security_group_rules(
self.cluster, sgid, current_rules)
return super(NvpPluginV2, self).delete_security_group_rule(context,
sgrid)
def create_qos_queue(self, context, qos_queue, check_policy=True):
q = qos_queue.get('qos_queue')
self._validate_qos_queue(context, q)
q['id'] = nvplib.create_lqueue(self.cluster,
self._nvp_lqueue(q))
return super(NvpPluginV2, self).create_qos_queue(context, qos_queue)
def delete_qos_queue(self, context, id, raise_in_use=True):
filters = {'queue_id': [id]}
queues = self._get_port_queue_bindings(context, filters)
if queues:
if raise_in_use:
raise ext_qos.QueueInUseByPort()
else:
return
nvplib.delete_lqueue(self.cluster, id)
return super(NvpPluginV2, self).delete_qos_queue(context, id)
| apache-2.0 |
eeshangarg/zulip | zerver/context_processors.py | 3 | 8897 | from typing import Any, Dict, Optional
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import HttpRequest
from django.utils.html import escape
from django.utils.safestring import SafeString
from django.utils.translation import get_language
from version import (
LATEST_MAJOR_VERSION,
LATEST_RELEASE_ANNOUNCEMENT,
LATEST_RELEASE_VERSION,
ZULIP_VERSION,
)
from zerver.lib.exceptions import InvalidSubdomainError
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.send_email import FromAddress
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm, UserProfile, get_realm
from zproject.backends import (
AUTH_BACKEND_NAME_MAP,
any_social_backend_enabled,
auth_enabled_helper,
get_external_method_dicts,
password_auth_enabled,
require_email_format_usernames,
)
DEFAULT_PAGE_PARAMS = {
"development_environment": settings.DEVELOPMENT,
"webpack_public_path": staticfiles_storage.url(settings.WEBPACK_BUNDLES),
}
def common_context(user: UserProfile) -> Dict[str, Any]:
"""Common context used for things like outgoing emails that don't
have a request.
"""
return {
"realm_uri": user.realm.uri,
"realm_name": user.realm.name,
"root_domain_uri": settings.ROOT_DOMAIN_URI,
"external_uri_scheme": settings.EXTERNAL_URI_SCHEME,
"external_host": settings.EXTERNAL_HOST,
"user_name": user.full_name,
}
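# Example (illustrative): the mapping above is typically fed straight into an
# outgoing-email template, e.g. rendering "{{ user_name }} on {{ realm_name }}"
# for a notification; no HttpRequest is involved.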
def get_realm_from_request(request: HttpRequest) -> Optional[Realm]:
if hasattr(request, "user") and hasattr(request.user, "realm"):
return request.user.realm
if not hasattr(request, "realm"):
# We cache the realm object from this function on the request,
# so that functions that call get_realm_from_request don't
# need to do duplicate queries on the same realm while
# processing a single request.
subdomain = get_subdomain(request)
try:
request.realm = get_realm(subdomain)
except Realm.DoesNotExist:
request.realm = None
return request.realm
def get_valid_realm_from_request(request: HttpRequest) -> Realm:
realm = get_realm_from_request(request)
if realm is None:
raise InvalidSubdomainError()
return realm
def get_apps_page_url() -> str:
if settings.ZILENCER_ENABLED:
return "/apps/"
return "https://zulip.com/apps/"
def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
"""Context available to all Zulip Jinja2 templates that have a request
passed in. Designed to provide the long list of variables at the
bottom of this function in a wide range of situations: logged-in
or logged-out, subdomains or not, etc.
The main variable in the below is whether we know what realm the
user is trying to interact with.
"""
realm = get_realm_from_request(request)
if realm is None:
realm_uri = settings.ROOT_DOMAIN_URI
realm_name = None
realm_icon = None
else:
realm_uri = realm.uri
realm_name = realm.name
realm_icon = get_realm_icon_url(realm)
register_link_disabled = settings.REGISTER_LINK_DISABLED
login_link_disabled = settings.LOGIN_LINK_DISABLED
find_team_link_disabled = settings.FIND_TEAM_LINK_DISABLED
allow_search_engine_indexing = False
if (
settings.ROOT_DOMAIN_LANDING_PAGE
and get_subdomain(request) == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
):
register_link_disabled = True
login_link_disabled = True
find_team_link_disabled = False
allow_search_engine_indexing = True
apps_page_web = settings.ROOT_DOMAIN_URI + "/accounts/go/"
user_is_authenticated = False
if hasattr(request, "user") and hasattr(request.user, "is_authenticated"):
user_is_authenticated = request.user.is_authenticated
if settings.DEVELOPMENT:
secrets_path = "zproject/dev-secrets.conf"
settings_path = "zproject/dev_settings.py"
settings_comments_path = "zproject/prod_settings_template.py"
else:
secrets_path = "/etc/zulip/zulip-secrets.conf"
settings_path = "/etc/zulip/settings.py"
settings_comments_path = "/etc/zulip/settings.py"
support_email = FromAddress.SUPPORT
support_email_html_tag = SafeString(
f'<a href="mailto:{escape(support_email)}">{escape(support_email)}</a>'
)
default_page_params = {
**DEFAULT_PAGE_PARAMS,
"request_language": get_language(),
}
context = {
"root_domain_landing_page": settings.ROOT_DOMAIN_LANDING_PAGE,
"custom_logo_url": settings.CUSTOM_LOGO_URL,
"register_link_disabled": register_link_disabled,
"login_link_disabled": login_link_disabled,
"terms_of_service": settings.TERMS_OF_SERVICE,
"privacy_policy": settings.PRIVACY_POLICY,
"login_url": settings.HOME_NOT_LOGGED_IN,
"only_sso": settings.ONLY_SSO,
"external_host": settings.EXTERNAL_HOST,
"external_uri_scheme": settings.EXTERNAL_URI_SCHEME,
"realm_uri": realm_uri,
"realm_name": realm_name,
"realm_icon": realm_icon,
"root_domain_uri": settings.ROOT_DOMAIN_URI,
"apps_page_url": get_apps_page_url(),
"apps_page_web": apps_page_web,
"open_realm_creation": settings.OPEN_REALM_CREATION,
"development_environment": settings.DEVELOPMENT,
"support_email": support_email,
"support_email_html_tag": support_email_html_tag,
"find_team_link_disabled": find_team_link_disabled,
"password_min_length": settings.PASSWORD_MIN_LENGTH,
"password_min_guesses": settings.PASSWORD_MIN_GUESSES,
"zulip_version": ZULIP_VERSION,
"user_is_authenticated": user_is_authenticated,
"settings_path": settings_path,
"secrets_path": secrets_path,
"settings_comments_path": settings_comments_path,
"platform": request.client_name,
"allow_search_engine_indexing": allow_search_engine_indexing,
"landing_page_navbar_message": settings.LANDING_PAGE_NAVBAR_MESSAGE,
"default_page_params": default_page_params,
}
context["OPEN_GRAPH_URL"] = f"{realm_uri}{request.path}"
if realm is not None and realm.icon_source == realm.ICON_UPLOADED:
context["OPEN_GRAPH_IMAGE"] = urljoin(realm_uri, realm_icon)
return context
def login_context(request: HttpRequest) -> Dict[str, Any]:
realm = get_realm_from_request(request)
if realm is None:
realm_description = None
realm_invite_required = False
else:
realm_description = get_realm_rendered_description(realm)
realm_invite_required = realm.invite_required
context: Dict[str, Any] = {
"realm_invite_required": realm_invite_required,
"realm_description": realm_description,
"require_email_format_usernames": require_email_format_usernames(realm),
"password_auth_enabled": password_auth_enabled(realm),
"any_social_backend_enabled": any_social_backend_enabled(realm),
"two_factor_authentication_enabled": settings.TWO_FACTOR_AUTHENTICATION_ENABLED,
}
if realm is not None and realm.description:
context["OPEN_GRAPH_TITLE"] = realm.name
context["OPEN_GRAPH_DESCRIPTION"] = get_realm_text_description(realm)
# Add the keys for our standard authentication backends.
no_auth_enabled = True
for auth_backend_name in AUTH_BACKEND_NAME_MAP:
name_lower = auth_backend_name.lower()
key = f"{name_lower}_auth_enabled"
is_enabled = auth_enabled_helper([auth_backend_name], realm)
context[key] = is_enabled
if is_enabled:
no_auth_enabled = False
context["external_authentication_methods"] = get_external_method_dicts(realm)
context["no_auth_enabled"] = no_auth_enabled
# Include another copy of external_authentication_methods in page_params for use
# by the desktop client. We expand it with IDs of the <button> elements corresponding
# to the authentication methods.
context["page_params"] = dict(
external_authentication_methods=get_external_method_dicts(realm),
)
for auth_dict in context["page_params"]["external_authentication_methods"]:
auth_dict["button_id_suffix"] = "auth_button_{}".format(auth_dict["name"])
return context
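# Example (illustrative): for an external method named "google", the loop in
# login_context() yields auth_dict["button_id_suffix"] == "auth_button_google",
# which the desktop client uses to find the matching <button> element.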
def latest_info_context() -> Dict[str, str]:
context = {
"latest_release_version": LATEST_RELEASE_VERSION,
"latest_major_version": LATEST_MAJOR_VERSION,
"latest_release_announcement": LATEST_RELEASE_ANNOUNCEMENT,
}
return context
| apache-2.0 |
mars-knowsnothing/amos-bot | src/Lib/encodings/mac_croatian.py | 272 | 13633 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
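### Example usage (illustrative; assumes the codec has been registered under
### the name 'mac-croatian' via the encodings package):
###
### >>> 'Šibenik'.encode('mac-croatian')
### b'\xa9ibenik'
### >>> b'\xa9ibenik'.decode('mac-croatian')
### 'Šibenik'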
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
'\u2122' # 0xAA -> TRADE MARK SIGN
'\xb4' # 0xAB -> ACUTE ACCENT
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
'\u221e' # 0xB0 -> INFINITY
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u2206' # 0xB4 -> INCREMENT
'\xb5' # 0xB5 -> MICRO SIGN
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u220f' # 0xB8 -> N-ARY PRODUCT
'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
'\u222b' # 0xBA -> INTEGRAL
'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
'\xbf' # 0xC0 -> INVERTED QUESTION MARK
'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
'\u2248' # 0xC5 -> ALMOST EQUAL TO
'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\uf8ff' # 0xD8 -> Apple logo
'\xa9' # 0xD9 -> COPYRIGHT SIGN
'\u2044' # 0xDA -> FRACTION SLASH
'\u20ac' # 0xDB -> EURO SIGN
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2013' # 0xE0 -> EN DASH
'\xb7' # 0xE1 -> MIDDLE DOT
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u2030' # 0xE4 -> PER MILLE SIGN
'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u02dc' # 0xF7 -> SMALL TILDE
'\xaf' # 0xF8 -> MACRON
'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u02da' # 0xFB -> RING ABOVE
'\xb8' # 0xFC -> CEDILLA
'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xe6' # 0xFE -> LATIN SMALL LETTER AE
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
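### Round-trip sketch (illustrative): encoding_table is the inverse of
### decoding_table, so charmap_encode(charmap_decode(b, 'strict',
### decoding_table)[0], 'strict', encoding_table) recovers any byte b,
### e.g. 0xAE <-> '\u017d' (LATIN CAPITAL LETTER Z WITH CARON).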
| gpl-3.0 |
nharraud/b2share | invenio/legacy/bibsword/client_http.py | 13 | 6638 | # This file is part of Invenio.
# Copyright (C) 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Http Queries
'''
import urllib2
from tempfile import NamedTemporaryFile
from invenio.config import CFG_TMPDIR
from invenio.utils.url import make_user_agent_string
class RemoteSwordServer:
'''This class gives every tools to communicate with the SWORD/APP deposit
of ArXiv.
'''
# static variable used to properly perform http request
agent = make_user_agent_string("BibSWORD")
def __init__(self, authentication_infos):
'''
        This method is the constructor of the class; it initialises the
        connection using a password, which allows users to connect with
        auto-authentication.
@param self: reference to the current instance of the class
@param authentication_infos: dictionary with authentication infos containing
keys:
- realm: realm of the server
- hostname: hostname of the server
            - username: name of a known arXiv user
- password: password of the known user
'''
#password manager with default realm to avoid looking for it
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(authentication_infos['realm'],
authentication_infos['hostname'],
authentication_infos['username'],
authentication_infos['password'])
        #create an authentication handler
authhandler = urllib2.HTTPBasicAuthHandler(passman)
http_handler = urllib2.HTTPHandler(debuglevel=0)
opener = urllib2.build_opener(authhandler, http_handler)
        # installing: every call through urllib2 will now use the same user/pass
urllib2.install_opener(opener)
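        # Example (illustrative): once the opener is installed, a later call
        # such as
        #   urllib2.urlopen(urllib2.Request(url))
        # anywhere in this module is transparently authenticated with the
        # credentials registered above.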
def get_remote_collection(self, url):
'''
        This method sends a request to the service document to discover the
        collections offered by arXiv.
@param self: reference to the current instance of the class
@param url: the url where the request is made
        @return: (xml file) collections of arXiv allowed for the user
'''
#format the request
request = urllib2.Request(url)
#launch request
#try:
response = urllib2.urlopen(request)
#except urllib2.HTTPError:
# return ''
#except urllib2.URLError:
# return ''
return response.read()
def deposit_media(self, media, collection, onbehalf):
'''
        This method allows the deposit of any type of media on a given arXiv
        collection.
        @param self: reference to the current instance of the class
        @param media: dict of file info {'type', 'size', 'file'}
        @param collection: abbreviation of the collection where to deposit
        @param onbehalf: user that makes the deposit
        @return: (xml file) contains the error or the url of the temp file
'''
#format the final deposit URL
deposit_url = collection
#prepare the header
headers = {}
headers['Content-Type'] = media['type']
headers['Content-Length'] = media['size']
#if on behalf, add to the header
if onbehalf != '':
headers['X-On-Behalf-Of'] = onbehalf
headers['X-No-Op'] = 'True'
headers['X-Verbose'] = 'True'
headers['User-Agent'] = self.agent
#format the request
result = urllib2.Request(deposit_url, media['file'], headers)
#launch request
try:
return urllib2.urlopen(result).read()
except urllib2.HTTPError:
return ''
def metadata_submission(self, deposit_url, metadata, onbehalf):
'''
        This method sends the metadata to arXiv, then returns the answer
        @param metadata: xml file to submit to arXiv
        @param onbehalf: specifies the person (and email) to inform of the
                         publication
'''
#prepare the header of the request
headers = {}
headers['Host'] = 'arxiv.org'
headers['User-Agent'] = self.agent
headers['Content-Type'] = 'application/atom+xml;type=entry'
#if on behalf, add to the header
if onbehalf != '':
headers['X-On-Behalf-Of'] = onbehalf
headers['X-No-Op'] = 'True'
headers['X-verbose'] = 'True'
#format the request
result = urllib2.Request(deposit_url, metadata, headers)
#launch request
try:
response = urllib2.urlopen(result).read()
except urllib2.HTTPError as e:
tmpfd = NamedTemporaryFile(mode='w', suffix='.xml', prefix='bibsword_error_',
dir=CFG_TMPDIR, delete=False)
tmpfd.write(e.read())
tmpfd.close()
return ''
except urllib2.URLError:
return ''
return response
    def get_submission_status(self, status_url):
'''
        This method gets the xml file from the given URL and returns it
@param status_url: url where to get the status
@return: xml atom entry containing the status
'''
#format the http request
request = urllib2.Request(status_url)
request.add_header('Host', 'arxiv.org')
request.add_header('User-Agent', self.agent)
#launch request
try:
response = urllib2.urlopen(request).read()
except urllib2.HTTPError:
return 'HTTPError (Might be an authentication issue)'
except urllib2.URLError:
return 'Wrong url'
return response
| gpl-2.0 |
zouzhberk/ambaridemo | demo-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py | 1 | 4958 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import Directory, Execute, File, Link
from resource_management.core.source import InlineTemplate
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
from resource_management.core.source import Template
from resource_management.libraries.functions import compare_versions
from storm_yaml_utils import yaml_config_template, yaml_config
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def storm(name=None):
import params
yaml_config("storm.yaml",
conf_dir=params.conf_dir,
configurations=params.config['configurations']['storm-site'],
owner=params.storm_user
)
if params.service_map.has_key(name):
service_name = params.service_map[name]
ServiceConfig(service_name,
action="change_user",
username = params.storm_user,
password = Script.get_password(params.storm_user))
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def storm(name=None):
import params
Directory(params.log_dir,
owner=params.storm_user,
group=params.user_group,
mode=0777,
recursive=True
)
Directory([params.pid_dir, params.local_dir],
owner=params.storm_user,
group=params.user_group,
recursive=True,
cd_access="a",
)
Directory(params.conf_dir,
group=params.user_group,
recursive=True,
cd_access="a",
)
File(format("{conf_dir}/config.yaml"),
content=Template("config.yaml.j2"),
owner=params.storm_user,
group=params.user_group
)
configurations = params.config['configurations']['storm-site']
File(format("{conf_dir}/storm.yaml"),
content=yaml_config_template(configurations),
owner=params.storm_user,
group=params.user_group
)
if params.has_metric_collector:
File(format("{conf_dir}/storm-metrics2.properties"),
owner=params.storm_user,
group=params.user_group,
content=Template("storm-metrics2.properties.j2")
)
    # Remove symlink. It can be there if you are upgrading from HDP < 2.2 to HDP >= 2.2
Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
action="delete")
Execute(format("{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
only_if=format("ls {metric_collector_sink_jar}")
)
File(format("{conf_dir}/storm-env.sh"),
owner=params.storm_user,
content=InlineTemplate(params.storm_env_sh_template)
)
if params.security_enabled:
TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
owner=params.storm_user
)
if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
TemplateConfig(format("{conf_dir}/client_jaas.conf"),
owner=params.storm_user
)
minRuid = configurations['_storm.min.ruid'] if configurations.has_key('_storm.min.ruid') else ''
min_user_ruid = int(minRuid) if minRuid.isdigit() else _find_real_user_min_uid()
File(format("{conf_dir}/worker-launcher.cfg"),
content=Template("worker-launcher.cfg.j2", min_user_ruid = min_user_ruid),
owner='root',
group=params.user_group
)
'''
Finds minimal real user UID
'''
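# Illustrative example: given a line "UID_MIN   500" in /etc/login.defs,
# the helper below returns 500; storm() passes that value to the
# worker-launcher.cfg template as min_user_ruid unless the configuration
# overrides it via _storm.min.ruid.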
def _find_real_user_min_uid():
with open('/etc/login.defs') as f:
for line in f:
if line.strip().startswith('UID_MIN') and len(line.split()) == 2 and line.split()[1].isdigit():
return int(line.split()[1])
raise Fail("Unable to find UID_MIN in file /etc/login.defs. Expecting format e.g.: 'UID_MIN 500'")
| apache-2.0 |
tradel/AppDynamicsREST | examples/license_count.py | 1 | 3098 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
__version__ = '0.4.5'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
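# Example (illustrative): incr(d, 'Java App Agent') bumps the counter by 1,
# creating the key if absent; incr(d, 'Java App Agent', 3) adds 3.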
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
# node_type = node.type
# print node.id, node.machine_id, node.machine_name, node.type
# print node.type, node.os_type, node.app_agent_version
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
if 'PHP' in node.type:
node.group_type = 'PHP App Agent'
                elif 'IIS' in node.type:
node.group_type = '.NET App Agent'
else:
node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
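# Note: itertools.groupby only groups *consecutive* equal keys, so sorting by
# machine_id first is required; otherwise nodes on the same machine could be
# split across several groups and licenses would be over-counted.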
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
# types = [x.group_type for x in nodes_on_machine]
# all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# print all_same, types
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, agent_type, license_count)
incr(host_counts, agent_type, 1)
incr(node_counts, agent_type, len(nodes_on_machine))
# if '.NET' in agent_type:
# node_names = [x.name for x in nodes_on_machine]
# print 'Host:', first_node.machine_name, '\n\t', '\n\t'.join(node_names)
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s %-15s %-15s %s'
data_fmt = '%-30s %15d %15d %15d'
print()
print('License usage report for ' + args.url)
print('Generated at: ' + str(datetime.now()))
print()
print(header_fmt % ('Node Type', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for node_type in ('Java App Agent', '.NET App Agent', 'PHP App Agent', 'Machine Agent only'):
node_count = node_counts.get(node_type, 0)
host_count = host_counts.get(node_type, 0)
lic_count = lic_counts.get(node_type, 0)
tot_nodes += node_count
tot_hosts += host_count
tot_licenses += lic_count
print(data_fmt % (node_type, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
| apache-2.0 |
yajiedesign/mxnet | tests/nightly/test_server_profiling.py | 18 | 2797 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import json
key = '99'
shape = (1200, 1200) # bigger than MXNET_KVSTORE_BIGARRAY_BOUND
kv = mx.kv.create('dist_sync')
def init_kv():
    # init kv dense keys
kv.init(key, mx.nd.ones(shape))
kv.set_optimizer(mx.optimizer.create('sgd'))
return kv, kv.rank, kv.num_workers
def test_sync_push_pull():
kv, my_rank, nworker = init_kv()
def check_default_keys(kv, my_rank):
nrepeat = 10
# checks pull after push in loop, because behavior during
# consecutive pushes doesn't offer any guarantees
for i in range(nrepeat):
kv.push(key, mx.nd.ones(shape, dtype='float32') * (my_rank+1))
val = mx.nd.zeros(shape, dtype='float32')
kv.pull(key, out=val)
mx.nd.waitall()
check_default_keys(kv, my_rank)
if __name__ == "__main__":
server_filename_suffix = 'test_profile_server.json'
worker_filename_suffix = 'test_profile_worker.json'
mx.profiler.set_config(filename=server_filename_suffix, profile_all=True, profile_process='server')
mx.profiler.set_config(filename='rank' + str(kv.rank) + '_' + worker_filename_suffix, profile_all=True, profile_process='worker')
mx.profiler.set_state(state='run', profile_process='server')
mx.profiler.set_state(state='run', profile_process='worker')
test_sync_push_pull()
mx.profiler.set_state(state='stop', profile_process='server')
mx.profiler.set_state(state='stop', profile_process='worker')
import glob, os
# will only work when launcher mode is local, as used for integration test
if kv.rank == 0:
for rank in range(kv.num_workers):
for suffix in [worker_filename_suffix, server_filename_suffix]:
# throws value error if file is not proper json
filename = 'rank' + str(rank) + '_' + suffix
print(glob.glob('*'), os.getcwd())
with open(filename, 'r') as f:
j = json.load(f)
| apache-2.0 |
cernops/neutron | neutron/tests/unit/bigswitch/fake_server.py | 6 | 6094 | # Copyright 2013 Big Switch Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import servermanager
LOG = logging.getLogger(__name__)
class HTTPResponseMock():
status = 200
reason = 'OK'
def __init__(self, sock, debuglevel=0, strict=0, method=None,
buffering=False):
pass
def read(self):
return "{'status': '200 OK'}"
def getheader(self, header):
return None
class HTTPResponseMock404(HTTPResponseMock):
status = 404
reason = 'Not Found'
def read(self):
return "{'status': '%s 404 Not Found'}" % servermanager.NXNETWORK
class HTTPResponseMock500(HTTPResponseMock):
status = 500
reason = 'Internal Server Error'
def __init__(self, sock, debuglevel=0, strict=0, method=None,
buffering=False, errmsg='500 Internal Server Error'):
self.errmsg = errmsg
def read(self):
return "{'status': '%s'}" % self.errmsg
class HTTPConnectionMock(object):
def __init__(self, server, port, timeout):
self.response = None
self.broken = False
# Port 9000 is the broken server
if port == 9000:
self.broken = True
errmsg = "This server is broken, please try another"
self.response = HTTPResponseMock500(None, errmsg=errmsg)
def request(self, action, uri, body, headers):
LOG.debug(_("Request: action=%(action)s, uri=%(uri)r, "
"body=%(body)s, headers=%(headers)s"),
{'action': action, 'uri': uri,
'body': body, 'headers': headers})
if self.broken and "ExceptOnBadServer" in uri:
raise Exception("Broken server got an unexpected request")
if self.response:
return
# detachment may return 404 and plugin shouldn't die
if uri.endswith('attachment') and action == 'DELETE':
self.response = HTTPResponseMock404(None)
else:
self.response = HTTPResponseMock(None)
# Port creations/updates must contain binding information
if ('port' in uri and 'attachment' not in uri
and 'binding' not in body and action in ('POST', 'PUT')):
errmsg = "Port binding info missing in port request '%s'" % body
self.response = HTTPResponseMock500(None, errmsg=errmsg)
return
return
def getresponse(self):
return self.response
def close(self):
pass
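# A minimal sketch of how these mocks are typically wired into a test: the
# plugin's HTTP connection class is patched so that all REST calls hit the
# fakes (the exact patch target is an assumption, not defined in this file).
#
#   with mock.patch('httplib.HTTPConnection', new=HTTPConnectionMock):
#       ...  # exercise plugin code; port 9000 simulates a broken server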
class HTTPConnectionMock404(HTTPConnectionMock):
def __init__(self, server, port, timeout):
self.response = HTTPResponseMock404(None)
self.broken = True
class HTTPConnectionMock500(HTTPConnectionMock):
def __init__(self, server, port, timeout):
self.response = HTTPResponseMock500(None)
self.broken = True
class VerifyMultiTenantFloatingIP(HTTPConnectionMock):
def request(self, action, uri, body, headers):
# Only handle network update requests
if 'network' in uri and 'tenant' in uri and 'ports' not in uri:
req = jsonutils.loads(body)
if 'network' not in req or 'floatingips' not in req['network']:
msg = _("No floating IPs in request"
"uri=%(uri)s, body=%(body)s") % {'uri': uri,
'body': body}
raise Exception(msg)
distinct_tenants = []
for flip in req['network']['floatingips']:
if flip['tenant_id'] not in distinct_tenants:
distinct_tenants.append(flip['tenant_id'])
if len(distinct_tenants) < 2:
msg = _("Expected floating IPs from multiple tenants."
"uri=%(uri)s, body=%(body)s") % {'uri': uri,
'body': body}
raise Exception(msg)
super(VerifyMultiTenantFloatingIP,
self).request(action, uri, body, headers)
class HTTPSMockBase(HTTPConnectionMock):
expected_cert = ''
combined_cert = None
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, source_address=None):
self.host = host
super(HTTPSMockBase, self).__init__(host, port, timeout)
def request(self, method, url, body=None, headers={}):
self.connect()
super(HTTPSMockBase, self).request(method, url, body, headers)
class HTTPSNoValidation(HTTPSMockBase):
def connect(self):
if self.combined_cert:
raise Exception('combined_cert set on NoValidation')
class HTTPSCAValidation(HTTPSMockBase):
expected_cert = 'DUMMYCERTIFICATEAUTHORITY'
def connect(self):
contents = get_cert_contents(self.combined_cert)
if self.expected_cert not in contents:
raise Exception('No dummy CA cert in cert_file')
class HTTPSHostValidation(HTTPSMockBase):
expected_cert = 'DUMMYCERTFORHOST%s'
def connect(self):
contents = get_cert_contents(self.combined_cert)
expected = self.expected_cert % self.host
if expected not in contents:
raise Exception(_('No host cert for %(server)s in cert %(cert)s'),
{'server': self.host, 'cert': contents})
def get_cert_contents(path):
raise Exception('METHOD MUST BE MOCKED FOR TEST')
| apache-2.0 |
skalanux/filezaar | filezaar-daemon-test.py | 1 | 7080 | #!/usr/bin/env python
# This File is Slightly based on wicd daemon implementation
# Copyright (C) 2007 - 2008 Adam Blackburn
# Copyright (C) 2007 - 2008 Dan O'Reilly
# Copyright (C) 2007 - 2008 Byron Hillis
#
# Filezaar Daemon:
# Copyright (C) 2009 Juan Manuel Schillaci
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
""" filezaar - filezaar daemon implementation.
"""
import os
import sys
import time
import getopt
import ConfigParser
import signal
import logging
import Queue
# DBUS
import gobject
import dbus
import dbus.service
# Other imports, still to be cleaned up
import os, sys
import re
from fnmatch import translate
import time
from bzrlib import branch, errors
from bzrlib.conflicts import ConflictList
from bzrlib.lockdir import LockDir
from bzrlib.workingtree import WorkingTree
from dbus.mainloop.glib import DBusGMainLoop
# FileZaar specific libraries
#from filezaar.queuemanager import QueueManager
#from filezaar.watcher import Watcher
import config
from filezaar.constants import *
from filezaar.updater import Updater
DBusGMainLoop(set_as_default=True)
class FileZaarDBUS(dbus.service.Object):
def __init__(self, bus_name, object_path='/org/filezaar/daemon',
auto_connect=True):
dbus.service.Object.__init__(self, bus_name, object_path)
self.updater = Updater()
@dbus.service.method('org.filezaar.daemon')
def Hello(self):
""" Returns the version number.
"""
# VERSIONNUMBER
version = '0.1'
print 'returned version number', version
return version
@dbus.service.method('org.filezaar.daemon')
def GetFileZaarStatus(self):
""" Returns the version number.
"""
# Chacking for the state of filezaar
return STATUS_IDLE, "Filezaar is uptodate"
@dbus.service.method('org.filezaar.daemon')
def UploadFile(self, file):
""" Returns the version number.
"""
print 'Uploading File'
self.updater.upload_file(file)
@dbus.service.method('org.filezaar.daemon')
def Sync(self):
print 'Synchronizing'
self.updater._sync()
self.EmitStatusChanged(100, 'Uploading')
@dbus.service.method('org.filezaar.daemon', in_signature='uav')
def EmitStatusChanged(self, state, info):
self.StatusChanged(state, info)
@dbus.service.signal(dbus_interface='org.filezaar.daemon', signature='uav')
def StatusChanged(self, state, info):
""" Emits a "status changed" dbus signal.
This D-Bus signal is emitted when the connection status changes.
"""
pass
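# A minimal client-side sketch (assumes the daemon below is already running;
# the bus name and object path match the ones registered in main()):
#
#   bus = dbus.SessionBus()
#   daemon = bus.get_object('org.filezaar.daemon', '/org/filezaar/daemon')
#   print daemon.Hello()  # -> '0.1'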
################ Start Module Methods #############################
###################################################################
def usage():
# VERSIONNUMBER
print """
filezaar 0.1
FileZaar connection daemon.
"""
def daemonize():
""" Disconnect from the controlling terminal.
"""
# Fork the first time to disconnect from the parent terminal and
# exit the parent process.
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
print >> sys.stderr, "Fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# Decouple from parent environment to stop us from being a zombie.
os.setsid()
os.umask(0)
# Fork the second time to prevent us from opening a file that will
# become our controlling terminal.
try:
pid = os.fork()
if pid > 0:
pidfile = '/tmp/filezaar.pid'
dirname = os.path.dirname(pidfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
pidfile = open(pidfile, 'w')
pidfile.write(str(pid) + '\n')
pidfile.close()
sys.exit(0)
except OSError, e:
print >> sys.stderr, "Fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
sys.stdout.flush()
sys.stderr.flush()
os.close(sys.__stdin__.fileno())
os.close(sys.__stdout__.fileno())
os.close(sys.__stderr__.fileno())
# stdin always from /dev/null
sys.stdin = open('/dev/null', 'r')
def main(argv):
""" The main daemon program.
Keyword arguments:
argv -- The arguments passed to the script.
"""
global child_pid
do_daemonize = True
redirect_stderr = True
redirect_stdout = True
auto_connect = True
try:
opts, args = getopt.getopt(sys.argv[1:], 'fenoah',
['help', 'no-daemon', 'no-poll', 'no-stderr', 'no-stdout',
'no-autoconnect'])
except getopt.GetoptError:
# Print help information and exit
usage()
sys.exit(2)
no_poll = False
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit()
if o in ('-e', '--no-stderr'):
redirect_stderr = False
if o in ('-o', '--no-stdout'):
redirect_stdout = False
if o in ('-f', '--no-daemon'):
do_daemonize = False
if o in ('-a', '--no-autoconnect'):
auto_connect = False
if o in ('-n', '--no-poll'):
no_poll = True
#This is commented only for testing purposes
#daemonize()
# Open the DBUS session
# TODO: Decide if I should use a SessionBus (session-wide) or a SystemBus (system-wide)
d_bus_name = dbus.service.BusName('org.filezaar.daemon', bus=dbus.SessionBus())
obj = FileZaarDBUS(d_bus_name, auto_connect=auto_connect)
gobject.threads_init()
(child_pid, x, x, x) = gobject.spawn_async(["main.py"],
flags=gobject.SPAWN_CHILD_INHERITS_STDIN)
signal.signal(signal.SIGTERM, sigterm_caught)
# Enter the main loop
mainloop = gobject.MainLoop()
mainloop.run()
def sigterm_caught(sig, frame):
""" Called when a SIGTERM is caught, kills monitor.py before exiting. """
global child_pid
print 'SIGTERM caught, killing filezaar-monitor...'
os.kill(child_pid, signal.SIGTERM)
print 'Removing PID file...'
# 'wpath' came from the original wicd code and is undefined here; use the
# same path the daemon wrote out in daemonize().
pidfile = '/tmp/filezaar.pid'
if os.path.exists(pidfile):
os.remove(pidfile)
print 'Shutting down...'
sys.exit(0)
if __name__ == '__main__':
#Only do this if we decide to use system bus instead of the session One
#if os.getuid() != 0:
# print ("Root privileges are required for the daemon to run properly." +
# " Exiting.")
# sys.exit(1)
main(sys.argv)
| gpl-3.0 |
c0710204/mirrorsBistu | pypi/bandersnatch/lib/python2.7/site-packages/execnet/gateway.py | 1 | 7382 | """
gateway code for initiating popen, socket and ssh connections.
(c) 2004-2009, Holger Krekel and others
"""
import sys, os, inspect, types, linecache
import textwrap
import execnet
from execnet.gateway_base import Message
from execnet.gateway_io import Popen2IOMaster
from execnet import gateway_base
importdir = os.path.dirname(os.path.dirname(execnet.__file__))
class Gateway(gateway_base.BaseGateway):
""" Gateway to a local or remote Python Intepreter. """
def __init__(self, io, id):
super(Gateway, self).__init__(io=io, id=id, _startcount=1)
self._initreceive()
@property
def remoteaddress(self):
return self._io.remoteaddress
def __repr__(self):
""" return string representing gateway type and status. """
try:
r = (self.hasreceiver() and 'receive-live' or 'not-receiving')
i = len(self._channelfactory.channels())
except AttributeError:
r = "uninitialized"
i = "no"
return "<%s id=%r %s, %s active channels>" %(
self.__class__.__name__, self.id, r, i)
def exit(self):
""" trigger gateway exit. Defer waiting for finishing
of receiver-thread and subprocess activity to when
group.terminate() is called.
"""
self._trace("gateway.exit() called")
if self not in self._group:
self._trace("gateway already unregistered with group")
return
self._group._unregister(self)
self._trace("--> sending GATEWAY_TERMINATE")
try:
self._send(Message.GATEWAY_TERMINATE)
self._io.close_write()
except IOError:
v = sys.exc_info()[1]
self._trace("io-error: could not send termination sequence")
self._trace(" exception: %r" % v)
def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False):
"""
set the string coercion for this gateway
the default is to try to convert py2 str as py3 str,
but not to try and convert py3 str to py2 str
"""
self._strconfig = (py2str_as_py3str, py3str_as_py2str)
data = gateway_base.dumps_internal(self._strconfig)
self._send(Message.RECONFIGURE, data=data)
def _rinfo(self, update=False):
""" return some sys/env information from remote. """
if update or not hasattr(self, '_cache_rinfo'):
ch = self.remote_exec(rinfo_source)
self._cache_rinfo = RInfo(ch.receive())
return self._cache_rinfo
def hasreceiver(self):
""" return True if gateway is able to receive data. """
return self._receiverthread.isAlive() # approximation
def remote_status(self):
""" return information object about remote execution status. """
channel = self.newchannel()
self._send(Message.STATUS, channel.id)
statusdict = channel.receive()
# the other side didn't actually instantiate a channel
# so we just delete the internal id/channel mapping
self._channelfactory._local_close(channel.id)
return RemoteStatus(statusdict)
def remote_exec(self, source, **kwargs):
""" return channel object and connect it to a remote
execution thread where the given ``source`` executes.
* ``source`` is a string: execute source string remotely
with a ``channel`` put into the global namespace.
* ``source`` is a pure function: serialize source and
call function with ``**kwargs``, adding a
``channel`` object to the keyword arguments.
* ``source`` is a pure module: execute source of module
with a ``channel`` in its global namespace
In all cases the binding ``__name__='__channelexec__'``
will be available in the global namespace of the remotely
executing code.
"""
call_name = None
if isinstance(source, types.ModuleType):
linecache.updatecache(inspect.getsourcefile(source))
source = inspect.getsource(source)
elif isinstance(source, types.FunctionType):
call_name = source.__name__
source = _source_of_function(source)
else:
source = textwrap.dedent(str(source))
if call_name is None and kwargs:
raise TypeError("can't pass kwargs to non-function remote_exec")
channel = self.newchannel()
self._send(Message.CHANNEL_EXEC,
channel.id,
gateway_base.dumps_internal((source, call_name, kwargs)))
return channel
def remote_init_threads(self, num=None):
""" start up to 'num' threads for subsequent
remote_exec() invocations to allow concurrent
execution.
"""
if hasattr(self, '_remotechannelthread'):
raise IOError("remote threads already running")
from execnet import threadpool
source = inspect.getsource(threadpool)
self._remotechannelthread = self.remote_exec(source)
self._remotechannelthread.send(num)
status = self._remotechannelthread.receive()
assert status == "ok", status
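# A minimal usage sketch (assuming a local popen gateway created through the
# public execnet API; not part of this module):
#
#   gw = execnet.makegateway()
#   channel = gw.remote_exec("channel.send(channel.receive() + 1)")
#   channel.send(41)
#   assert channel.receive() == 42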
class RInfo:
def __init__(self, kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
info = ", ".join(["%s=%s" % item
for item in self.__dict__.items()])
return "<RInfo %r>" % info
RemoteStatus = RInfo
def rinfo_source(channel):
import sys, os
channel.send(dict(
executable = sys.executable,
version_info = sys.version_info[:5],
platform = sys.platform,
cwd = os.getcwd(),
pid = os.getpid(),
))
def _find_non_builtin_globals(source, codeobj):
try:
import ast
except ImportError:
return None
try:
import __builtin__
except ImportError:
import builtins as __builtin__
vars = dict.fromkeys(codeobj.co_varnames)
all = []
for node in ast.walk(ast.parse(source)):
if (isinstance(node, ast.Name) and node.id not in vars and
node.id not in __builtin__.__dict__):
all.append(node.id)
return all
def _source_of_function(function):
if function.__name__ == '<lambda>':
raise ValueError("can't evaluate lambda functions'")
#XXX: we don't check before remote instantiation
# if arguments are used properly
args, varargs, keywords, defaults = inspect.getargspec(function)
if args[0] != 'channel':
raise ValueError('expected first function argument to be `channel`')
if sys.version_info < (3,0):
closure = function.func_closure
codeobj = function.func_code
else:
closure = function.__closure__
codeobj = function.__code__
if closure is not None:
raise ValueError("functions with closures can't be passed")
try:
source = inspect.getsource(function)
except IOError:
raise ValueError("can't find source file for %s" % function)
source = textwrap.dedent(source) # just for inner functions
used_globals = _find_non_builtin_globals(source, codeobj)
if used_globals:
raise ValueError(
"the use of non-builtin globals isn't supported",
used_globals,
)
return source
| mit |
mikest/geode | geode/force/__init__.py | 3 | 3405 | from __future__ import division,absolute_import
from geode import *
def edge_springs(mesh,mass,X,stiffness,damping_ratio):
return Springs(mesh.segment_soup().elements,mass,X,stiffness,damping_ratio)
def bending_springs(mesh,mass,X,stiffness,damping_ratio):
springs = ascontiguousarray(mesh.bending_quadruples()[:,(0,3)])
return Springs(springs,mass,X,stiffness,damping_ratio)
StrainMeasure = {2:StrainMeasure2d,3:StrainMeasure3d}
FiniteVolume = {(2,2):FiniteVolume2d,(3,2):FiniteVolumeS3d,(3,3):FiniteVolume3d}
LinearFiniteVolume = {(2,2):LinearFiniteVolume2d,(3,2):LinearFiniteVolumeS3d,(3,3):LinearFiniteVolume3d}
def finite_volume(mesh,density,X,model,m=None,plasticity=None,verbose=True):
elements = mesh.elements if isinstance(mesh,Object) else asarray(mesh,dtype=int32)
mx,d = asarray(X).shape[1],elements.shape[1]-1
if m is None:
m = mx
strain = StrainMeasure[d](elements,X)
if verbose:
strain.print_altitude_statistics()
if isinstance(model,dict):
model = model[d]
return FiniteVolume[m,d](strain,density,model,plasticity)
def linear_finite_volume(mesh,X,density,youngs_modulus=3e6,poissons_ratio=.4,rayleigh_coefficient=.05):
elements = mesh.elements if isinstance(mesh,Object) else asarray(mesh,dtype=int32)
m,d = asarray(X).shape[1],elements.shape[1]-1
if d==7:
return LinearFiniteVolumeHex(StrainMeasureHex(elements,X),density,youngs_modulus,poissons_ratio,rayleigh_coefficient)
else:
return LinearFiniteVolume[m,d](elements,X,density,youngs_modulus,poissons_ratio,rayleigh_coefficient)
def neo_hookean(youngs_modulus=3e6,poissons_ratio=.475,rayleigh_coefficient=.05,failure_threshold=.25):
return {2:NeoHookean2d(youngs_modulus,poissons_ratio,rayleigh_coefficient,failure_threshold),
3:NeoHookean3d(youngs_modulus,poissons_ratio,rayleigh_coefficient,failure_threshold)}
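# A minimal sketch of combining the helpers above (mesh, X and the material
# constants are placeholders; a real caller supplies a tet mesh and rest
# positions):
#
#   model = neo_hookean(youngs_modulus=1e6)
#   fv = finite_volume(mesh, density=1000, X=X, model=model)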
def simple_shell(mesh,density,Dm=None,X=None,stretch=(0,0),shear=0):
mesh = mesh if isinstance(mesh,Object) else TriangleSoup(asarray(mesh,dtype=int32))
if Dm is None:
X = asarray(X)
assert X.ndim==2 and X.shape[1]==2, 'Expected 2D rest state'
tris = mesh.elements
Dm = X[tris[:,1:]].swapaxes(1,2)-X[tris[:,0]].reshape(-1,2,1)
else:
assert X is None
shell = SimpleShell(mesh,ascontiguousarray(Dm),density)
shell.stretch_stiffness = stretch
shell.shear_stiffness = shear
return shell
LinearBendingElements = {2:LinearBendingElements2d,3:LinearBendingElements3d}
def linear_bending_elements(mesh,X,stiffness,damping):
X = asarray(X)
bend = LinearBendingElements[X.shape[1]](mesh,X)
bend.stiffness = stiffness
bend.damping = damping
return bend
CubicHinges = {2:CubicHinges2d,3:CubicHinges3d}
def cubic_hinges(mesh,X,stiffness,damping,angles=None):
bends = mesh.bending_tuples()
X = asarray(X)
Hinges = CubicHinges[X.shape[1]]
if angles is None:
angles = Hinges.angles(bends,X)
hinges = CubicHinges[X.shape[1]](bends,angles,X)
hinges.stiffness = stiffness
hinges.damping = damping
return hinges
BindingSprings = {2:BindingSprings2d,3:BindingSprings3d}
def binding_springs(nodes,parents,weights,mass,stiffness,damping_ratio):
parents = asarray(parents,dtype=int32)
return BindingSprings[parents.shape[1]](nodes,parents,weights,mass,stiffness,damping_ratio)
particle_binding_springs = ParticleBindingSprings
edge_binding_springs = BindingSprings2d
face_binding_springs = BindingSprings3d
| bsd-3-clause |
frreiss/tensorflow-fred | tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py | 6 | 5123 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the sequence datasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class SkipDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_skip_dataset(self, count):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).skip(count)
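# For reference: with the 10 input elements built above, skip(count) leaves
# 10 - count elements, while skip(n >= 10) and skip(-1) leave none and
# skip(0) leaves all 10 -- which is what the expected output counts in the
# tests below encode, e.g. (illustrative):
#
#   ds = dataset_ops.Dataset.range(10).skip(4)   # yields 6 elements: 4..9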
@combinations.generate(test_base.default_test_combinations())
def testSkipFewerThanInputs(self):
count = 4
num_outputs = 10 - count
self.run_core_tests(lambda: self._build_skip_dataset(count), num_outputs)
@combinations.generate(test_base.default_test_combinations())
def testSkipVarious(self):
# Skip more than inputs
self.run_core_tests(lambda: self._build_skip_dataset(20), 0)
# Skip exactly the input size
self.run_core_tests(lambda: self._build_skip_dataset(10), 0)
self.run_core_tests(lambda: self._build_skip_dataset(-1), 0)
# Skip nothing
self.run_core_tests(lambda: self._build_skip_dataset(0), 10)
@combinations.generate(test_base.default_test_combinations())
def testInvalidSkip(self):
with self.assertRaisesRegex(ValueError,
'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), 0)
class TakeDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_take_dataset(self, count):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).take(count)
@combinations.generate(test_base.default_test_combinations())
def testTakeFewerThanInputs(self):
count = 4
self.run_core_tests(lambda: self._build_take_dataset(count), count)
@combinations.generate(test_base.default_test_combinations())
def testTakeVarious(self):
# Take more than inputs
self.run_core_tests(lambda: self._build_take_dataset(20), 10)
# Take exactly the input size
self.run_core_tests(lambda: self._build_take_dataset(10), 10)
# Take all
self.run_core_tests(lambda: self._build_take_dataset(-1), 10)
# Take nothing
self.run_core_tests(lambda: self._build_take_dataset(0), 0)
def testInvalidTake(self):
with self.assertRaisesRegex(ValueError,
'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_take_dataset([1, 2]), 0)
class RepeatDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_repeat_dataset(self, count, take_count=3):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).take(
take_count).repeat(count)
@combinations.generate(test_base.default_test_combinations())
def testFiniteRepeat(self):
count = 10
self.run_core_tests(lambda: self._build_repeat_dataset(count), 3 * count)
@combinations.generate(test_base.default_test_combinations())
def testEmptyRepeat(self):
self.run_core_tests(lambda: self._build_repeat_dataset(0), 0)
@combinations.generate(test_base.default_test_combinations())
def testInfiniteRepeat(self):
self.verify_unused_iterator(
lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
# Test repeat empty dataset
self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), 0)
@combinations.generate(test_base.default_test_combinations())
def testInvalidRepeat(self):
with self.assertRaisesRegex(ValueError,
'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0), 0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Deepakpatle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py | 118 | 7755 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.host_mock import MockHost
class TestBaselineOptimizer(BaselineOptimizer):
def __init__(self, mock_results_by_directory):
host = MockHost()
BaselineOptimizer.__init__(self, host, host.port_factory.all_port_names())
self._mock_results_by_directory = mock_results_by_directory
# We override this method for testing so we don't have to construct an
# elaborate mock file system.
def read_results_by_directory(self, baseline_name):
return self._mock_results_by_directory
def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
self.new_results_by_directory = new_results_by_directory
class BaselineOptimizerTest(unittest.TestCase):
def _assertOptimization(self, results_by_directory, expected_new_results_by_directory):
baseline_optimizer = TestBaselineOptimizer(results_by_directory)
self.assertTrue(baseline_optimizer.optimize('mock-baseline.png'))
self.assertEqual(baseline_optimizer.new_results_by_directory, expected_new_results_by_directory)
def _assertOptimizationFailed(self, results_by_directory):
baseline_optimizer = TestBaselineOptimizer(results_by_directory)
self.assertFalse(baseline_optimizer.optimize('mock-baseline.png'))
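# In the dictionaries passed to the helpers above, keys are baseline
# directories and values stand in for checksums of baseline files; an
# optimization succeeds when identical checksums can be hoisted toward a
# common fallback directory, e.g. (illustrative):
#
#   {'LayoutTests/platform/mac-lion': 'aaa', 'LayoutTests/platform/mac': 'aaa'}
#       optimizes to {'LayoutTests/platform/mac': 'aaa'}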
def test_move_baselines(self):
host = MockHost()
host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac-lion/another/test-expected.txt', 'result A')
host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac-lion-wk2/another/test-expected.txt', 'result A')
host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac/another/test-expected.txt', 'result B')
baseline_optimizer = BaselineOptimizer(host, host.port_factory.all_port_names())
baseline_optimizer._move_baselines('another/test-expected.txt', {
'LayoutTests/platform/mac-lion': 'aaa',
'LayoutTests/platform/mac-lion-wk2': 'aaa',
'LayoutTests/platform/mac': 'bbb',
}, {
'LayoutTests/platform/mac': 'aaa',
})
self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/LayoutTests/platform/mac/another/test-expected.txt'), 'result A')
def test_efl(self):
self._assertOptimization({
'LayoutTests/platform/efl': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
}, {
'LayoutTests/platform/efl': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_no_add_mac_future(self):
self._assertOptimization({
'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
}, {
'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
})
def test_mac_future(self):
self._assertOptimization({
'LayoutTests/platform/mac-lion': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
}, {
'LayoutTests/platform/mac-lion': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_qt_unknown(self):
self._assertOptimization({
'LayoutTests/platform/qt': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
}, {
'LayoutTests/platform/qt': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_win_does_not_drop_to_win_7sp0(self):
self._assertOptimization({
'LayoutTests/platform/win': '1',
'LayoutTests/platform/mac': '2',
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/qt': '4',
}, {
'LayoutTests/platform/win': '1',
'LayoutTests/platform/mac': '2',
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/qt': '4',
})
def test_common_directory_includes_root(self):
# This test case checks that we don't throw an exception when we fail
# to optimize.
self._assertOptimizationFailed({
'LayoutTests/platform/gtk': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
'LayoutTests/platform/qt': 'bcbd457d545986b7abf1221655d722363079ac87',
'LayoutTests/platform/mac': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
})
self._assertOptimization({
'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
}, {
'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
})
def test_complex_shadowing(self):
# This test relies on OS specific functionality, so it doesn't work on Windows.
# FIXME: What functionality does this rely on? When can we remove this if?
if sys.platform == 'win32':
return
self._assertOptimization({
'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
}, {
'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
})
def test_virtual_ports_filtered(self):
self._assertOptimization({
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/efl': '3',
'LayoutTests/platform/qt': '4',
'LayoutTests/platform/mac': '5',
}, {
'LayoutTests': '3',
'LayoutTests/platform/qt': '4',
'LayoutTests/platform/mac': '5',
})
| bsd-3-clause |
srusskih/SublimeJEDI | dependencies/jedi/third_party/django-stubs/mypy_django_plugin/transformers/fields.py | 2 | 7101 | from typing import Optional, Tuple, cast
from django.db.models.fields import Field
from django.db.models.fields.related import RelatedField
from mypy.nodes import AssignmentStmt, NameExpr, TypeInfo
from mypy.plugin import FunctionContext
from mypy.types import AnyType, Instance
from mypy.types import Type as MypyType
from mypy.types import TypeOfAny
from mypy_django_plugin.django.context import DjangoContext
from mypy_django_plugin.lib import fullnames, helpers
def _get_current_field_from_assignment(ctx: FunctionContext, django_context: DjangoContext) -> Optional[Field]:
outer_model_info = helpers.get_typechecker_api(ctx).scope.active_class()
if (outer_model_info is None
or not helpers.is_model_subclass_info(outer_model_info, django_context)):
return None
field_name = None
for stmt in outer_model_info.defn.defs.body:
if isinstance(stmt, AssignmentStmt):
if stmt.rvalue == ctx.context:
if not isinstance(stmt.lvalues[0], NameExpr):
return None
field_name = stmt.lvalues[0].name
break
if field_name is None:
return None
model_cls = django_context.get_model_class_by_fullname(outer_model_info.fullname)
if model_cls is None:
return None
current_field = model_cls._meta.get_field(field_name)
return current_field
def reparametrize_related_field_type(related_field_type: Instance, set_type, get_type) -> Instance:
args = [
helpers.convert_any_to_type(related_field_type.args[0], set_type),
helpers.convert_any_to_type(related_field_type.args[1], get_type),
]
return helpers.reparametrize_instance(related_field_type, new_args=args)
def fill_descriptor_types_for_related_field(ctx: FunctionContext, django_context: DjangoContext) -> MypyType:
current_field = _get_current_field_from_assignment(ctx, django_context)
if current_field is None:
return AnyType(TypeOfAny.from_error)
assert isinstance(current_field, RelatedField)
related_model_cls = django_context.get_field_related_model_cls(current_field)
if related_model_cls is None:
return AnyType(TypeOfAny.from_error)
default_related_field_type = set_descriptor_types_for_field(ctx)
# self reference with abstract=True on the model where ForeignKey is defined
current_model_cls = current_field.model
if (current_model_cls._meta.abstract
and current_model_cls == related_model_cls):
# for all derived non-abstract classes, set variable with this name to
# __get__/__set__ of ForeignKey of derived model
for model_cls in django_context.all_registered_model_classes:
if issubclass(model_cls, current_model_cls) and not model_cls._meta.abstract:
derived_model_info = helpers.lookup_class_typeinfo(helpers.get_typechecker_api(ctx), model_cls)
if derived_model_info is not None:
fk_ref_type = Instance(derived_model_info, [])
derived_fk_type = reparametrize_related_field_type(default_related_field_type,
set_type=fk_ref_type, get_type=fk_ref_type)
helpers.add_new_sym_for_info(derived_model_info,
name=current_field.name,
sym_type=derived_fk_type)
related_model = related_model_cls
related_model_to_set = related_model_cls
if related_model_to_set._meta.proxy_for_model is not None:
related_model_to_set = related_model_to_set._meta.proxy_for_model
typechecker_api = helpers.get_typechecker_api(ctx)
related_model_info = helpers.lookup_class_typeinfo(typechecker_api, related_model)
if related_model_info is None:
# maybe no type stub
related_model_type = AnyType(TypeOfAny.unannotated)
else:
related_model_type = Instance(related_model_info, []) # type: ignore
related_model_to_set_info = helpers.lookup_class_typeinfo(typechecker_api, related_model_to_set)
if related_model_to_set_info is None:
# maybe no type stub
related_model_to_set_type = AnyType(TypeOfAny.unannotated)
else:
related_model_to_set_type = Instance(related_model_to_set_info, []) # type: ignore
# replace Any with referred_to_type
return reparametrize_related_field_type(default_related_field_type,
set_type=related_model_to_set_type,
get_type=related_model_type)
def get_field_descriptor_types(field_info: TypeInfo, is_nullable: bool) -> Tuple[MypyType, MypyType]:
set_type = helpers.get_private_descriptor_type(field_info, '_pyi_private_set_type',
is_nullable=is_nullable)
get_type = helpers.get_private_descriptor_type(field_info, '_pyi_private_get_type',
is_nullable=is_nullable)
return set_type, get_type
def set_descriptor_types_for_field(ctx: FunctionContext) -> Instance:
default_return_type = cast(Instance, ctx.default_return_type)
is_nullable = False
null_expr = helpers.get_call_argument_by_name(ctx, 'null')
if null_expr is not None:
is_nullable = helpers.parse_bool(null_expr) or False
set_type, get_type = get_field_descriptor_types(default_return_type.type, is_nullable)
return helpers.reparametrize_instance(default_return_type, [set_type, get_type])
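# Illustration (not part of the plugin): for a model field declared as
#
#   name = models.CharField(max_length=10, null=True)
#
# the nullable set/get descriptor types resolved above make `instance.name`
# type-check as Optional[str] on reads and accept Optional[str] on writes.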
def determine_type_of_array_field(ctx: FunctionContext, django_context: DjangoContext) -> MypyType:
default_return_type = set_descriptor_types_for_field(ctx)
base_field_arg_type = helpers.get_call_argument_type_by_name(ctx, 'base_field')
if not base_field_arg_type or not isinstance(base_field_arg_type, Instance):
return default_return_type
base_type = base_field_arg_type.args[1] # extract __get__ type
args = []
for default_arg in default_return_type.args:
args.append(helpers.convert_any_to_type(default_arg, base_type))
return helpers.reparametrize_instance(default_return_type, args)
def transform_into_proper_return_type(ctx: FunctionContext, django_context: DjangoContext) -> MypyType:
default_return_type = ctx.default_return_type
assert isinstance(default_return_type, Instance)
outer_model_info = helpers.get_typechecker_api(ctx).scope.active_class()
if (outer_model_info is None
or not helpers.is_model_subclass_info(outer_model_info, django_context)):
return ctx.default_return_type
assert isinstance(outer_model_info, TypeInfo)
if helpers.has_any_of_bases(default_return_type.type, fullnames.RELATED_FIELDS_CLASSES):
return fill_descriptor_types_for_related_field(ctx, django_context)
if default_return_type.type.has_base(fullnames.ARRAY_FIELD_FULLNAME):
return determine_type_of_array_field(ctx, django_context)
return set_descriptor_types_for_field(ctx)
| mit |
ybotkiller/ybotkiller | youtube-grabber/youtube_grabber/comment_bucket.py | 1 | 2026 | # -*- coding: utf-8
import csv
import os
import re
import json
import subprocess
class CommentBucket(object):
def __init__(self, video_id):
self.video_id = video_id
self.comments = []
self.fieldnames = ["id", "author", "time", "timestamp", "text",
"likes", "hasReplies", "numReplies", "authorLink"]
def _format(self, comments, filtered_comments):
for comment in comments:
filtered_comments.append(
{k: comment[k] for k in self.fieldnames if k in comment})
if "replies" in comment:
self._format(comment["replies"], filtered_comments)
def _clear(self):
for comment in self.comments:
for el in comment.iteritems():
try:
comment[el[0]] = re.sub(
u"[^a-zA-Zа-яА-Я\s\d,.:?!()<>\"'-_;^*]+",
"", el[1])
except:
pass
def fetch_all_comments(self):
subprocess.call("node fetch-all-comments.js {} > fetched.json".
format(self.video_id),
shell=True)
with open("fetched.json") as fetched:
self.comments = json.load(fetched)
os.remove("fetched.json")
filtered_comments = []
self._format(self.comments, filtered_comments)
self.comments = filtered_comments
self._clear()
return self.comments
def get_json(self):
with open("{}-comments.json".format(self.video_id), "w") as json_file:
json.dump(self.comments, json_file,
indent=4, sort_keys=True, separators=(',', ':'))
def get_csv(self):
with open("{}-comments.csv".format(self.video_id), "w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=self.fieldnames)
writer.writeheader()
for el in self.comments:
writer.writerow(dict((s[0],
unicode(s[1]).encode("utf-8")) for s in el.iteritems()))
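# Example usage (sketch; the video id is illustrative and fetch-all-comments.js
# must be available where the subprocess call above expects it):
#
#   bucket = CommentBucket('dQw4w9WgXcQ')
#   bucket.fetch_all_comments()
#   bucket.get_json()
#   bucket.get_csv()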
| apache-2.0 |
abenzbiria/clients_odoo | openerp/tools/image.py | 70 | 9132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from PIL import Image
from PIL import ImageEnhance
from random import randint
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
- create a thumbnail of the source image using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
height means an automatically computed value based respectively
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
if size == (None, None):
return base64_source
image_stream = StringIO.StringIO(base64_source.decode(encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
# create a thumbnail: will resize and keep ratios, then sharpen for better looking result
image.thumbnail(size, Image.ANTIALIAS)
sharpener = ImageEnhance.Sharpness(image.convert('RGBA'))
resized_image = sharpener.enhance(2.0)
# create a transparent image for background and paste the image on it
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) / 2, (size[1] - resized_image.size[1]) / 2))
if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
image = image.convert("RGB")
background_stream = StringIO.StringIO()
image.save(background_stream, filetype)
return background_stream.getvalue().encode(encoding)
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
image size: 180x180.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
:param original: file object on the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(StringIO.StringIO(original))
image = Image.new('RGB', original.size)
# generate the background color, paste it as background
if randomize:
color = (randint(32, 224), randint(32, 224), randint(32, 224))
image.paste(color)
image.paste(original, mask=original)
# return the new image
buffer = StringIO.StringIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
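# Example (sketch): colorize the transparent background of an avatar read
# from disk; the file name is illustrative only.
#
#   colorized = image_colorize(file('avatar.png', 'rb').read())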
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
image; if False, all returned values will be False
:param return_{..}: if set, computes and return the related resizing
of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small)
return return_dict
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = file(sys.argv[1],'rb').read().encode('base64')
new = image_resize_image(img, (128,100))
file(sys.argv[2], 'wb').write(new.decode('base64'))
| agpl-3.0 |
CLOUGH/info3180-project-4 | server/lib/werkzeug/contrib/profiler.py | 315 | 4920 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web applications. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stdout).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
:param stream: the stream for the profiled stats. defaults to stdout.
:param sort_by: a tuple of columns to sort the result by.
:param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = ''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
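# Example (sketch): instead of printing a summary, write one pstats file per
# request into a directory (which must already exist; the path is
# illustrative):
#
#   app = ProfilerMiddleware(app, profile_dir='./profiles')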
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
| apache-2.0 |
Kronuz/pyScss | scss/scss_meta.py | 1 | 2192 | #-*- coding: utf-8 -*-
"""
pyScss, a Scss compiler for Python
@author German M. Bravo (Kronuz) <german.mb@gmail.com>
@version 1.3.7
@see https://github.com/Kronuz/pyScss
@copyright (c) 2012-2013 German M. Bravo (Kronuz)
@license MIT License
http://www.opensource.org/licenses/mit-license.php
pyScss compiles Scss, a superset of CSS that is more powerful, elegant and
easier to maintain than plain-vanilla CSS. The library acts as a CSS source code
preprocesor which allows you to use variables, nested rules, mixins, andhave
inheritance of rules, all with a CSS-compatible syntax which the preprocessor
then compiles to standard CSS.
Scss, as an extension of CSS, helps keep large stylesheets well-organized. It
borrows concepts and functionality from projects such as OOCSS and other similar
frameworks like Sass. It's built on top of the original PHP xCSS codebase
structure but it's been completely rewritten, many bugs have been fixed and it
has been extensively extended to support almost the full range of Sass' Scss
syntax and functionality.
Bits of code in pyScss come from various projects:
Compass:
(c) 2009 Christopher M. Eppstein
http://compass-style.org/
Sass:
(c) 2006-2009 Hampton Catlin and Nathan Weizenbaum
http://sass-lang.com/
xCSS:
(c) 2010 Anton Pawlik
http://xcss.antpaw.org/docs/
This file defines Meta data, according to PEP314
(http://www.python.org/dev/peps/pep-0314/) which is common to both pyScss
and setup.py distutils.
We create this here so this information can be compatible with BOTH
Python 2.x and Python 3.x so setup.py can use it when building pyScss
for both Py3.x and Py2.x
"""
from __future__ import unicode_literals
import sys
VERSION_INFO = (1, 3, 7)
DATE_INFO = (2020, 3, 26) # YEAR, MONTH, DAY
VERSION = '.'.join(str(i) for i in VERSION_INFO)
REVISION = '%04d%02d%02d' % DATE_INFO
BUILD_INFO = "pyScss v" + VERSION + " (" + REVISION + ")"
AUTHOR = "German M. Bravo (Kronuz)"
AUTHOR_EMAIL = 'german.mb@gmail.com'
URL = 'http://github.com/Kronuz/pyScss'
DOWNLOAD_URL = 'http://github.com/Kronuz/pyScss/tarball/v' + VERSION
LICENSE = "MIT"
PROJECT = "pyScss"
| mit |
mwimble/ArduinoConroller | ewyn_ws/install/lib/python2.7/dist-packages/xbee/ieee.py | 12 | 6853 | """
ieee.py
By Paul Malmsten, 2010
Inspired by code written by Amit Synderman and Marco Sangalli
pmalmsten@gmail.com
This module provides an XBee (IEEE 802.15.4) API library.
"""
import struct
from xbee.base import XBeeBase
class XBee(XBeeBase):
"""
Provides an implementation of the XBee API for IEEE 802.15.4 modules
with recent firmware.
Commands may be sent to a device by instantiating this class with
a serial port object (see PySerial) and then calling the send
method with the proper information specified by the API. Data may
be read from a device synchronously by calling wait_read_frame. For
asynchronous reads, see the definition of XBeeBase.
"""
# Packets which can be sent to an XBee
# Format:
# {name of command:
# [{name:field name, len:field length, default: default value sent}
# ...
# ]
# ...
# }
api_commands = {"at":
[{'name':'id', 'len':1, 'default':'\x08'},
{'name':'frame_id', 'len':1, 'default':'\x00'},
{'name':'command', 'len':2, 'default':None},
{'name':'parameter', 'len':None, 'default':None}],
"queued_at":
[{'name':'id', 'len':1, 'default':'\x09'},
{'name':'frame_id', 'len':1, 'default':'\x00'},
{'name':'command', 'len':2, 'default':None},
{'name':'parameter', 'len':None, 'default':None}],
"remote_at":
[{'name':'id', 'len':1, 'default':'\x17'},
{'name':'frame_id', 'len':1, 'default':'\x00'},
# dest_addr_long is 8 bytes (64 bits), so use an unsigned long long
{'name':'dest_addr_long', 'len':8, 'default':struct.pack('>Q', 0)},
{'name':'dest_addr', 'len':2, 'default':'\xFF\xFE'},
{'name':'options', 'len':1, 'default':'\x02'},
{'name':'command', 'len':2, 'default':None},
{'name':'parameter', 'len':None, 'default':None}],
"tx_long_addr":
[{'name':'id', 'len':1, 'default':'\x00'},
{'name':'frame_id', 'len':1, 'default':'\x00'},
{'name':'dest_addr', 'len':8, 'default':None},
{'name':'options', 'len':1, 'default':'\x00'},
{'name':'data', 'len':None, 'default':None}],
"tx":
[{'name':'id', 'len':1, 'default':'\x01'},
{'name':'frame_id', 'len':1, 'default':'\x00'},
{'name':'dest_addr', 'len':2, 'default':None},
{'name':'options', 'len':1, 'default':'\x00'},
{'name':'data', 'len':None, 'default':None}]
}
# Packets which can be received from an XBee
# Format:
# {id byte received from XBee:
# {name: name of response
# structure:
# [ {'name': name of field, 'len':length of field}
# ...
# ]
# parse_as_io_samples:name of field to parse as io
# }
# ...
# }
#
api_responses = {"\x80":
{'name':'rx_long_addr',
'structure':
[{'name':'source_addr', 'len':8},
{'name':'rssi', 'len':1},
{'name':'options', 'len':1},
{'name':'rf_data', 'len':None}]},
"\x81":
{'name':'rx',
'structure':
[{'name':'source_addr', 'len':2},
{'name':'rssi', 'len':1},
{'name':'options', 'len':1},
{'name':'rf_data', 'len':None}]},
"\x82":
{'name':'rx_io_data_long_addr',
'structure':
[{'name':'source_addr_long','len':8},
{'name':'rssi', 'len':1},
{'name':'options', 'len':1},
{'name':'samples', 'len':None}],
'parse_as_io_samples':'samples'},
"\x83":
{'name':'rx_io_data',
'structure':
[{'name':'source_addr', 'len':2},
{'name':'rssi', 'len':1},
{'name':'options', 'len':1},
{'name':'samples', 'len':None}],
'parse_as_io_samples':'samples'},
"\x89":
{'name':'tx_status',
'structure':
[{'name':'frame_id', 'len':1},
{'name':'status', 'len':1}]},
"\x8a":
{'name':'status',
'structure':
[{'name':'status', 'len':1}]},
"\x88":
{'name':'at_response',
'structure':
[{'name':'frame_id', 'len':1},
{'name':'command', 'len':2},
{'name':'status', 'len':1},
{'name':'parameter', 'len':None}]},
"\x97":
{'name':'remote_at_response',
'structure':
[{'name':'frame_id', 'len':1},
{'name':'source_addr_long','len':8},
{'name':'source_addr', 'len':2},
{'name':'command', 'len':2},
{'name':'status', 'len':1},
{'name':'parameter', 'len':None}]},
}
def __init__(self, *args, **kwargs):
# Call the super class constructor to save the serial port
super(XBee, self).__init__(*args, **kwargs)
| gpl-2.0 |
tangyuxing/context | external/liblinear-1.94/python/liblinear.py | 37 | 8400 | #!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
try:
dirname = path.dirname(path.abspath(__file__))
if sys.platform == 'win32':
liblinear = CDLL(path.join(dirname, r'..\windows\liblinear.dll'))
else:
liblinear = CDLL(path.join(dirname, '../liblinear.so.1'))
except:
# For unix the prefix 'lib' is not considered.
if find_library('linear'):
liblinear = CDLL(find_library('linear'))
elif find_library('liblinear'):
liblinear = CDLL(find_library('liblinear'))
else:
raise Exception('LIBLINEAR library not found.')
# Construct constants
SOLVER_TYPE = ['L2R_LR', 'L2R_L2LOSS_SVC_DUAL', 'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL',\
'MCSVM_CS', 'L1R_L2LOSS_SVC', 'L1R_LR', 'L2R_LR_DUAL', \
None, None, None, \
'L2R_L2LOSS_SVR', 'L2R_L2LOSS_SVR_DUAL', 'L2R_L1LOSS_SVR_DUAL']
for i, s in enumerate(SOLVER_TYPE):
if s is not None: exec("%s = %d" % (s, i))
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class feature_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return '%d:%g' % (self.index, self.value)
def gen_feature_nodearray(xi, feature_max=None, issparse=True):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
xi = [0] + xi # idx should start from 1
index_range = range(1, len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if issparse:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (feature_node * (len(index_range)+2))()
ret[-1].index = -1 # for bias term
ret[-2].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range :
max_idx = index_range[-1]
return ret, max_idx
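# Worked example (illustrative): gen_feature_nodearray({1: 0.5, 3: 1.0})
# yields a 4-slot feature_node array holding "1:0.5" and "3:1" followed by
# two index=-1 slots (the second-to-last is reserved for the bias term),
# together with max_idx == 3.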
class problem(Structure):
_names = ["l", "n", "y", "x", "bias"]
_types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, bias = -1):
if len(y) != len(x) :
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
self.bias = -1
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_feature_nodearray(xi)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = y[i]
self.x = (POINTER(feature_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
self.set_bias(bias)
def set_bias(self, bias):
if self.bias == bias:
return
if bias >= 0 and self.bias < 0:
self.n += 1
node = feature_node(self.n, bias)
if bias < 0 and self.bias >= 0:
self.n -= 1
node = feature_node(-1, bias)
for xi in self.x_space:
xi[-2] = node
self.bias = bias
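# Note: the bias is carried as an extra trailing feature node: setting
# bias >= 0 grows n by one and writes feature_node(self.n, bias) into the
# reserved second-to-last slot of every row, while switching back to a
# negative bias shrinks n and restores an index=-1 terminator there.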
class parameter(Structure):
_names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight", "p"]
_types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options is None:
options = ''
self.parse_options(options)
def __str__(self):
s = ''
attrs = parameter._names + list(self.__dict__.keys())
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
s += (' %s: %s\n' % (attr, val))
s = s.strip()
return s
def set_to_default_values(self):
self.solver_type = L2R_L2LOSS_SVC_DUAL
self.eps = float('inf')
self.C = 1
self.p = 0.1
self.nr_weight = 0
self.weight_label = (c_int * 0)()
self.weight = (c_double * 0)()
self.bias = -1
self.cross_validation = False
self.nr_fold = 0
self.print_func = cast(None, PRINT_STRING_FUN)
def parse_options(self, options):
if isinstance(options, list):
argv = options
elif isinstance(options, str):
argv = options.split()
else:
raise TypeError("arg 1 should be a list or a str.")
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv) :
if argv[i] == "-s":
i = i + 1
self.solver_type = int(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-B":
i = i + 1
self.bias = float(argv[i])
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2 :
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
else :
raise ValueError("Wrong options")
i += 1
liblinear.set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
if self.eps == float('inf'):
if self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]:
self.eps = 0.01
elif self.solver_type in [L2R_L2LOSS_SVR]:
self.eps = 0.001
elif self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L2R_LR_DUAL]:
self.eps = 0.1
elif self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]:
self.eps = 0.01
elif self.solver_type in [L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
self.eps = 0.1
class model(Structure):
_names = ["param", "nr_class", "nr_feature", "w", "label", "bias"]
_types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
liblinear.free_and_destroy_model(pointer(self))
def get_nr_feature(self):
return liblinear.get_nr_feature(self)
def get_nr_class(self):
return liblinear.get_nr_class(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
liblinear.get_labels(self, labels)
return labels[:nr_class]
def is_probability_model(self):
return (liblinear.check_probability_model(self) == 1)
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> model
Convert a ctypes POINTER(model) to a Python model
"""
if not model_ptr:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_double)])
fillprototype(liblinear.predict_values, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_double, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])
fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])
fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])
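# A minimal end-to-end sketch, exercising only the wrappers defined above;
# the two-point toy dataset and the options string are illustrative
# assumptions, not part of the library itself.
if __name__ == '__main__':
    prob = problem([1, -1], [{1: 1, 3: 1}, {1: -1, 3: -1}])
    param = parameter('-s 0 -c 1')
    err_msg = liblinear.check_parameter(prob, param)
    if err_msg:
        raise ValueError(err_msg)
    m = toPyModel(liblinear.train(prob, param))
    print(m.get_labels())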
| bsd-2-clause |
bwohlberg/sporco | examples/scripts/tv/tvl1den_gry.py | 1 | 2764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Greyscale ℓ1-TV Denoising
=========================
This example demonstrates the use of class :class:`.tvl1.TVL1Denoise` for removing salt & pepper noise from a greyscale image using Total Variation regularization with an ℓ1 data fidelity term (ℓ1-TV denoising).
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import tvl1
from sporco import util
from sporco import signal
from sporco import metric
from sporco import plot
"""
Load reference image.
"""
img = util.ExampleImages().image('monarch.png', scaled=True,
idxexp=np.s_[:,160:672], gray=True)
"""
Construct test image corrupted by 20% salt & pepper noise.
"""
np.random.seed(12345)
imgn = signal.spnoise(img, 0.2)
"""
Set regularization parameter and options for ℓ1-TV denoising solver. The regularization parameter used here has been manually selected for good performance.
"""
lmbda = 8e-1
opt = tvl1.TVL1Denoise.Options({'Verbose': True, 'MaxMainIter': 200,
'RelStopTol': 5e-3, 'gEvalY': False,
'AutoRho': {'Enabled': True}})
"""
Create solver object and solve, returning the the denoised image ``imgr``.
"""
b = tvl1.TVL1Denoise(imgn, lmbda, opt)
imgr = b.solve()
"""
Display solve time and denoising performance.
"""
print("TVL1Denoise solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgr))
"""
Display reference, corrupted, and denoised images.
"""
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Corrupted', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgr, title=r'Restored ($\ell_1$-TV)', fig=fig)
fig.show()
"""
Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
"""
its = b.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
| bsd-3-clause |
CWSL/cwsl-ctools | sdm/fast_extract/sdm_extract.py | 3 | 5924 | #!/usr/bin/env python
""" This is a specialised script to extract a time series or histogram from SDM CoD files.
It dumps straight to JSON format, and doesn't write out a netCDF file.
"""
import json
import argparse
import string
import numpy as np
import netCDF4 as nc4
from cod_file import CodFile
def main(args):
""" This script has not gone through a Code review
- it should be checked before becoming a production service. This is
because the exact date relationship between the reanalysis data
and the AWAP data should be confirmed. AWAP data for rainfall is based
on rain gauges, so the rainfall is recorded against the previous day.
Is this the same for ERA-INT?
"""
var_dict = {"rain": ("rr_calib", "rr", "rain"),
"tmin": ("tmin", "tmin", "tmin"),
"tmax": ("tmax", "tmax", "tmax")}
this_var = var_dict[args.variable]
# Extract the required values from the cod file.
var_dict = dict(var_name=this_var[0])
awap_pattern = string.Template("/local/ep1_1/data/staging_data/AWAP/daily_0.05/${var_name}/${var_name}_daily_0.05*.nc").substitute(var_dict)
input_awap = nc4.MFDataset(awap_pattern, aggdim="time")
in_var = input_awap.variables[this_var[1]]
lat_var = input_awap.variables["lat"]
lon_var = input_awap.variables["lon"]
# Grab the time series of interest.
y_val = get_index(args.latitude, lat_var)
x_val = get_index(args.longitude, lon_var)
base_ts = in_var[:, y_val, x_val]
# Ensure the base timeseries is a masked array.
base_ts = np.ma.masked_array(base_ts)
# Load in the CoD file.
cod = CodFile(args.cod_file)
times = input_awap.variables["time"]
indices = calculate_time_index(cod.projected_dates, times)
input_awap.close()
# Now pull out the required values.
outts = base_ts[indices]
# Filter bad values from the time series.
out_dates, out_values, num_missing = filter_timeseries(cod.base_dates, outts, this_var[1])
if args.output_type == "timeseries":
output = write_timeseries(out_dates, this_var[2], out_values, num_missing)
elif args.output_type == "histogram":
output = write_histogram(out_dates, this_var[2], out_values, int(args.bins), num_missing)
else:
raise Exception("output_type: {} not understood"
.format(args.output_type))
with open(args.outfile, 'w') as output_file:
output_file.write(json.dumps(output))
def filter_timeseries(date_list, timeseries, var_name):
""" Filter out invalid values in the timeseries."""
num_bad = len(timeseries) - timeseries.count()
# Filter out any masked (missing) values.
to_keep = ~np.ma.getmaskarray(timeseries)
output_ts = timeseries[to_keep]
output_dates = np.array(date_list)[to_keep]
# Remove anomalously low values from temperature fields.
# (temps are in Kelvin)
if var_name in ["tmax", "tmin"]:
num_bad += sum(output_ts < 100.0)
to_keep = output_ts >= 100.0
output_ts = output_ts[to_keep]
output_dates = output_dates[to_keep]
return output_dates, output_ts, num_bad
def write_timeseries(date_list, variable_name,
timeseries, missing_vals):
""" Create an output dictionary in timeseries form. """
output_strings = [datething.isoformat()
for datething in date_list]
output = {"times": output_strings,
variable_name: timeseries.tolist(),
"filtered_values": int(missing_vals)}
return output
def write_histogram(date_list, variable_name,
timeseries, bins, missing_vals):
""" Create an output dictionary in timeseries form. """
# Use the numpy histogram calculator.
counts, bins = np.histogram(timeseries, bins=bins)
# Write it out using json.dumps
# we write out the count in each bin, the
# bins and the total number of entries.
outbins = []
for i in xrange(len(bins)-1):
outbins.append("(" + str(bins[i]) + "," + str(bins[i+1]) + ")")
output = {"bins": outbins,
"counts": counts.tolist(),
"num_entries": len(timeseries),
"time_bounds": [date_list[0].isoformat(),
date_list[-1].isoformat()],
"filtered_values": int(missing_vals)}
return output
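# Illustrative output shape: with bins=2 and values [1, 2, 3, 9], np.histogram
# gives edges [1.0, 5.0, 9.0], so the result holds counts [3, 1] against bins
# ['(1.0,5.0)', '(5.0,9.0)'], num_entries == 4, plus the ISO date bounds and
# the filtered-value count.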
def get_index(value, nc_var):
""" Given a netCDF variable, get the index of a particular lat/lon point.
Rounds to the nearest value.
"""
n_steps = nc_var.shape[0] - 1
var_range = nc_var[-1] - nc_var[0]
step_size = var_range / n_steps
change = float(value) - nc_var[0]
index = int(round(change / step_size))
return index
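# Illustrative arithmetic (hypothetical axis): for a latitude axis running
# from -44.5 to -10.0 in 0.05-degree steps, get_index(-30.0, lat_var)
# computes round((-30.0 - -44.5) / 0.05) == 290.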
def calculate_time_index(datething, nc_time):
""" Given a python datetime object, return the index of the matching time variable."""
num_steps = nc_time.shape[0] - 1
time_range = nc_time[-1] - nc_time[0]
each_step = time_range / num_steps
step = int(round(each_step))
axis_numbers = nc4.date2num(datething, nc_time.units, nc_time.calendar)
return_vals = (axis_numbers - nc_time[0]) / step
return return_vals.astype(int)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("latitude", help="The latitude of the the time-series to extract")
parser.add_argument("longitude", help="The longitude of the time-series to extract")
parser.add_argument("variable", help="The variable name to extract")
parser.add_argument("output_type", help="The type of output (histogram or timeseries)")
parser.add_argument("bins", help="The number of bins for the output histogram")
parser.add_argument("cod_file", help="The path to the change-of-date file")
parser.add_argument("outfile", help="The path to write the output to")
args = parser.parse_args()
main(args)
| apache-2.0 |
qilicun/python | python2/PyMOTW-1.132/PyMOTW/urlparse/urlparse_urlparseattrs.py | 1 | 1566 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Parsing URLs
"""
#end_pymotw_header
from urlparse import urlparse
parsed = urlparse('http://user:pass@NetLoc:80/path;parameters?query=argument#fragment')
print 'scheme :', parsed.scheme
print 'netloc :', parsed.netloc
print 'path :', parsed.path
print 'params :', parsed.params
print 'query :', parsed.query
print 'fragment:', parsed.fragment
print 'username:', parsed.username
print 'password:', parsed.password
print 'hostname:', parsed.hostname, '(netloc in lower case)'
print 'port :', parsed.port
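# Expected output for the URL above (the port is parsed as an int, and the
# hostname is normalised to lower case):
# scheme : http
# netloc : user:pass@NetLoc:80
# path : /path
# params : parameters
# query : query=argument
# fragment: fragment
# username: user
# password: pass
# hostname: netloc (netloc in lower case)
# port : 80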
| gpl-3.0 |
CatsAndDogsbvba/odoo | addons/l10n_gt/__openerp__.py | 260 | 2305 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Soluciones Tecnológicas Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnológicas Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal Guatemalan chart of accounts that can be used
# as a base to build a more complex one. It also includes a chart of taxes and
# the Quetzal currency.
#
# This module is based on the UK minimal chart of accounts:
# Copyright (c) 2004-2009 Seath Solutions Ltd. All Rights Reserved.
# Geoff Gardiner, Seath Solutions Ltd (http://www.seathsolutions.com/)
#
# This module works with OpenERP 6.0
#
{
'name': 'Guatemala - Accounting',
'version': '3.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Guatemala.
=====================================================================
Adds the accounting chart for Guatemala. It also includes taxes and
the Quetzal currency.""",
'author': 'José Rodrigo Fernández Menegazzo',
'website': 'http://solucionesprisma.com/',
'depends': ['base', 'account', 'account_chart'],
'data': [
'account_types.xml',
'account_chart.xml',
'account_tax.xml',
'l10n_gt_base.xml',
],
'demo': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
takis/odoo | addons/mail/mail_alias.py | 220 | 15533 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import unicodedata
from openerp.osv import fields, osv
from openerp.tools import ustr
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
"""Suboptimal-but-better-than-nothing way to replace accented
latin letters by an ASCII equivalent. Will obviously change the
meaning of input_str and work only for some cases."""
input_str = ustr(input_str)
nkfd_form = unicodedata.normalize('NFKD', input_str)
return u''.join([c for c in nkfd_form if not unicodedata.combining(c)])
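# e.g. remove_accents(u'Ångström') -> u'Angstrom': NFKD splits each accented
# letter into its base letter plus combining marks, which are then dropped.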
class mail_alias(osv.Model):
"""A Mail Alias is a mapping of an email address with a given OpenERP Document
model. It is used by OpenERP's mail gateway when processing incoming emails
sent to the system. If the recipient address (To) of the message matches
a Mail Alias, the message will be processed following the rules
of that alias. If the message is a reply, it will be attached to the
existing discussion on the corresponding record, otherwise a new
record of the corresponding model will be created.
This is meant to be used in combination with a catch-all email configuration
on the company's mail server, so that as soon as a new mail.alias is
created, it becomes immediately usable and OpenERP will accept email for it.
"""
_name = 'mail.alias'
_description = "Email Aliases"
_rec_name = 'alias_name'
_order = 'alias_model_id, alias_name'
def _get_alias_domain(self, cr, uid, ids, name, args, context=None):
ir_config_parameter = self.pool.get("ir.config_parameter")
domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
return dict.fromkeys(ids, domain or "")
_columns = {
'alias_name': fields.char('Alias Name',
help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <jobs@example.odoo.com>",),
'alias_model_id': fields.many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
help="The model (Odoo Document Kind) to which this alias "
"corresponds. Any incoming email that does not reply to an "
"existing record will cause the creation of a new record "
"of this model (e.g. a Project Task)",
# hack to only allow selecting mail_thread models (we might
# (have a few false positives, though)
domain="[('field_id.name', '=', 'message_ids')]"),
'alias_user_id': fields.many2one('res.users', 'Owner',
help="The owner of records created upon receiving emails on this alias. "
"If this field is not set the system will attempt to find the right owner "
"based on the sender (From) address, or will use the Administrator account "
"if no system user is found for that address."),
'alias_defaults': fields.text('Default Values', required=True,
help="A Python dictionary that will be evaluated to provide "
"default values when creating new records for this alias."),
'alias_force_thread_id': fields.integer('Record Thread ID',
help="Optional ID of a thread (record) to which all incoming "
"messages will be attached, even if they did not reply to it. "
"If set, this will disable the creation of new records completely."),
'alias_domain': fields.function(_get_alias_domain, string="Alias domain", type='char'),
'alias_parent_model_id': fields.many2one('ir.model', 'Parent Model',
help="Parent model holding the alias. The model holding the alias reference\n"
"is not necessarily the model given by alias_model_id\n"
"(example: project (parent_model) and task (model))"),
'alias_parent_thread_id': fields.integer('Parent Record Thread ID',
help="ID of the parent record holding the alias (example: project holding the task creation alias)"),
'alias_contact': fields.selection([
('everyone', 'Everyone'),
('partners', 'Authenticated Partners'),
('followers', 'Followers only'),
], string='Alias Contact Security', required=True,
help="Policy to post a message on the document using the mailgateway.\n"
"- everyone: everyone can post\n"
"- partners: only authenticated partners\n"
"- followers: only followers of the related document\n"),
}
_defaults = {
'alias_defaults': '{}',
'alias_user_id': lambda self, cr, uid, context: uid,
# looks better when creating new aliases - even if the field is informative only
'alias_domain': lambda self, cr, uid, context: self._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
'alias_contact': 'everyone',
}
_sql_constraints = [
('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
]
def _check_alias_defaults(self, cr, uid, ids, context=None):
try:
for record in self.browse(cr, uid, ids, context=context):
dict(eval(record.alias_defaults))
except Exception:
return False
return True
_constraints = [
(_check_alias_defaults, '''Invalid expression, it must be a literal python dictionary definition e.g. "{'field': 'value'}"''', ['alias_defaults']),
]
def name_get(self, cr, uid, ids, context=None):
"""Return the mail alias display alias_name, including the implicit
mail catchall domain if exists from config otherwise "New Alias".
e.g. `jobs@mail.odoo.com` or `jobs` or 'New Alias'
"""
res = []
for record in self.browse(cr, uid, ids, context=context):
if record.alias_name and record.alias_domain:
res.append((record['id'], "%s@%s" % (record.alias_name, record.alias_domain)))
elif record.alias_name:
res.append((record['id'], "%s" % (record.alias_name)))
else:
res.append((record['id'], _("Inactive Alias")))
return res
def _find_unique(self, cr, uid, name, alias_id=False, context=None):
"""Find a unique alias name similar to ``name``. If ``name`` is
already taken, make a variant by adding an integer suffix until
an unused alias is found.
"""
sequence = None
while True:
new_name = "%s%s" % (name, sequence) if sequence is not None else name
domain = [('alias_name', '=', new_name)]
if alias_id:
domain += [('id', '!=', alias_id)]
if not self.search(cr, uid, domain):
break
sequence = (sequence + 1) if sequence else 2
return new_name
def _clean_and_make_unique(self, cr, uid, name, alias_id=False, context=None):
# when an alias name appears to already be an email, we keep the local part only
name = remove_accents(name).lower().split('@')[0]
name = re.sub(r'[^\w+.]+', '-', name)
return self._find_unique(cr, uid, name, alias_id=alias_id, context=context)
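# Worked example: 'Jobs & Careers@example.com' is lowered and cut to its
# local part 'jobs & careers', the non-word run ' & ' collapses to '-'
# giving 'jobs-careers'; if that alias is already taken, _find_unique
# yields 'jobs-careers2', then 'jobs-careers3', and so on.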
def migrate_to_alias(self, cr, child_model_name, child_table_name, child_model_auto_init_fct,
alias_model_name, alias_id_column, alias_key, alias_prefix='', alias_force_key='', alias_defaults={},
alias_generate_name=False, context=None):
""" Installation hook to create aliases for all users and avoid constraint errors.
:param child_model_name: model name of the child class (i.e. res.users)
:param child_table_name: table name of the child class (i.e. res_users)
:param child_model_auto_init_fct: pointer to the _auto_init function
(i.e. super(res_users,self)._auto_init(cr, context=context))
:param alias_model_name: name of the aliased model
:param alias_id_column: alias_id column (i.e. self._columns['alias_id'])
:param alias_key: name of the column used for the unique name (i.e. 'login')
:param alias_prefix: prefix for the unique name (i.e. 'jobs' + ...)
:param alias_force_key': name of the column for force_thread_id;
if empty string, not taken into account
:param alias_defaults: dict, keys = mail.alias columns, values = child
model column name used for default values (i.e. {'job_id': 'id'})
:param alias_generate_name: automatically generate alias name using prefix / alias key;
default alias_name value is False because since 8.0 it is not required anymore
"""
if context is None:
context = {}
# disable the unique alias_id not null constraint, to avoid spurious warning during
# super.auto_init. We'll reinstall it afterwards.
alias_id_column.required = False
# call _auto_init
res = child_model_auto_init_fct(cr, context=context)
registry = RegistryManager.get(cr.dbname)
mail_alias = registry.get('mail.alias')
child_class_model = registry[child_model_name]
no_alias_ids = child_class_model.search(cr, SUPERUSER_ID, [('alias_id', '=', False)], context={'active_test': False})
# Use read() not browse(), to avoid prefetching uninitialized inherited fields
for obj_data in child_class_model.read(cr, SUPERUSER_ID, no_alias_ids, [alias_key]):
alias_vals = {'alias_name': False}
if alias_generate_name:
alias_vals['alias_name'] = '%s%s' % (alias_prefix, obj_data[alias_key])
if alias_force_key:
alias_vals['alias_force_thread_id'] = obj_data[alias_force_key]
alias_vals['alias_defaults'] = dict((k, obj_data[v]) for k, v in alias_defaults.iteritems())
alias_vals['alias_parent_thread_id'] = obj_data['id']
alias_create_ctx = dict(context, alias_model_name=alias_model_name, alias_parent_model_name=child_model_name)
alias_id = mail_alias.create(cr, SUPERUSER_ID, alias_vals, context=alias_create_ctx)
child_class_model.write(cr, SUPERUSER_ID, obj_data['id'], {'alias_id': alias_id}, context={'mail_notrack': True})
_logger.info('Mail alias created for %s %s (id %s)', child_model_name, obj_data[alias_key], obj_data['id'])
# Finally attempt to reinstate the missing constraint
try:
cr.execute('ALTER TABLE %s ALTER COLUMN alias_id SET NOT NULL' % (child_table_name))
except Exception:
_logger.warning("Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
child_table_name, 'alias_id', child_table_name, 'alias_id')
# set back the unique alias_id constraint
alias_id_column.required = True
return res
def create(self, cr, uid, vals, context=None):
""" Creates an email.alias record according to the values provided in ``vals``,
with 2 alterations: the ``alias_name`` value may be suffixed in order to
make it unique (and certain unsafe characters replaced), and
he ``alias_model_id`` value will set to the model ID of the ``model_name``
context value, if provided.
"""
if context is None:
context = {}
model_name = context.get('alias_model_name')
parent_model_name = context.get('alias_parent_model_name')
if vals.get('alias_name'):
vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), context=context)
if model_name:
model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)], context=context)[0]
vals['alias_model_id'] = model_id
if parent_model_name:
model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', parent_model_name)], context=context)[0]
vals['alias_parent_model_id'] = model_id
return super(mail_alias, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
""""give a unique alias name if given alias name is already assigned"""
ids = ids if isinstance(ids, (tuple, list)) else [ids]
if vals.get('alias_name') and ids:
vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), alias_id=ids[0], context=context)
return super(mail_alias, self).write(cr, uid, ids, vals, context=context)
def open_document(self, cr, uid, ids, context=None):
alias = self.browse(cr, uid, ids, context=context)[0]
if not alias.alias_model_id or not alias.alias_force_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': alias.alias_model_id.model,
'res_id': alias.alias_force_thread_id,
'type': 'ir.actions.act_window',
}
def open_parent_document(self, cr, uid, ids, context=None):
alias = self.browse(cr, uid, ids, context=context)[0]
if not alias.alias_parent_model_id or not alias.alias_parent_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': alias.alias_parent_model_id.model,
'res_id': alias.alias_parent_thread_id,
'type': 'ir.actions.act_window',
}
| agpl-3.0 |
gemmaan/moviesenal | Hasil/Lib/encodings/cp860.py | 593 | 34937 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp860',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
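# Illustrative roundtrip, assuming the codec is registered as 'cp860':
# u'\xc7'.encode('cp860') == '\x80' and '\x80'.decode('cp860') == u'\xc7'
# (LATIN CAPITAL LETTER C WITH CEDILLA sits at byte 0x80 in this table).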
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
manjunaths/tensorflow | tensorflow/compiler/tests/pooling_ops_test.py | 70 | 16927 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
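# Illustrative round-trip check (not part of the original tests): both
# helpers above are pure index permutations, so applying one after the
# other to a 4-element shape list is the identity. The shape values here
# are hypothetical.
#
#   NCHWToNHWC(NHWCToNCHW([1, 28, 28, 3]))  # -> [1, 28, 28, 3]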
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs
"""
test_configs = ["NHWC", "NCHW"]
return test_configs
class PoolingTest(XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Pooling function to be called, e.g., nn_ops.max_pool
          or nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
"""
total_size = np.prod(input_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
x = x.reshape(input_sizes)
with self.test_session() as sess:
with self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = inputs
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
t = pool_func(t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = sess.run(t, {inputs: x})
self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Pooling function to be called, e.g., nn_ops.max_pool
          or nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected)
def testMaxPoolValidPadding(self):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testMaxPoolSamePadding(self):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingNonSquareWindow(self):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
    # A window of [1, 2] with SAME padding should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0])
def testMaxPoolValidPaddingUnevenStride(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
def testMaxPoolSamePaddingFilter4(self):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingFilter8(self):
expected_output = [
145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
  # Tests for depthwise max pooling.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0])
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
def testKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33])
def testKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11])
# Average pooling
def testAvgPoolValidPadding(self):
expected_output = [7, 8, 9]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testAvgPoolSamePadding(self):
expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
class PoolGradTest(XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self, pool_func, pool_grad_func, input_sizes, ksize,
strides, padding, data_format):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function corresponding to pool_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
"""
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
with self.test_session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
actual = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals.flatten(),
actual.flatten(),
rtol=1e-5,
atol=1e-6)
self.assertShapeEqual(actual, inputs)
def _VerifyValues(self, pool_func, pool_grad_func, input_sizes, ksize,
strides, padding):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, pool_grad_func, input_sizes, ksize,
strides, padding, data_format)
def _TestPooling(self, forward_op, backward_op):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID")
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME")
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME")
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID")
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID")
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME")
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME")
def testMaxPool(self):
self._TestPooling(nn_ops.max_pool, gen_nn_ops._max_pool_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops._avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops._max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops._max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops._max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
lmregus/mywebsite | app/models/skill.py | 1 | 1139 | from server import db
from server import app
class Skill(db.Model):
__tablename__ = "skill"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(64))
category = db.Column(db.String(64))
proficiency = db.Column(db.String(3))
def get_all(self):
skills = Skill.query.all()
return skills
def get_by_id(self, skill_id):
skill = Skill.query.get(skill_id)
return skill
def create(self, data):
skill = Skill()
skill.title = data['title']
skill.category = data['category']
skill.proficiency = data['proficiency']
db.session.add(skill)
db.session.commit()
return skill
def update(self, data):
skill = Skill.query.get(data['id'])
skill.title = data['title']
skill.category = data['category']
skill.proficiency = data['proficiency']
db.session.add(skill)
db.session.commit()
return skill
def delete(self, skill_id):
skill = Skill.query.get(skill_id)
db.session.delete(skill)
db.session.commit()
return skill
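# Minimal usage sketch (illustrative, not part of the original module):
# assuming the Flask-SQLAlchemy session imported from `server` is
# configured, a CRUD round trip looks like the following; all field
# values are hypothetical.
#
#   skill = Skill().create({'title': 'Python', 'category': 'Backend',
#                           'proficiency': '90'})
#   Skill().update({'id': skill.id, 'title': 'Python 3',
#                   'category': 'Backend', 'proficiency': '95'})
#   Skill().delete(skill.id)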
| mit |
Explosion-y6/android_kernel_huawei_msm8909 | tools/perf/tests/attr.py | 3174 | 9441 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
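    # Illustrative examples of the matching rules above (not part of the
    # original script): '|' separates alternative values and '*' acts as
    # a wildcard on either side.
    #
    #   compare_data('0|1', '1')   -> True
    #   compare_data('*', '0x123') -> True
    #   compare_data('0', '1')     -> False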
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
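#
# Example of such a description file (illustrative only; the command,
# arguments and field values below are hypothetical):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   fd      = 1
#   type    = 0|1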
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':' which allows a 'parent event' to be
        # loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
jungle90/Openstack-Swift-I-O-throttler | build/lib.linux-x86_64-2.7/swift/cli/form_signature.py | 17 | 5260 | # Copyright (c) 2010-2012 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating a form signature for use with FormPost middleware.
"""
import hmac
from hashlib import sha1
from os.path import basename
from time import time
def main(argv):
if len(argv) != 7:
prog = basename(argv[0])
print 'Syntax: %s <path> <redirect> <max_file_size> ' \
'<max_file_count> <seconds> <key>' % prog
print
print 'Where:'
print ' <path> The prefix to use for form uploaded'
print ' objects. For example:'
print ' /v1/account/container/object_prefix_ would'
print ' ensure all form uploads have that path'
print ' prepended to the browser-given file name.'
print ' <redirect> The URL to redirect the browser to after'
print ' the uploads have completed.'
print ' <max_file_size> The maximum file size per file uploaded.'
print ' <max_file_count> The maximum number of uploaded files'
print ' allowed.'
print ' <seconds> The number of seconds from now to allow'
print ' the form post to begin.'
print ' <key> The X-Account-Meta-Temp-URL-Key for the'
print ' account.'
print
print 'Example output:'
print ' Expires: 1323842228'
print ' Signature: 18de97e47345a82c4dbfb3b06a640dbb'
print
print 'Sample form:'
print
print('NOTE: the <form> tag\'s "action" attribute does not contain '
'the Swift cluster\'s hostname.')
print 'You should manually add it before using the form.'
print
print('<form action="/v1/a/c/o" method="POST" '
'enctype="multipart/form-data">')
print ' <input type="hidden" name="max_file_size" value="123" />'
print ' ... more HTML ...'
print ' <input type="submit" />'
print '</form>'
return 1
path, redirect, max_file_size, max_file_count, seconds, key = argv[1:]
try:
max_file_size = int(max_file_size)
except ValueError:
max_file_size = -1
if max_file_size < 0:
print 'Please use a <max_file_size> value greater than or equal to 0.'
return 1
try:
max_file_count = int(max_file_count)
except ValueError:
max_file_count = 0
if max_file_count < 1:
print 'Please use a positive <max_file_count> value.'
return 1
try:
expires = int(time() + int(seconds))
except ValueError:
expires = 0
if expires < 1:
print 'Please use a positive <seconds> value.'
return 1
parts = path.split('/', 4)
# Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have
# account and container values, and optionally have an object prefix.
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
print '<path> must point to a container at least.'
print 'For example: /v1/account/container'
print ' Or: /v1/account/container/object_prefix'
return 1
sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires),
sha1).hexdigest()
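    # For reference (illustrative, not part of the original script): the
    # HMAC-SHA1 input is the newline-joined string
    # "<path>\n<redirect>\n<max_file_size>\n<max_file_count>\n<expires>",
    # e.g. "/v1/a/c/p\nhttp://example.com/done\n1048576\n10\n1323842228".
    # The example values here are hypothetical.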
print ' Expires:', expires
print 'Signature:', sig
print ''
print('Sample form:\n')
print('NOTE: the <form> tag\'s "action" attribute does not '
'contain the Swift cluster\'s hostname.')
print('You should manually add it before using the form.\n')
print('<form action="%s" method="POST" enctype="multipart/form-data">'
% path)
if redirect:
print(' <input type="hidden" name="redirect" value="%s" />'
% redirect)
print(' <input type="hidden" name="max_file_size" value="%d" />'
% max_file_size)
print(' <input type="hidden" name="max_file_count" value="%d" />'
% max_file_count)
print(' <input type="hidden" name="expires" value="%d" />' % expires)
print(' <input type="hidden" name="signature" value="%s" />' % sig)
print(' <!-- This signature allows for at most %d files, -->'
% max_file_count)
print(' <!-- but it may also have any smaller number. -->')
print(' <!-- Remove file inputs as needed. -->')
for i in range(max_file_count):
print(' <input type="file" name="file%d" />' % i)
print(' <br />')
print(' <input type="submit" />')
print('</form>')
return 0
| apache-2.0 |
yang-guangliang/android_guard | examples/dad_print.py | 13 | 9740 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('./')
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis.analysis import uVMAnalysis
from androguard.decompiler.dad.decompile import DvMethod
from androguard.decompiler.dad.instruction import Constant, BinaryCompExpression
class PrintVisitor(object):
def __init__(self, graph):
self.graph = graph
self.visited_nodes = set()
self.loop_follow = [None]
self.latch_node = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.next_case = None
def visit_ins(self, ins):
return ins.visit(self)
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1]):
return
if node in self.visited_nodes:
return
self.visited_nodes.add(node)
node.visit(self)
def visit_loop_node(self, loop):
print '- Loop node', loop.num
follow = loop.get_loop_follow()
if follow is None and not loop.looptype.endless():
            exit('Loop has no follow! (error)')
if loop.looptype.pretest():
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
cnd = loop.visit_cond(self)
print 'while(%s) {' % cnd
elif loop.looptype.posttest():
print 'do {'
self.latch_node.append(loop.latch)
elif loop.looptype.endless():
print 'while(true) {'
self.loop_follow.append(follow)
if loop.looptype.pretest():
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
if loop.looptype.pretest():
print '}'
elif loop.looptype.posttest():
print '} while(',
self.latch_node.pop()
loop.latch.visit_cond(self)
print ')'
else:
self.visit_node(loop.latch)
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
print '- Cond node', cond.num
follow = cond.get_if_follow()
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
cond.visit_cond(self)
self.visit_node(cond.false)
elif follow is not None:
is_else = not (follow in (cond.true, cond.false))
if (cond.true in (follow, self.next_case) or
cond.num > cond.true.num):
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if not cond.true in self.visited_nodes:
cnd = cond.visit_cond(self)
print 'if (%s) {' % cnd
self.visit_node(cond.true)
if is_else and not cond.false in self.visited_nodes:
print '} else {'
self.visit_node(cond.false)
print '}'
self.if_follow.pop()
self.visit_node(follow)
else:
cond.visit_cond(self)
self.visit_node(cond.true)
self.visit_node(cond.false)
def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
if nnot:
cond1.neg()
cond1.visit_cond(self)
cond2.visit_cond(self)
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
self.visit_ins(switch_ins)
follow = switch.switch_follow
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
for case in switch.node_to_case[node]:
pass
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
default = None
self.visit_node(node)
if default not in (None, follow):
self.visit_node(default)
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
print '- Statement node', stmt.num
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 0:
return
follow = sucs[0]
self.visit_node(follow)
def visit_return_node(self, ret):
print '- Return node', ret.num
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
def visit_constant(self, cst):
return cst
def visit_base_class(self, cls):
return cls
def visit_variable(self, var):
return 'v%s' % var
def visit_param(self, param):
return 'p%s' % param
def visit_this(self):
return 'this'
def visit_assign(self, lhs, rhs):
if lhs is None:
rhs.visit(self)
return
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_move_result(self, lhs, rhs):
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_move(self, lhs, rhs):
if lhs is rhs:
return
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_astore(self, array, index, rhs):
arr = array.visit(self)
if isinstance(index, Constant):
idx = index.visit(self, 'I')
else:
idx = index.visit(self)
r = rhs.visit(self)
print '%s[%s] = %s' % (arr, idx, r)
def visit_put_static(self, cls, name, rhs):
r = rhs.visit(self)
return '%s.%s = %s' % (cls, name, r)
def visit_put_instance(self, lhs, name, rhs):
l = lhs.visit(self)
r = rhs.visit(self)
return '%s.%s = %s' % (l, name, r)
def visit_new(self, atype):
pass
def visit_invoke(self, name, base, args):
base.visit(self)
for arg in args:
arg.visit(self)
def visit_return_void(self):
print 'return;'
def visit_return(self, arg):
a = arg.visit(self)
print 'return %s;' % a
def visit_nop(self):
pass
def visit_switch(self, arg):
arg.visit(self)
def visit_check_cast(self, arg, atype):
arg.visit(self)
def visit_aload(self, array, index):
arr = array.visit(self)
idx = index.visit(self)
return '%s[%s]' % (arr, idx)
def visit_alength(self, array):
res = array.visit(self)
return '%s.length' % res
def visit_new_array(self, atype, size):
size.visit(self)
def visit_filled_new_array(self, atype, size, args):
atype.visit(self)
size.visit(self)
for arg in args:
arg.visit(self)
def visit_fill_array(self, array, value):
array.visit(self)
def visit_monitor_enter(self, ref):
ref.visit(self)
def visit_monitor_exit(self, ref):
pass
def visit_throw(self, ref):
ref.visit(self)
def visit_binary_expression(self, op, arg1, arg2):
val1 = arg1.visit(self)
val2 = arg2.visit(self)
return '%s %s %s' % (val1, op, val2)
def visit_unary_expression(self, op, arg):
arg.visit(self)
def visit_cast(self, op, arg):
a = arg.visit(self)
return '(%s %s)' % (op, a)
def visit_cond_expression(self, op, arg1, arg2):
val1 = arg1.visit(self)
val2 = arg2.visit(self)
return '%s %s %s' % (val1, op, val2)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
arg.visit(self)
else:
arg.visit(self)
def visit_get_instance(self, arg, name):
arg.visit(self)
def visit_get_static(self, cls, name):
return '%s.%s' % (cls, name)
TEST = '../DroidDream/magicspiral.apk'
vm = dvm.DalvikVMFormat(apk.APK(TEST).get_dex())
vma = uVMAnalysis(vm)
method = vm.get_method('crypt')[0]
method.show()
amethod = vma.get_method(method)
dvmethod = DvMethod(amethod)
dvmethod.process() # build IR Form / control flow...
graph = dvmethod.graph
print 'Entry block : %s\n' % graph.get_entry()
for block in graph: # graph.get_rpo() to iterate in reverse post order
print 'Block : %s' % block
for ins in block.get_ins():
print ' - %s' % ins
print
visitor = PrintVisitor(graph)
graph.get_entry().visit(visitor)
| apache-2.0 |
adhoc-dev/oca-account-financial-tools | currency_rate_update/services/update_service_RO_BNR.py | 41 | 4466 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# Abstract class to fetch rates from National Bank of Romania
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .currency_getter_interface import Currency_getter_interface
from datetime import datetime, timedelta
import logging
_logger = logging.getLogger(__name__)
class RO_BNR_getter(Currency_getter_interface):
"""Implementation of Currency_getter_factory interface for BNR service"""
def rate_retrieve(self, dom, ns, curr):
""" Parse a dom node to retrieve-
currencies data"""
res = {}
xpath_rate_currency = "/def:DataSet/def:Body/def:Cube/def:Rate" + \
"[@currency='%s']/text()" % (curr.upper())
xpath_rate_ref = "/def:DataSet/def:Body/def:Cube/def:Rate" + \
"[@currency='%s']/@multiplier" % (curr.upper())
res['rate_currency'] = float(dom.xpath(xpath_rate_currency,
namespaces=ns)[0])
try:
res['rate_ref'] = float(dom.xpath(xpath_rate_ref,
namespaces=ns)[0])
        except (IndexError, ValueError):
            # missing or non-numeric multiplier attribute: default to 1
            res['rate_ref'] = 1
return res
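    # Illustrative sketch of the feed structure the XPath expressions above
    # expect (inferred from this parser; the date and rate values below are
    # hypothetical):
    #
    #   <DataSet xmlns="http://www.bnr.ro/xsd">
    #     <Body><Cube date="2015-01-15">
    #       <Rate currency="EUR">4.4821</Rate>
    #       <Rate currency="HUF" multiplier="100">1.4128</Rate>
    #     </Cube></Body>
    #   </DataSet>
    #
    # With main_currency == 'RON' this yields 1 EUR = 4.4821 RON and
    # 100 HUF = 1.4128 RON.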
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
url = 'http://www.bnr.ro/nbrfxrates.xml'
# we do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
# Move to new XML lib cf Launchpad bug #645263
from lxml import etree
_logger.debug("BNR currency rate service : connecting...")
rawfile = self.get_url(url)
dom = etree.fromstring(rawfile)
adminch_ns = {'def': 'http://www.bnr.ro/xsd'}
rate_date = dom.xpath('/def:DataSet/def:Body/def:Cube/@date',
namespaces=adminch_ns)[0]
rate_date_datetime = datetime.strptime(rate_date, '%Y-%m-%d') + \
timedelta(days=1)
self.check_rate_date(rate_date_datetime, max_delta_days)
# we dynamically update supported currencies
self.supported_currency_array = dom.xpath(
"/def:DataSet/def:Body/" + "def:Cube/def:Rate/@currency",
namespaces=adminch_ns)
self.supported_currency_array = [
x.upper() for x in self.supported_currency_array]
self.supported_currency_array.append('RON')
self.validate_cur(main_currency)
if main_currency != 'RON':
main_curr_data = self.rate_retrieve(dom, adminch_ns, main_currency)
# 1 MAIN_CURRENCY = main_rate RON
main_rate = main_curr_data['rate_currency'] / \
main_curr_data['rate_ref']
for curr in currency_array:
self.validate_cur(curr)
if curr == 'RON':
rate = main_rate
else:
curr_data = self.rate_retrieve(dom, adminch_ns, curr)
# 1 MAIN_CURRENCY = rate CURR
if main_currency == 'RON':
rate = curr_data['rate_ref'] / curr_data['rate_currency']
else:
rate = main_rate * curr_data['rate_ref'] / \
curr_data['rate_currency']
self.updated_currency[curr] = rate
_logger.debug("BNR Rate retrieved : 1 " + main_currency + ' = ' +
str(rate) + ' ' + curr)
return self.updated_currency, self.log_info
| agpl-3.0 |
LukeM12/samba | python/samba/tests/libsmb_samba_internal.py | 42 | 2412 | # Unix SMB/CIFS implementation.
# Copyright Volker Lendecke <vl@samba.org> 2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.samba3.libsmb_samba_internal."""
from samba.samba3 import libsmb_samba_internal
from samba.dcerpc import security
from samba.samba3 import param as s3param
from samba import credentials
import samba.tests
import threading
import sys
import os
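# The OpenClose worker below hammers a single SMB connection with
# concurrent create / delete-on-close / close cycles; any exception is
# captured via sys.exc_info() and re-raised on the main thread once the
# worker is joined in test_OpenClose.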
class LibsmbTestCase(samba.tests.TestCase):
class OpenClose(threading.Thread):
def __init__(self, conn, filename, num_ops):
threading.Thread.__init__(self)
self.conn = conn
self.filename = filename
self.num_ops = num_ops
self.exc = False
def run(self):
c = self.conn
try:
for i in range(self.num_ops):
f = c.create(self.filename, CreateDisposition=3,
DesiredAccess=security.SEC_STD_DELETE)
c.delete_on_close(f, True)
c.close(f)
except Exception:
self.exc = sys.exc_info()
def test_OpenClose(self):
lp = s3param.get_context()
lp.load(os.getenv("SMB_CONF_PATH"))
creds = credentials.Credentials()
creds.set_username(os.getenv("USERNAME"))
creds.set_password(os.getenv("PASSWORD"))
c = libsmb_samba_internal.Conn(os.getenv("SERVER_IP"), "tmp", creds)
mythreads = []
for i in range(3):
t = LibsmbTestCase.OpenClose(c, "test" + str(i), 10)
mythreads.append(t)
for t in mythreads:
t.start()
for t in mythreads:
t.join()
if t.exc:
raise t.exc[0](t.exc[1])
if __name__ == "__main__":
import unittest
unittest.main()
| gpl-3.0 |
dhoomakethu/apocalypse | apocalypse/chaos/generator.py | 1 | 7790 | """
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals
import random
import re
from copy import deepcopy
from apocalypse.utils.backgroundJob import BackgroundJob
from apocalypse.utils.docker_client import get_host_ip
from apocalypse.utils.logger import get_logger
from apocalypse.utils.proc import TPExecutor
from apocalypse.chaos.executor import ChaosExecutor
from apocalypse.chaos.events import ChaosEvents, random_event_gen
chaos_logger = get_logger()
pid_regexp = re.compile(r"^\d+$")
ChaosEvents.update_events()
SECONDS_PER_TIME_UNIT = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
class ChaosGenerator(object):
"""
Generates chaos events periodically as supplied from cmd prompt or
from configuration file
"""
workers = {}
max_workers = 10
error = 0
error_threshold = 0 # threshold error count
device_map = []
event_gen = None
def __init__(self, chaos_app, events={}, random=True,
every=10, max_workers=10, **extras):
self._chaos_executor = ChaosExecutor(chaos_app=chaos_app)
self._chaos_list = events
self.random = random
self.max_workers = max_workers
self._parse_time_interval(every)
self.executor = TPExecutor(max_workers=self.max_workers)
self.executor.make_deamon()
self.bg_worker = BackgroundJob("Chaos Generator", self.time_interval,
self._create_chaos)
self.monitor = BackgroundJob("Monitor", 1, self.monitor_thread_pool)
self.dirty = False
self.chaos_triggered = False
self.extras = extras
def update_events(self, events):
self._chaos_list = {k: dict(v.items() + [("done", [])])
for k, v in events.items()}
self.event_gen = random_event_gen(self._chaos_list.keys())
def chaos(self, event, *args, **kwargs):
if not event:
chaos_logger.debug("Running random Chaos event")
return self._chaos_executor.random_run()
else:
chaos_logger.debug("Running Chaos event %s" % event)
return self._chaos_executor.run(event, *args, **kwargs)
def is_registered(self, event):
chaos_logger.info("Checking if '%s' is registered" % event)
return self._chaos_executor.is_registered(event)
def register(self, event):
chaos_logger.info("Registering event '%s' " % event)
return self._chaos_executor.register(event)
def unregister(self, event):
chaos_logger.info("Un-registering event '%s' " % event)
return self._chaos_executor.unregister(event)
def start(self):
"""
Creates chaos from the list of events every n seconds
Args:
events: event list from which chaos to be generated
random: randomly pick a chaos event
every: trigger chaos evey 'n' seconds
Returns:
None
"""
chaos_logger.info("Staring Chaos!!!!")
if self.extras['network_chaos']:
chaos_logger.warning("Running pre-requisites "
"for Network chaos events")
chaos_logger.warning("All set!!!")
if self.chaos_triggered:
self.stop()
if self.dirty:
self.bg_worker = BackgroundJob("Chaos Generator",
self.time_interval,
self._create_chaos)
self.executor = TPExecutor(max_workers=self.max_workers)
self.monitor = BackgroundJob("Monitor", 1,
self.monitor_thread_pool)
self.bg_worker.start()
self.monitor.start()
self.chaos_triggered = True
self.dirty = True
def _create_chaos(self):
"""
Creates chaos from the list of events every n seconds
Args:
events: event list from which chaos to be generated
random: randomly pick a chaos event
every: trigger chaos evey 'n' seconds
Returns:
None
"""
chaos_logger.debug("creating chaos")
if self._chaos_list:
e_name = self.event_gen.next()
event = self._chaos_list[e_name]
if not event['services']:
event['services'] = self.engine.get_services()
services = list(set(event['services']) ^ set(event["done"]))
if not services:
event["done"] = []
services = event['services']
service = random.choice(services) if services else None
event_copy = deepcopy(event)
if service:
event["done"].append(service)
event_copy.pop("done")
event_copy["services"] = [service]
future = self.executor.submit(self.chaos, e_name, **event_copy)
else:
future = self.executor.submit(self.chaos, None)
future.add_done_callback(self._tp_call_back)
def stop(self):
chaos_logger.info("Stopping chaos")
if self.chaos_triggered:
self.bg_worker.cancel()
self.chaos_triggered = False
self.executor.shutdown()
self.monitor.cancel()
def set_max_workers(self, count):
chaos_logger.info("Setting max worker threads to %s" % count)
self.max_workers = count
def list_events(self):
return self._chaos_executor.list_events()
@staticmethod
def _tp_call_back(future):
try:
res = future.result()
if len(res):
chaos_logger.debug("In future callback: "
"result returned : %s" % res)
return res
else:
chaos_logger.info("No Docker container with given info found")
chaos_logger.debug("In future callback: current Error "
"count : %s !!" % ChaosGenerator.error)
ChaosGenerator.error += 1
except Exception as e:
chaos_logger.critical("Exception while "
"running threadpool job : %s" % e)
ChaosGenerator.error += 1
return e
def monitor_thread_pool(self):
chaos_logger.debug("In monitor thread")
chaos_logger.debug("current Error "
"count : %s !!" % ChaosGenerator.error)
if ChaosGenerator.error >= ChaosGenerator.error_threshold:
chaos_logger.critical("Errors observed while "
"running chaos generator, check if "
"the environment is up and running")
chaos_logger.critical("Force Stopping Chaos!!!")
self.stop()
@classmethod
def set_threshold(cls, error_threshold):
cls.error_threshold = error_threshold
@property
def engine(self):
return self._chaos_executor.app
def _filter_network_chaos_events(self):
return {k: v for k, v in self._chaos_list.iteritems() if 'network' in k}
def host_ip(self):
return get_host_ip()
def _parse_time_interval(self, time_interval):
"""
Parse given time string expressed as <number>[s\m\h\d\w] to seconds
where , s = seconds, m = minutes, h = hours, d = days, w = weeks
e.g -> 1s = 1
1m = 60
1h = 3600
:return:
"""
if isinstance(time_interval, basestring):
self.time_interval = float(
time_interval[:-1]) * SECONDS_PER_TIME_UNIT[
time_interval[-1]
]
else:
self.time_interval = float(time_interval)
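        # Illustrative examples of the parser above (hypothetical values):
        #   '30s' -> 30.0,  '2m' -> 120.0,  '1h' -> 3600.0,  45 -> 45.0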
| mit |
bravo-zhang/spark | examples/src/main/python/mllib/random_rdd_generation.py | 51 | 1944 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Randomly generated RDDs.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.random import RandomRDDs
if __name__ == "__main__":
if len(sys.argv) not in [1, 2]:
print("Usage: random_rdd_generation", file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonRandomRDDGeneration")
numExamples = 10000 # number of examples to generate
    fraction = 0.1  # fraction of data to sample (unused in this example)
# Example: RandomRDDs.normalRDD
normalRDD = RandomRDDs.normalRDD(sc, numExamples)
print('Generated RDD of %d examples sampled from the standard normal distribution'
% normalRDD.count())
print(' First 5 samples:')
for sample in normalRDD.take(5):
print(' ' + str(sample))
print()
# Example: RandomRDDs.normalVectorRDD
normalVectorRDD = RandomRDDs.normalVectorRDD(sc, numRows=numExamples, numCols=2)
print('Generated RDD of %d examples of length-2 vectors.' % normalVectorRDD.count())
print(' First 5 samples:')
for sample in normalVectorRDD.take(5):
print(' ' + str(sample))
print()
sc.stop()
| apache-2.0 |
ekosareva/networking-vsphere | networking_vsphere/tests/unit/ml2/test_dvs_mechanism_driver.py | 2 | 10322 | # Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from neutron.common import constants as n_const
from neutron.tests import base
from networking_vsphere.common import constants as dvs_const
from networking_vsphere.common import exceptions
from networking_vsphere.common import vmware_conf
from networking_vsphere.ml2 import dvs_mechanism_driver
VALID_HYPERVISOR_TYPE = 'VMware vCenter Server'
INVALID_HYPERVISOR_TYPE = '_invalid_hypervisor_'
CONF = vmware_conf.CONF
class FAKE_SECURITY_GROUPS(object):
NEW = 'new_sg'
CONSTANT = 'constant_sg'
REMOVED = 'removed_sg'
CONSTANT_SG_RULE = {'constant rule': 'some_rule'}
class VMwareDVSMechanismDriverTestCase(base.BaseTestCase):
def setUp(self):
super(VMwareDVSMechanismDriverTestCase, self).setUp()
self.driver = dvs_mechanism_driver.VMwareDVSMechanismDriver()
self.driver._bound_ports = set()
self.dvs_notifier = mock.Mock()
self.driver.network_map = {'physnet1': mock.Mock()}
@mock.patch('neutron.db.api.get_session')
@mock.patch('networking_vsphere.utils.dvs_util.'
'create_network_map_from_config', return_value='network_map')
def test_initialize(self, create_network_map_from_config, get_session):
pass
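        # NOTE: the decorators above patch db.api.get_session and
        # create_network_map_from_config, but the test body is an empty
        # placeholder and exercises nothing yet.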
def test_create_network_precommit_when_network_not_mapped(self):
context = self._create_network_context()
self.driver.network_map = {}
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.create_network_cast') as cast_mock:
self.driver.create_network_precommit(context)
if CONF.DVS.precreate_networks:
cast_mock.assert_called_once_with(
context.current, context.network_segments[0])
def test_delete_network_postcommit_when_network_is_not_mapped(self):
context = self._create_network_context()
self.driver.network_map = {}
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.delete_network_cast') as cast_mock:
self.driver.delete_network_postcommit(context)
cast_mock.assert_called_once_with(
context.current, context.network_segments[0])
def test_update_network_precommit(self):
context = self._create_network_context()
self.driver.network_map = {}
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.update_network_cast') as cast_mock:
self.driver.update_network_precommit(context)
cast_mock.assert_called_once_with(
context.current, context.network_segments[0], context.original)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test_update_port_postcommit(self, hypervisor_by_host):
hypervisor_by_host.return_value = mock.Mock(
hypervisor_type=VALID_HYPERVISOR_TYPE)
current = self._create_port_dict(vif_type='unbound',
status=n_const.PORT_STATUS_DOWN)
port_ctx = self._create_port_context(current=current)
segment = port_ctx.network.network_segments[0]
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.update_postcommit_port_call') as m_call:
self.driver.update_port_postcommit(port_ctx)
m_call.assert_called_once_with(
current, port_ctx.original, segment, port_ctx.host)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test_update_port_postcommit_non_vmware_port(self, hypervisor_by_host):
hypervisor_by_host.return_value = mock.Mock(
hypervisor_type=INVALID_HYPERVISOR_TYPE)
port_context = self._create_port_context()
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.update_postcommit_port_call') as m_call:
self.driver.update_port_postcommit(port_context)
self.assertEqual(m_call.call_count, 0)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test__port_belongs_to_vmware__unbinded_port(self, get_hypervisor):
context = self._create_port_context()
        port = context.current
port.pop('binding:host_id')
func = mock.Mock(__name__='dummy_name')
decorated = dvs_mechanism_driver.port_belongs_to_vmware(func)
self.assertFalse(decorated(None, context))
self.assertFalse(func.called)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test__port_belongs_to_vmware__invalid_hypervisor(
self, get_hypervisor):
context = self._create_port_context()
get_hypervisor.return_value = mock.Mock(
hypervisor_type=INVALID_HYPERVISOR_TYPE)
func = mock.Mock(__name__='dummy_name')
decorated = dvs_mechanism_driver.port_belongs_to_vmware(func)
self.assertFalse(decorated(None, context))
self.assertFalse(func.called)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test__port_belongs_to_vmware__not_found(self, get_hypervisor):
get_hypervisor.side_effect = exceptions.HypervisorNotFound
context = self._create_port_context()
func = mock.Mock(__name__='dummy_name', return_value=True)
decorated = dvs_mechanism_driver.port_belongs_to_vmware(func)
self.assertFalse(decorated(None, context))
self.assertFalse(func.called)
self.assertTrue(get_hypervisor.called)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
def test_delete_port_postcommit_when_KeyError(self, hypervisor_by_host):
hypervisor_by_host.return_value = mock.Mock(
hypervisor_type=VALID_HYPERVISOR_TYPE)
current = self._create_port_dict(vif_type='unbound',
status=n_const.PORT_STATUS_DOWN)
port_ctx = self._create_port_context(current=current)
segment = port_ctx.network.network_segments[0]
self.driver._bound_ports = set([1, 2])
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.delete_port_call') as call_mock:
self.driver.delete_port_postcommit(port_ctx)
call_mock.assert_called_once_with(
current, port_ctx.original, segment, port_ctx.host)
self.assertEqual(set([1, 2]), self.driver._bound_ports)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
    def test_update_port_precommit_unbound_port(self, hypervisor_by_host):
hypervisor_by_host.return_value = mock.Mock(
hypervisor_type=VALID_HYPERVISOR_TYPE)
current = self._create_port_dict(vif_type='unbound',
status=n_const.PORT_STATUS_DOWN)
port_ctx = self._create_port_context(current=current)
network = port_ctx.network
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.bind_port_call') as cast_mock:
self.driver.update_port_precommit(port_ctx)
cast_mock.assert_called_once_with(
current, network.network_segments, network.current,
port_ctx.host)
@mock.patch('networking_vsphere.utils.compute_util.'
'get_hypervisors_by_host')
    def test_update_port_precommit_not_unbound_port(self, hypervisor_by_host):
hypervisor_by_host.return_value = mock.Mock(
hypervisor_type=VALID_HYPERVISOR_TYPE)
current = self._create_port_dict(vif_type='binding_failed')
port_ctx = self._create_port_context(current=current)
with mock.patch('networking_vsphere.common.dvs_agent_rpc_api.'
'DVSClientAPI.bind_port_call') as cast_mock:
self.driver.update_port_precommit(port_ctx)
self.assertEqual(cast_mock.call_count, 0)
# TODO(ekosareva): add tests for _get_security_group_info func
# def test_get_security_group_info_when_security_group_rules_absent(self):
# pass
def _create_port_context(self, current=None, original=None, network=None):
context = mock.Mock(
current=current or self._create_port_dict(),
original=original or self._create_port_dict(),
network=network or self._create_network_context())
return context
def _create_port_dict(self, security_groups=None, vif_type=dvs_const.DVS,
status=n_const.PORT_STATUS_DOWN):
security_groups = security_groups or []
security_groups = list(security_groups)
security_groups.append(FAKE_SECURITY_GROUPS.CONSTANT)
return {
'id': '_dummy_port_id_%s' % id({}),
'admin_state_up': True,
'security_groups': security_groups,
'binding:host_id': '_id_server_',
'binding:vif_type': vif_type,
'status': status,
'security_group_rules': [CONSTANT_SG_RULE],
'binding:vif_details': {'dvs_port_key': '_dummy_dvs_port_key_'}
}
def _create_network_context(self, network_type='vlan'):
return mock.Mock(
current={'id': '_dummy_net_id_'},
network_segments=[{
'id': '_id_segment_',
'network_type': network_type,
'physical_network': 'physnet1'
}])
| apache-2.0 |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/django/http/__init__.py | 33 | 1184 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (HttpRequest, QueryDict,
RawPostDataException, UnreadablePostError, build_request_repr)
from django.http.response import (
HttpResponse, StreamingHttpResponse, FileResponse,
HttpResponseRedirect, HttpResponsePermanentRedirect,
HttpResponseNotModified, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseNotFound, HttpResponseNotAllowed, HttpResponseGone,
HttpResponseServerError, Http404, BadHeaderError, JsonResponse)
from django.http.utils import fix_location_header, conditional_content_removal
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError', 'build_request_repr',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'fix_location_header', 'JsonResponse',
'FileResponse', 'conditional_content_removal',
]
| mit |
timthelion/FreeCAD | src/Mod/OpenSCAD/replaceobj.py | 9 | 4190 | #***************************************************************************
#* *
#* Copyright (c) 2012 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - replace object fuction"
__author__ = "Sebastian Hoogen"
__url__ = ["http://www.freecadweb.org"]
'''
This function allows one to replace an object in the feature hierarchy
'''
def replaceobj(parent,oldchild,newchild):
for propname in parent.PropertiesList:
propvalue=parent.getPropertyByName(propname)
if type(propvalue) == list:
bModified = False
for dontcare in range(propvalue.count(oldchild)):
propvalue[propvalue.index(oldchild)] = newchild
bModified = True
if bModified:
if propname == "ExpressionEngine":
                    # fixme: proper handling?
                    import FreeCAD  # local import: only resolvable when running inside FreeCAD
                    FreeCAD.Console.PrintWarning("Expressions in "+parent.Name+" need to be modified, but they were not. Please do that manually.")
continue
setattr(parent,propname,propvalue)
else:
if propvalue == oldchild:
setattr(parent,propname,newchild)
print propname, parent.getPropertyByName(propname)
#else: print propname,propvalue
parent.touch()
def replaceobjfromselection(objs):
    # The Parent can be omitted as long as one object is orphaned
if len(objs)==2:
InListLength= tuple((len(obj.InList)) for obj in objs)
if InListLength == (0,1):
newchild,oldchild = objs
parent = oldchild.InList[0]
elif InListLength == (1,0):
oldchild,newchild = objs
parent = oldchild.InList[0]
else:
raise ValueError("Selection ambiguous. Please select oldchild,\
newchild and parent")
elif len(objs)==3:
if objs[2] in objs[0].InList: oldchild, newchild, parent = objs
elif objs[0] in objs[1].InList: parent, oldchild, newchild = objs
elif objs[0] in objs[2].InList: parent, newchild, oldchild = objs
elif objs[1] in objs[0].InList: oldchild, parent, newchild = objs
elif objs[1] in objs[2].InList: newchild, parent, oldchild = objs
elif objs[2] in objs[1].InList: newchild, oldchild, parent = objs
else:
raise ValueError("Cannot determin current parent-child relationship")
else:
raise ValueError("Wrong number of selected objects")
replaceobj(parent,oldchild,newchild)
parent.Document.recompute()
if __name__ == '__main__':
import FreeCAD,FreeCADGui
objs=[selobj.Object for selobj in FreeCADGui.Selection.getSelectionEx()]
replaceobjfromselection(objs)
| lgpl-2.1 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtCore/QXmlStreamNotationDeclaration.py | 2 | 1921 | # encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python2.7/dist-packages/PyQt4/QtCore.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QXmlStreamNotationDeclaration(): # skipped bases: <type 'sip.simplewrapper'>
"""
QXmlStreamNotationDeclaration()
QXmlStreamNotationDeclaration(QXmlStreamNotationDeclaration)
"""
def name(self): # real signature unknown; restored from __doc__
""" QXmlStreamNotationDeclaration.name() -> QStringRef """
return QStringRef
def publicId(self): # real signature unknown; restored from __doc__
""" QXmlStreamNotationDeclaration.publicId() -> QStringRef """
return QStringRef
def systemId(self): # real signature unknown; restored from __doc__
""" QXmlStreamNotationDeclaration.systemId() -> QStringRef """
return QStringRef
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, QXmlStreamNotationDeclaration=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| gpl-2.0 |
AaronClaydon/blocks | blockly_build/closure-library/closure/bin/build/source_test.py | 153 | 3653 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for source."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest
import source
class SourceTestCase(unittest.TestCase):
"""Unit test for source. Tests the parser on a known source input."""
def testSourceScan(self):
test_source = source.Source(_TEST_SOURCE)
self.assertEqual(set(['foo', 'foo.test']),
test_source.provides)
self.assertEqual(set(['goog.dom', 'goog.events.EventType']),
test_source.requires)
self.assertFalse(test_source.is_goog_module)
def testSourceScanBase(self):
test_source = source.Source(_TEST_BASE_SOURCE)
self.assertEqual(set(['goog']),
test_source.provides)
self.assertEqual(test_source.requires, set())
self.assertFalse(test_source.is_goog_module)
def testSourceScanBadBase(self):
def MakeSource():
source.Source(_TEST_BAD_BASE_SOURCE)
self.assertRaises(Exception, MakeSource)
def testSourceScanGoogModule(self):
test_source = source.Source(_TEST_MODULE_SOURCE)
self.assertEqual(set(['foo']),
test_source.provides)
self.assertEqual(set(['bar']),
test_source.requires)
self.assertTrue(test_source.is_goog_module)
def testStripComments(self):
self.assertEquals(
'\nvar foo = function() {}',
source.Source._StripComments((
'/* This is\n'
' a comment split\n'
' over multiple lines\n'
'*/\n'
'var foo = function() {}')))
def testGoogStatementsInComments(self):
test_source = source.Source(_TEST_COMMENT_SOURCE)
self.assertEqual(set(['foo']),
test_source.provides)
self.assertEqual(set(['goog.events.EventType']),
test_source.requires)
self.assertFalse(test_source.is_goog_module)
def testHasProvideGoog(self):
self.assertTrue(source.Source._HasProvideGoogFlag(_TEST_BASE_SOURCE))
self.assertTrue(source.Source._HasProvideGoogFlag(_TEST_BAD_BASE_SOURCE))
self.assertFalse(source.Source._HasProvideGoogFlag(_TEST_COMMENT_SOURCE))
_TEST_MODULE_SOURCE = """
goog.module('foo');
var b = goog.require('bar');
"""
_TEST_SOURCE = """// Fake copyright notice
/** Very important comment. */
goog.provide('foo');
goog.provide('foo.test');
goog.require('goog.dom');
goog.require('goog.events.EventType');
function foo() {
// Set bar to seventeen to increase performance.
this.bar = 17;
}
"""
_TEST_COMMENT_SOURCE = """// Fake copyright notice
goog.provide('foo');
/*
goog.provide('foo.test');
*/
/*
goog.require('goog.dom');
*/
// goog.require('goog.dom');
goog.require('goog.events.EventType');
function bar() {
this.baz = 55;
}
"""
_TEST_BASE_SOURCE = """
/**
* @fileoverview The base file.
* @provideGoog
*/
var goog = goog || {};
"""
_TEST_BAD_BASE_SOURCE = """
/**
* @fileoverview The base file.
* @provideGoog
*/
goog.provide('goog');
"""
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ESOedX/edx-platform | lms/djangoapps/survey/tests/test_models.py | 1 | 11080 | """
Python tests for the Survey models
"""
from __future__ import absolute_import
from collections import OrderedDict
import ddt
import six
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.test.client import Client
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from survey.models import SurveyAnswer, SurveyForm
@ddt.ddt
class SurveyModelsTests(TestCase):
"""
All tests for the Survey models.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super(SurveyModelsTests, self).setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = User.objects.create_user('student', 'student@test.com', self.password)
self.student2 = User.objects.create_user('student2', 'student2@test.com', self.password)
self.test_survey_name = 'TestForm'
self.test_form = '<li><input name="field1" /></li><li><input name="field2" /></li><li><select name="ddl"><option>1</option></select></li>'
self.test_form_update = '<input name="field1" />'
self.course_id = 'foo/bar/baz'
self.student_answers = OrderedDict({
'field1': 'value1',
'field2': 'value2',
})
self.student_answers_update = OrderedDict({
'field1': 'value1-updated',
'field2': 'value2-updated',
})
self.student_answers_update2 = OrderedDict({
'field1': 'value1-updated2',
})
self.student2_answers = OrderedDict({
'field1': 'value3'
})
def _create_test_survey(self):
"""
Helper method to set up test form
"""
return SurveyForm.create(self.test_survey_name, self.test_form)
def test_form_not_found_raise_exception(self):
"""
Asserts that when looking up a form that does not exist
"""
with self.assertRaises(SurveyFormNotFound):
SurveyForm.get(self.test_survey_name)
def test_form_not_found_none(self):
"""
Asserts that when looking up a form that does not exist
"""
self.assertIsNone(SurveyForm.get(self.test_survey_name, throw_if_not_found=False))
def test_create_new_form(self):
"""
Make sure we can create a new form a look it up
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
new_survey = SurveyForm.get(self.test_survey_name)
self.assertIsNotNone(new_survey)
self.assertEqual(new_survey.form, self.test_form)
def test_unicode_rendering(self):
"""
See if the survey form returns the expected unicode string
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
self.assertEquals(six.text_type(survey), self.test_survey_name)
def test_create_form_with_malformed_html(self):
"""
Make sure that if a SurveyForm is saved with unparseable html
an exception is thrown
"""
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<input name="oops" /><<<>')
def test_create_form_with_no_fields(self):
"""
Make sure that if a SurveyForm is saved without any named fields
an exception is thrown
"""
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<p>no input fields here</p>')
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<input id="input_without_name" />')
def test_create_form_already_exists(self):
"""
Make sure we can't create two surveys of the same name
"""
self._create_test_survey()
with self.assertRaises(SurveyFormNameAlreadyExists):
self._create_test_survey()
def test_create_form_update_existing(self):
"""
Make sure we can update an existing form
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey = SurveyForm.create(self.test_survey_name, self.test_form_update, update_if_exists=True)
self.assertIsNotNone(survey)
survey = SurveyForm.get(self.test_survey_name)
self.assertIsNotNone(survey)
self.assertEquals(survey.form, self.test_form_update)
def test_survey_has_no_answers(self):
"""
Create a new survey and assert that there are no answers to that survey
"""
survey = self._create_test_survey()
self.assertEquals(len(survey.get_answers()), 0)
def test_user_has_no_answers(self):
"""
Create a new survey with no answers in it and check that a user is determined to not have answered it
"""
survey = self._create_test_survey()
self.assertFalse(survey.has_user_answered_survey(self.student))
self.assertEquals(len(survey.get_answers()), 0)
@ddt.data(None, 'foo/bar/baz')
def test_single_user_answers(self, course_id):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, course_id)
self.assertTrue(survey.has_user_answered_survey(self.student))
all_answers = survey.get_answers()
self.assertEquals(len(list(all_answers.keys())), 1)
self.assertIn(self.student.id, all_answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
answers = survey.get_answers(self.student)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
# check that the course_id was set
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=survey
)
for answer_obj in answer_objs:
if course_id:
self.assertEquals(six.text_type(answer_obj.course_key), course_id)
else:
self.assertIsNone(answer_obj.course_key)
def test_multiple_user_answers(self):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
self.assertTrue(survey.has_user_answered_survey(self.student))
all_answers = survey.get_answers()
self.assertEquals(len(list(all_answers.keys())), 2)
self.assertIn(self.student.id, all_answers)
self.assertIn(self.student2.id, all_answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
self.assertEquals(all_answers[self.student2.id], self.student2_answers)
answers = survey.get_answers(self.student)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers)
answers = survey.get_answers(self.student2)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student2.id, answers)
self.assertEquals(answers[self.student2.id], self.student2_answers)
def test_update_answers(self):
"""
Make sure the update case works
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers)
# update
survey.save_user_answers(self.student, self.student_answers_update, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers_update)
# update with just a subset of the origin dataset
survey.save_user_answers(self.student, self.student_answers_update2, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(list(answers.keys())), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers_update2)
def test_limit_num_users(self):
"""
Verify that the limit_num_users parameter to get_answers()
works as intended
"""
survey = self._create_test_survey()
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
# even though we have 2 users submitted answers
# limit the result set to just 1
all_answers = survey.get_answers(limit_num_users=1)
self.assertEquals(len(list(all_answers.keys())), 1)
def test_get_field_names(self):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
names = survey.get_field_names()
self.assertEqual(sorted(names), ['ddl', 'field1', 'field2'])
def test_retire_user_successful(self):
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
retire_result = SurveyAnswer.retire_user(self.student.id)
self.assertTrue(retire_result)
answers = survey.get_answers(self.student)
        blanked_out_student_answers = {key: '' for key in self.student_answers}
        self.assertEquals(answers[self.student.id], blanked_out_student_answers)
self.assertEquals(survey.get_answers(self.student2)[self.student2.id], self.student2_answers)
def test_retire_user_not_exist(self):
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
retire_result = SurveyAnswer.retire_user(self.student2.id)
self.assertFalse(retire_result)
answers = survey.get_answers(self.student)
self.assertEquals(answers[self.student.id], self.student_answers)
| agpl-3.0 |
watonyweng/nova | nova/objects/instance_action.py | 23 | 8937 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceAction(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'action': fields.StringField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=True),
'request_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'message': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, action, db_action):
for field in action.fields:
action[field] = db_action[field]
action._context = context
action.obj_reset_changes()
return action
@staticmethod
def pack_action_start(context, instance_uuid, action_name):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'user_id': context.user_id,
'project_id': context.project_id,
'action': action_name,
'start_time': context.timestamp}
return values
@staticmethod
def pack_action_finish(context, instance_uuid):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'finish_time': timeutils.utcnow()}
return values
@base.remotable_classmethod
def get_by_request_id(cls, context, instance_uuid, request_id):
db_action = db.action_get_by_request_id(context, instance_uuid,
request_id)
if db_action:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_start(cls, context, instance_uuid, action_name,
want_result=True):
values = cls.pack_action_start(context, instance_uuid, action_name)
db_action = db.action_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_finish(cls, context, instance_uuid, want_result=True):
values = cls.pack_action_finish(context, instance_uuid)
db_action = db.action_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable
def finish(self):
values = self.pack_action_finish(self._context, self.instance_uuid)
db_action = db.action_finish(self._context, values)
self._from_db_object(self._context, self, db_action)
@base.NovaObjectRegistry.register
class InstanceActionList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceAction <= version 1.1
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('InstanceAction'),
}
# NOTE(danms): InstanceAction was at 1.1 before we added this
obj_relationships = {
'objects': [('1.0', '1.1')]
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_actions = db.actions_get(context, instance_uuid)
return base.obj_make_list(context, cls(), InstanceAction, db_actions)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: event_finish_with_failure decorated with serialize_args
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'event': fields.StringField(nullable=True),
'action_id': fields.IntegerField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'result': fields.StringField(nullable=True),
'traceback': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
event[field] = db_event[field]
event._context = context
event.obj_reset_changes()
return event
@staticmethod
def pack_action_event_start(context, instance_uuid, event_name):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
@staticmethod
def pack_action_event_finish(context, instance_uuid, event_name,
exc_val=None, exc_tb=None):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
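        # An absent traceback marks the event as a success; otherwise the
        # error message and traceback are recorded with the event.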
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = exc_val
values['traceback'] = exc_tb
return values
@base.remotable_classmethod
def get_by_id(cls, context, action_id, event_id):
db_event = db.action_event_get_by_id(context, action_id, event_id)
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_start(cls, context, instance_uuid, event_name, want_result=True):
values = cls.pack_action_event_start(context, instance_uuid,
event_name)
db_event = db.action_event_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.serialize_args
@base.remotable_classmethod
def event_finish_with_failure(cls, context, instance_uuid, event_name,
exc_val=None, exc_tb=None, want_result=None):
values = cls.pack_action_event_finish(context, instance_uuid,
event_name, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_finish(cls, context, instance_uuid, event_name,
want_result=True):
return cls.event_finish_with_failure(context, instance_uuid,
event_name, exc_val=None,
exc_tb=None,
want_result=want_result)
@base.remotable
def finish_with_failure(self, exc_val, exc_tb):
values = self.pack_action_event_finish(self._context,
self.instance_uuid,
self.event, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(self._context, values)
self._from_db_object(self._context, self, db_event)
@base.remotable
def finish(self):
self.finish_with_failure(self._context, exc_val=None, exc_tb=None)
@base.NovaObjectRegistry.register
class InstanceActionEventList(base.ObjectListBase, base.NovaObject):
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('InstanceActionEvent'),
}
obj_relationships = {
'objects': [('1.0', '1.0'), ('1.1', '1.1')],
}
@base.remotable_classmethod
def get_by_action(cls, context, action_id):
db_events = db.action_events_get(context, action_id)
return base.obj_make_list(context, cls(context),
objects.InstanceActionEvent, db_events)
| apache-2.0 |
bzhou26/NRA-Crawler | selenium/webdriver/common/utils.py | 63 | 2090 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Utils methods.
"""
import socket
def free_port():
"""
Determines a free port using sockets.
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(('0.0.0.0', 0))
free_socket.listen(5)
port = free_socket.getsockname()[1]
free_socket.close()
return port
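# Note that the socket is closed before the port number is returned, so another
# process could claim the port in the interim; callers should treat the result
# as best-effort.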
def is_connectable(port):
"""
Tries to connect to the server at port to see if it is running.
:Args:
- port: The port to connect.
"""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("127.0.0.1", port))
result = True
except socket.error:
result = False
finally:
socket_.close()
return result
def is_url_connectable(port):
"""
Tries to connect to the HTTP server at /status path
and specified port to see if it responds successfully.
:Args:
- port: The port to connect.
"""
try:
from urllib import request as url_request
except ImportError:
import urllib2 as url_request
try:
res = url_request.urlopen("http://127.0.0.1:%s/status" % port)
if res.getcode() == 200:
return True
else:
return False
except:
return False
| mit |
ScreamingUdder/mantid | Testing/SystemTests/tests/analysis/TobyFitResolutionSimulationTest.py | 3 | 5025 | #pylint: disable=no-init,invalid-name
"""Testing of the VATES quantification using
the TobyFitResolutionModel
"""
from stresstesting import MantidStressTest
from mantid.simpleapi import *
def create_cuboid_xml(xlength,ylength,zlength):
xml = """<cuboid id="sample0">
<left-front-bottom-point x="%(xpt)f" y="-%(ypt)f" z="-%(zpt)f" />
<left-front-top-point x="%(xpt)f" y="-%(ypt)f" z="%(zpt)f" />
<left-back-bottom-point x="-%(xpt)f" y="-%(ypt)f" z="-%(zpt)f" />
<right-front-bottom-point x="%(xpt)f" y="%(ypt)f" z="-%(zpt)f" />
</cuboid>
<algebra val="sample0" />
"""
return xml % {"xpt": xlength/2.0,"ypt":ylength/2.0,"zpt":zlength/2.0}
class TobyFitResolutionSimulationTest(MantidStressTest):
_success = False
def skipTests(self):
return False
def requiredMemoryMB(self):
return 16000
def runTest(self):
ei = 300.
bins = [-30,3,279]
temperature = 6.
chopper_speed = 600.
# Oriented lattice & goniometer.
alatt = 5.57
blatt = 5.51
clatt = 12.298
uvec = [9.700000e-03,9.800000e-03,9.996000e-01]
vvec = [9.992000e-01,-3.460000e-02,-4.580000e-02]
# sample dimensions
sx = 0.05 # Perp
sy = 0.025 # Up direction
sz = 0.04 # Beam direction
# Crystal mosaic
eta_sig = 4.0
fake_data = CreateSimulationWorkspace(Instrument='MERLIN',
BinParams=bins,UnitX='DeltaE',
DetectorTableFilename='MER06398.raw')
##
## Required log entries, can be taken from real ones by placing an instrument parameter of the same
## name pointing to the log name
##
AddSampleLog(Workspace=fake_data, LogName='Ei',LogText=str(ei), LogType="Number")
AddSampleLog(Workspace=fake_data, LogName='temperature_log',LogText=str(temperature), LogType="Number")
AddSampleLog(Workspace=fake_data, LogName='chopper_speed_log',LogText=str(chopper_speed), LogType="Number")
AddSampleLog(Workspace=fake_data, LogName='eta_sigma',LogText=str(eta_sig), LogType="Number")
##
## Sample shape
##
CreateSampleShape(InputWorkspace=fake_data, ShapeXML=create_cuboid_xml(sx,sy,sz))
##
## Chopper & Moderator models.
##
CreateModeratorModel(Workspace=fake_data,ModelType='IkedaCarpenterModerator',
Parameters="TiltAngle=32,TauF=2.7,TauS=0,R=0")
CreateChopperModel(Workspace=fake_data,ModelType='FermiChopperModel',
Parameters="AngularVelocity=chopper_speed_log,ChopperRadius=0.049,\
SlitThickness=0.0023,SlitRadius=1.3,Ei=Ei,JitterSigma=0.0")
##
## UB matrix
##
SetUB(Workspace=fake_data,a=alatt,b=blatt,c=clatt,u=uvec,v=vvec)
##
## Sample rotation. Simulate 1 run at zero degrees psi
##
psi = 0.0
AddSampleLog(Workspace=fake_data,LogName='psi',LogText=str(psi),LogType='Number')
SetGoniometer(Workspace=fake_data,Axis0="psi,0,1,0,1")
# Create the MD workspace
qscale = 'Q in A^-1'
fake_md = ConvertToMD(InputWorkspace=fake_data, QDimensions="Q3D", QConversionScales=qscale,
SplitInto=[3], SplitThreshold=100,MinValues="-15,-15,-15,-30",
MaxValues="25,25,25,279",OverwriteExisting=True)
# Run the simulation.
resol_model = "TobyFitResolutionModel"
xsec_model = "Strontium122"
# Use sobol & restart each pixel to ensure reproducible result
parameters = "Seff=0.7,J1a=38.7,J1b=-5.0,J2=27.3,SJc=10.0,GammaSlope=0.08,MultEps=0,TwinType=0,MCLoopMin=10,MCLoopMax=10,MCType=1"
simulated = SimulateResolutionConvolvedModel(InputWorkspace=fake_md,
ResolutionFunction=resol_model,
ForegroundModel=xsec_model,
Parameters=parameters)
# Take a slice
slice_ws = BinMD( InputWorkspace=simulated,
AlignedDim0='[H,0,0], -12.000000, 9.000000, 100',
AlignedDim1='[0,K,0], -6.000000, 7.000000, 100',
AlignedDim2='[0,0,L], 0.000000, 6.000000, 1',
AlignedDim3='DeltaE, 100.000000, 150.000000, 1')
# Check
ref_file = LoadMD(Filename='TobyFitResolutionSimulationTest.nxs')
result = CompareWorkspaces(Workspace1=slice_ws,
Workspace2=ref_file,
Tolerance=1e-08)
self._success = result[0]
if not self._success:
SaveMD(InputWorkspace=slice_ws,
Filename='TobyFitResolutionSimulationTest-mismatch.nxs')
def validate(self):
return self._success
| gpl-3.0 |
mfazliazran/raft | core/crawler/SpiderConfig.py | 11 | 4959 | #
# Author: Gregory Fleischer (gfleischer@gmail.com)
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from PyQt4.QtCore import Qt, QObject, SIGNAL, QUrl
import json
import re
from urllib import parse as urlparse
class SpiderConfig(QObject):
def __init__(self, framework, parent = None):
QObject.__init__(self, parent)
self.framework = framework
self.re_dangerous_path = None
self.re_media_extension = None
self.default_dangerous_paths = 'delete|remove|destroy'
self.default_media_extensions = 'wmv,mp3,mp4,mpa,gif,jpg,jpeg,png'
self.framework.subscribe_raft_config_populated(self.configuration_populated)
self.framework.subscribe_raft_config_updated(self.configuration_updated)
def configuration_populated(self):
self.fill_spider_configuration(self.framework.get_raft_config_value('SPIDER', str))
def configuration_updated(self, name, value):
if name == 'SPIDER':
self.fill_spider_configuration(value)
def config_value_or_default(self, obj, config_name, default_value):
if config_name in obj:
return obj[config_name]
elif default_value is not None:
return default_value
else:
return ''
def fill_spider_configuration(self, configuration):
if configuration:
obj = json.loads(configuration)
else:
obj = {}
self.submit_forms = bool(self.config_value_or_default(obj, 'submit_forms', True))
self.use_data_bank = bool(self.config_value_or_default(obj, 'use_data_bank', True))
self.submit_user_name_password = bool(self.config_value_or_default(obj, 'submit_user_name_password', True))
self.evaluate_javascript = bool(self.config_value_or_default(obj, 'evaluate_javascript', True))
self.iterate_user_agents = bool(self.config_value_or_default(obj, 'iterate_user_agents', True))
self.retrieve_media_files = bool(self.config_value_or_default(obj, 'retrieve_media_files', True))
self.exclude_dangerous_paths = bool(self.config_value_or_default(obj, 'exclude_dangerous_paths', False))
self.dangerous_path = str(self.config_value_or_default(obj, 'dangerous_path', self.default_dangerous_paths))
self.max_links = int(self.config_value_or_default(obj, 'max_links', 8192))
self.max_link_depth = int(self.config_value_or_default(obj, 'max_link_depth', 6))
self.max_children = int(self.config_value_or_default(obj, 'max_children', 256))
self.max_unique_parameters = int(self.config_value_or_default(obj, 'max_unique_parameters', 16))
self.redundant_content_limit = int(self.config_value_or_default(obj, 'redundant_content_limit', 128))
self.redundant_structure_limit = int(self.config_value_or_default(obj, 'redundant_structure_limit', 256))
self.media_extensions = str(self.config_value_or_default(obj, 'media_extensions', self.default_media_extensions))
if self.exclude_dangerous_paths:
try:
self.re_dangerous_path = re.compile(self.dangerous_path, re.I)
except re.error as error:
                self.log_warning('Failed to compile RE [%s]: %s' % (self.dangerous_path, error))
# TODO: or should this fail completely?
self.re_dangerous_path = re.compile(self.default_dangerous_paths, re.I)
if not self.retrieve_media_files:
try:
self.re_media_extensions = self.make_media_extension_re(self.media_extensions)
except re.error as error:
self.re_media_extensions = self.make_media_extension_re(self.default_media_extensions)
def make_media_extension_re(self, extension_string):
if extension_string is None or '' == extension_string:
extension_string = self.default_media_extensions
extensions = []
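        # Normalise entries written as 'ext', '.ext' or '\.ext' into escaped
        # '\.ext' alternatives for the anchored pattern built below.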
        for extension in extension_string.split(','):
if extension.startswith('\\.'):
extensions.append(extension)
pass
elif extension.startswith('.'):
extensions.append(re.escape(extension))
else:
extensions.append('\.'+re.escape(extension))
return re.compile('^(?:%s)$' % '|'.join(extensions))
| gpl-3.0 |
linsalrob/EdwardsLab | trees/trim_alignment.py | 1 | 4393 | import argparse
import os
import sys
import re
__author__ = 'Rob Edwards'
"""
Trim an alignment file (in phylip format) based on the contents of the columns
"""
def parse_phylip_file(filename):
"""
Parse the phylip alignment file
:param filename:The file to parse
:type filename: str
:return: Two lists, one of the sequence ids and one of the alignments
:rtype: list, list
"""
seqids = []
alignmentdata = []
with open(filename, 'r') as f:
l = f.readline()
nalignments, alignlen = map(int, l.strip().split())
currentline = 0
for l in f:
# sys.stderr.write("Current line: {} {}\n".format(currentline, l.strip()))
# if currentline == nalignments - 1:
# currentline += 1
# continue
if currentline == nalignments:
currentline = 0
continue
if not l.startswith(' '):
m = re.match('^\S+', l)
if m:
seqids.append(m.group())
else:
sys.stderr.write("Can't find a sequence ID at line {} : {}\n".format(currentline, l))
l = re.sub('^\S+', '', l)
alignmentdata.append("")
alignmentdata[currentline] += l.strip().replace(' ', '')
currentline += 1
for a in alignmentdata:
if len(a) != alignlen:
sys.stderr.write("Alignment {} has length, but should be {}\n".format(len(a), alignlen))
return seqids, alignmentdata
def print_alignment(seqids, alignment, linelength=65):
"""
Print the alignment in phylip format
:param seqids: The list of sequence ids
:type seqids: list of str
:param alignment: The list of alignment strings
:type alignment: list of str
    :param linelength: the desired line length to print (default = 65; currently unused, the body writes fixed 50-column blocks)
:type linelength: int
:return:
:rtype:
"""
alignmentlen = len(alignment[0])
naligns = len(alignment)
lastend = 0
print(" {} {}".format(naligns, alignmentlen))
for i in range(len(seqids)):
# print the sequence id and the correct number of spaces. max spaces = 11
nspaces = 11-len(seqids[i])
spaces = nspaces * ' '
sys.stdout.write('{}{}'.format(seqids[i], spaces))
end = 0
for p in range(1, 6):
start = end
end = p * 10
sys.stdout.write(alignment[i][start:end] + " ")
sys.stdout.write("\n")
lastend = end
sys.stdout.write("\n")
# now we just need to write out the rest of the data
while lastend <= alignmentlen:
for a in alignment:
end = lastend
sys.stdout.write(11 * ' ')
for p in range(1, 6):
start = end
end = (p * 10) + lastend
sys.stdout.write(a[start:end] + " ")
# sys.stderr.write("{} to {}\n".format(start, end))
sys.stdout.write("\n")
sys.stdout.write("\n")
lastend = end
def trim_alignment(alignment, cutoff):
"""
Trim the alignment based on the number of informative residues
:param alignment: The list of alignment strings
:type alignment: list of str
:param cutoff: The cutoff for informative residues
:type cutoff: float
:return: The revised list of alignments
:rtype: list of str
"""
alignmentlen = len(alignment[0])
naligns = len(alignment)
keepposn = []
for i in range(alignmentlen):
non_ir = 0 # non informative residues (i.e. '-')
for a in alignment:
if a[i] == '-':
non_ir += 1
if (1.0 * (naligns - non_ir) / naligns) >= cutoff:
keepposn.append(i)
newalignment = []
for a in alignment:
newalignment.append(''.join([a[i] for i in keepposn]))
return newalignment
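# For example, with cutoff=0.5 a column survives only if at least half of the
# sequences carry a residue (anything other than '-') at that position.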
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Trim an alignment file')
parser.add_argument('-p', help='phylip alignment file', required=True)
parser.add_argument('-c', help='minimum coverage as a fraction', required=True, type=float)
args=parser.parse_args()
seqids, alignment = parse_phylip_file(args.p)
newalignment = trim_alignment(alignment, args.c)
# newalignment = alignment
print_alignment(seqids, newalignment)
| mit |
StefanRijnhart/odoomrp-wip | mrp_project_link/models/mrp_production.py | 4 | 5630 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
class MrpProduction(models.Model):
_inherit = 'mrp.production'
project_id = fields.Many2one("project.project", string="Project")
analytic_account_id = fields.Many2one(
"account.analytic.account", string="Analytic Account")
@api.one
@api.onchange('project_id')
def onchange_project_id(self):
self.analytic_account_id = self.project_id.analytic_account_id
@api.multi
def action_in_production(self):
task_obj = self.env['project.task']
for record in self:
task_domain = [('mrp_production_id', '=', record.id),
('wk_order', '=', False)]
tasks = task_obj.search(task_domain)
if not tasks:
task_name = ("%s:: [%s]%s") % (record.name,
record.product_id.default_code,
record.product_id.name)
task_descr = _("""
Manufacturing Order: %s
Product to Produce: [%s]%s
Quantity to Produce: %s
Bill of Material: %s
Planned Date: %s
""") % (record.name, record.product_id.default_code,
record.product_id.name, record.product_qty,
record.bom_id.name, record.date_planned)
task_values = {
'mrp_production_id': record.id,
'user_id': record.user_id.id,
'reviewer_id': record.user_id.id,
'name': task_name,
'project_id': record.project_id.id,
'description': task_descr
}
if 'code' in task_values.keys():
task_values.pop('code')
task_obj.create(task_values)
return super(MrpProduction, self).action_in_production()
@api.multi
def action_confirm(self):
procurement_obj = self.env['procurement.order']
mto_record = self.env.ref('stock.route_warehouse0_mto')
result = super(MrpProduction, self).action_confirm()
for record in self:
if record.project_id:
main_project = record.project_id.id
for move in record.move_lines:
if mto_record in move.product_id.route_ids:
move.main_project_id = main_project
procurements = procurement_obj.search(
[('move_dest_id', '=', move.id)])
procurements.write({'main_project_id': main_project})
procurements.refresh()
procurements.set_main_project()
return result
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
@api.multi
def action_start_working(self):
task_obj = self.env['project.task']
res = super(MrpProductionWorkcenterLine, self).action_start_working()
for record in self:
task_domain = [('mrp_production_id', '=', record.production_id.id),
('wk_order', '=', False)]
production_tasks = task_obj.search(task_domain)
task_descr = _("""
Manufacturing Order: %s
Work Order: %s
Workcenter: %s
Cycle: %s
Hour: %s
""") % (record.production_id.name, record.name,
record.workcenter_id.name, record.cycle, record.hour)
task_values = {
'mrp_production_id': record.production_id.id,
'wk_order': record.id,
'user_id': False,
'reviewer_id': record.production_id.user_id.id,
'description': task_descr,
'project_id': record.production_id.project_id.id,
'parent_ids': [(6, 0, production_tasks.ids)]
}
if record.routing_wc_line.operation:
count = record.routing_wc_line.operation.op_number
for i in range(count):
task_name = (_("%s:: WO%s-%s:: %s") %
(record.production_id.name,
str(record.sequence).zfill(3),
str(i).zfill(3), record.name))
task_values['name'] = task_name
if 'code' in task_values.keys():
task_values.pop('code')
task_obj.create(task_values)
return res
class MrpProductionProductLine(models.Model):
_inherit = 'mrp.production.product.line'
task_id = fields.Many2one('project.task', string="Task")
| agpl-3.0 |
shrenikgala/servo | python/mach_bootstrap.py | 13 | 3022 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import os
import platform
import sys
SEARCH_PATHS = [
"python/mach",
"python/toml",
"python/mozinfo",
"python/mozdebug",
]
# Individual files providing mach commands.
MACH_MODULES = [
'python/servo/bootstrap_commands.py',
'python/servo/build_commands.py',
'python/servo/testing_commands.py',
'python/servo/post_build_commands.py',
'python/servo/devenv_commands.py',
]
CATEGORIES = {
'bootstrap': {
'short': 'Bootstrap Commands',
'long': 'Bootstrap the build system',
'priority': 90,
},
'build': {
'short': 'Build Commands',
'long': 'Interact with the build system',
'priority': 80,
},
'post-build': {
'short': 'Post-build Commands',
'long': 'Common actions performed after completing a build.',
'priority': 70,
},
'testing': {
'short': 'Testing',
'long': 'Run tests.',
'priority': 60,
},
'devenv': {
'short': 'Development Environment',
'long': 'Set up and configure your development environment.',
'priority': 50,
},
'build-dev': {
'short': 'Low-level Build System Interaction',
'long': 'Interact with specific parts of the build system.',
'priority': 20,
},
'misc': {
'short': 'Potpourri',
'long': 'Potent potables and assorted snacks.',
'priority': 10,
},
'disabled': {
'short': 'Disabled',
'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable for your current context, run "mach <command>" to see why.',
'priority': 0,
}
}
def bootstrap(topdir):
topdir = os.path.abspath(topdir)
# Ensure we are running Python 2.7+. We put this check here so we generate a
# user-friendly error message rather than a cryptic stack trace on module
# import.
if sys.version_info[0] != 2 or sys.version_info[1] < 7:
print('Python 2.7 or above (but not Python 3) is required to run mach.')
print('You are running Python', platform.python_version())
sys.exit(1)
def populate_context(context, key=None):
if key is None:
return
if key == 'topdir':
return topdir
raise AttributeError(key)
sys.path[0:0] = [os.path.join(topdir, path) for path in SEARCH_PATHS]
import mach.main
mach = mach.main.Mach(os.getcwd())
mach.populate_context_handler = populate_context
for category, meta in CATEGORIES.items():
mach.define_category(category, meta['short'], meta['long'],
meta['priority'])
for path in MACH_MODULES:
mach.load_commands_from_file(os.path.join(topdir, path))
return mach
| mpl-2.0 |
Atomistica/user-gfmd | tests/TEST_Hertz_fcc100_128x128/eval.py | 2 | 2275 | # ======================================================================
# USER-GFMD - Elastic half-space methods for LAMMPS
# https://github.com/Atomistica/user-gfmd
#
# Copyright (2011-2016,2021)
# Lars Pastewka <lars.pastewka@imtek.uni-freiburg>,
# Tristan A. Sharp and others.
# See the AUTHORS file in the top-level USER-GFMD directory.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
#! /usr/bin/env python
import glob
from math import pi, sqrt
import sys
import numpy as np
###
R = 100.0
#G = 1.0
#nu = 0.3
#E = 2*G*(1+nu)/(1-nu*nu)
E = 1.39 # Contact modulus
###
fns = glob.glob('gfmd.*.r.f2.out')
fns.remove('gfmd.0.r.f2.out')
if len(fns) > 1:
raise RuntimeError('More than one GFMD output found. Not sure which one to use.')
f_xy = np.loadtxt(fns[0])
nx, ny = f_xy.shape
###
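# Radial bin edges grow as r -> sqrt(r^2 + r0^2), so successive annuli all have
# equal area (pi*r0^2).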
r0 = 3.0
rbins = [ r0 ]
r2 = r0
while r2 < nx/4:
r2 = sqrt(r2*r2+r0*r0)
rbins += [ r2 ]
###
x = np.arange(nx)+0.5
x = np.where(x > nx/2, x-nx, x)
y = np.arange(ny)+0.5
y = np.where(y > ny/2, y-ny, y)
r_xy = np.sqrt( (x**2).reshape(-1,1) + (y**2).reshape(1,-1) )
### Pressure as a function of distance
N = np.sum(f_xy)
a = R*(3./4*( N/(E*R**2) ))**(1./3)
p0 = 3*N/(2*pi*a*a)
### Compute residual
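# Analytical Hertz pressure profile: p(r) = p0*sqrt(1 - (r/a)^2) inside the
# contact (r < a) and zero outside, compared pointwise against the GFMD forces.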
fa_xy = np.where(r_xy<a, p0*np.sqrt(1-(r_xy/a)**2), np.zeros_like(r_xy))
res = np.sum( (f_xy - fa_xy)**2 )
if res > 1e-2:
raise RuntimeError('Residual outside bounds: res = %f' % res)
###
if len(sys.argv) == 2 and sys.argv[1] == '--dump':
print('Residual: ', res)
print('Dumping f.out...')
np.savetxt('f.out', np.transpose([r_xy.reshape(-1), f_xy.reshape(-1), fa_xy.reshape(-1)]))
| gpl-2.0 |
nukeop/codexscripts | scripts/brofister.py | 1 | 2186 | import argparse
import requests
from gooey import Gooey, GooeyParser
from tqdm import tqdm
from collections import OrderedDict
from login import login
from users import collect_post_ids
RATINGS = [("Brofist", 1), ("Agree", 2), ("Disagree", 3), ("Funny", 4), ("Salute", 5), ("Informative", 6),
("Friendly", 7), ("Fabulous", 9), ("Creative", 10), ("Old", 11), ("Bad Spelling", 12), ("Dumb", 13),
("Prestigious", 15), ("butthurt", 16), ("Racist", 17), ("Thanks!", 18), (",M (Parrot)", 19),
("Acknowledge this user's Agenda", 20), ("prosper", 21), ("Negative", 22), ("Doggy", 23),
("Excited!", 24), ("Shit", 25), ("Rage", 26), ("Citation Needed", 27), ("Undo rating", "del")]
# use an ordered dict to maintain order when converting to choices array
RATINGS = OrderedDict(RATINGS)
# Find all posts of a user and rate all of them.
@Gooey
def main():
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Brofist all posts of a user.')
parser.add_argument('username', help="Your Codex username")
parser.add_argument('password', help="Your Codex password")
parser.add_argument('usertofist', help="User you want to rate")
parser.add_argument('rating', choices=[k for k in RATINGS], help="Rating you want to give")
args = parser.parse_args()
session, data = login(args.username, args.password)
ids = sorted(list(set(collect_post_ids(session, data, args.usertofist, "index.php?search/search", godeeper=True))))
# Generate rate links via list comprehension
rating_id = RATINGS[args.rating]
ratelinks = ["http://www.rpgcodex.net/forums/index.php?posts/{0}/rate&rating={1}&_xfToken={2}".format(x, rating_id, data["_xfToken"]) for x in ids]
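        # Each generated link has the shape (illustrative IDs/token only):
        #   http://www.rpgcodex.net/forums/index.php?posts/1234567/rate&rating=1&_xfToken=abc123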
# Set some data to enable responses in json
data["users"] = None
del data["users"]
data["_xfResponseType"] = "json"
data["_xfNoRedirect"] = 1
# Engage fisting
for link in tqdm(ratelinks):
data["_xfRequestUri"] = link[23:]
print "Fisting post in {}".format(link)
session.post(link, data=data)
main()
| gpl-3.0 |
ddcrjlalumiere/pyvmomi-community-samples | samples/suds-to-pyvmomi.py | 13 | 4418 | #!/usr/bin/env python
import argparse
import getpass
import suds
import pyVim.connect as connect
# suds-to-pyvmomi.py
#
# Some projects will want to incorporate pyVmomi into suds based projects. This
# sample shows how to take a suds cookie and inject it into pyVmomi so you may
# use pyVmomi and suds along side each other.
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
required=True,
action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port',
required=False,
action='store',
help="port to use, default 443",
default=443)
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
args = parser.parse_args()
if args.password:
password = args.password
else:
password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
url = "https://%s/sdk/vimService.wsdl" % args.host
print "Python suds..."
client = suds.client.Client(url, location=url)
si = suds.sudsobject.Property("ServiceInstance")
si._type = "ServiceInstance"
sc = client.service.RetrieveServiceContent(si)
client.service.Login(sc.sessionManager,
userName=args.user,
password=password)
def get_current_session(client):
property_filter_spec = client.factory.create('ns0:PropertyFilterSpec')
property_spec = client.factory.create('ns0:PropertySpec')
property_spec.pathSet = ['currentSession']
property_spec.type = "SessionManager"
property_filter_spec.propSet = [property_spec]
object_spec = client.factory.create('ns0:ObjectSpec')
object_spec.obj = sc.sessionManager
object_spec.skip = False
property_filter_spec.objectSet = [object_spec]
options = client.factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
results = client.service.RetrievePropertiesEx(sc.propertyCollector,
specSet=[
property_filter_spec],
options=options)
def get_property(self, name):
for obj in self.objects:
if not hasattr(obj, 'propSet'):
return None
for prop in obj.propSet:
if prop.name == name:
return prop.val
results.__class__.get_property = get_property
return results.get_property('currentSession')
current_session = get_current_session(client)
if current_session:
print "current session id: %s" % current_session.key
cookies = client.options.transport.cookiejar
for cookie in cookies:
print "cookie '%s' contents: %s" % (cookie.name, cookie.value)
else:
print "not logged in"
raise RuntimeError("this sample doesn't work if you can't authenticate")
# now to move the current session ID over to pyVmomi
VMWARE_COOKIE_NAME = 'vmware_soap_session'
def extract_vmware_cookie_suds(client):
cookiejar = client.options.transport.cookiejar
for cookie in cookiejar:
if cookie.name == VMWARE_COOKIE_NAME:
return '%s=%s' % (cookie.name, cookie.value)
# dynamically inject this method into the suds client:
client.__class__.extract_vmware_cookie = extract_vmware_cookie_suds
print "=" * 80
print "suds session to pyvmomi "
# Unfortunately, you can't connect without a login in pyVmomi
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
# logout the current session since we won't be using it.
si.content.sessionManager.Logout()
# inject the pyVmomi stub with the suds cookie values...
si._stub.cookie = client.extract_vmware_cookie()
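# Note: the injected value has the form 'vmware_soap_session="<opaque id>"'
# (the exact value shape is an assumption based on typical vSphere sessions);
# pyVmomi sends it verbatim as the Cookie header on each SOAP call, so both
# clients now share a single authenticated session.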
print "current suds session id: "
print get_current_session(client).key
print
print "current pyVmomi session id: %s"
print si.content.sessionManager.currentSession.key
print
# always clean up your sessions:
si.content.sessionManager.Logout()
| apache-2.0 |
Antiun/odoo | addons/hr_contract/hr_contract.py | 302 | 5377 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
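    # Note: in the old OpenERP on_change API, handlers such as the one above
    # return {'value': {field: new_value}}; the client merges those values
    # into the unsaved form, here defaulting job_id from the chosen employee.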
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223137/150601 | static/Brython3.1.3-20150514-095342/Lib/_markupbase.py | 891 | 14598 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in {"attlist", "linktype", "link", "element"}:
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in {"if", "else", "endif"}:
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in {"attlist", "element", "entity", "notation"}:
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan a name token and the new position and the token, or
    # return None, -1 if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
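# Illustrative sketch (not part of the stdlib module; the _Demo names are
# invented): ParserBase is only useful subclassed -- html.parser.HTMLParser
# is the real consumer. A minimal use of the declaration scanning above:
if __name__ == "__main__":
    class _Demo(ParserBase):
        def __init__(self):
            self.reset()
        def error(self, message):
            raise AssertionError(message)
        def handle_decl(self, data):
            print(data)
    _d = _Demo()
    _d.rawdata = "<!DOCTYPE html>"
    _d.parse_declaration(0)   # prints: DOCTYPE html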
| agpl-3.0 |
facebookexperimental/eden | eden/hg-server/tests/test-metalog-migration-t.py | 1 | 2142 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.autofix import eq
from testutil.dott import feature, sh, testtmp # noqa: F401
def backup():
"""Backup .hg/store/{bookmarks,remotenames}"""
for name in ["bookmarks", "remotenames"]:
path = ".hg/store/%s" % name
sh.cp(path, "%s.bak" % path)
def restore():
"""Rewrite .hg/store/{bookmarks,remotenames} with backup"""
for name in ["bookmarks", "remotenames"]:
path = ".hg/store/%s" % name
sh.cp("%s.bak" % path, path)
def setbookmarks(name):
"""Set bookmarks to specified commit"""
sh.hg("bookmark", "book", "-r", "desc(%s)" % name)
sh.hg("debugremotebookmark", "remotebook", "desc(%s)" % name)
def listbookmarks():
"""List local and remote bookmarks"""
local = sh.hg("log", "-r", sh.hg("bookmarks", "-T", "{node}"), "-T{desc}")
remote = sh.hg(
"log", "-r", sh.hg("bookmarks", "--remote", "-T", "{node}"), "-T{desc}"
)
return [local, remote]
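# Note: eq() (from testutil.autofix) asserts that its two arguments are
# equal, so each check below compares the [local_desc, remote_desc] pair
# returned by listbookmarks() against the expected commit descriptions.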
sh.newrepo()
sh.setconfig("experimental.metalog=0")
sh.enable("remotenames")
(
sh % "drawdag"
<< r"""
C
|
B
|
A
"""
)
# Prepare bookmarks and remotenames. Set them to A in backup, and B on disk.
setbookmarks("A")
backup()
setbookmarks("B")
# Test migrating from disk to metalog.
# They should migrate "B" from disk to metalog and use it.
sh.setconfig("experimental.metalog=1")
eq(listbookmarks(), ["B", "B"])
# Metalog is the source of truth. Changes to .hg/store are ignored.
restore()
eq(listbookmarks(), ["B", "B"])
# Test migrating from metalog to disk.
# Metalog is not the source of truth. Changes to .hg/store are effective.
sh.setconfig("experimental.metalog=0")
setbookmarks("C")
eq(listbookmarks(), ["C", "C"])
restore()
eq(listbookmarks(), ["A", "A"])
# Migrate up again.
# At this time metalog should import "A" from disk to metalog, instead of
# using "B" that exists in metalog.
sh.setconfig("experimental.metalog=1")
eq(listbookmarks(), ["A", "A"])
| gpl-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/web/test/test_distrib.py | 3 | 15220 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
from twisted.test import proto_helpers
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
Clean up all the event sources left behind by either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild("there", static.Data("root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild("here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
d = client.getPage("http://127.0.0.1:%d/here/there" % \
self.port2.getHost().port)
d.addCallback(self.assertEqual, 'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild("child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{getPage} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
return client.getPage("http://%s:%s/child" % (
mainAddr.host, mainAddr.port), **kwargs)
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
d = client.Agent(reactor).request("GET", "http://%s:%s/child" % (
mainAddr.host, mainAddr.port), **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
class ReportRequestHeaders(resource.Resource):
def render(self, request):
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return ""
request = self._requestTest(
ReportRequestHeaders(), headers={'foo': 'bar'})
def cbRequested(result):
self.assertEqual(requestHeaders['Foo'], ['bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, "")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, "OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, "some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, "")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, "some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write('x' * SIZE_LIMIT + 'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, 'x' * SIZE_LIMIT + 'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return 'x' * SIZE_LIMIT + 'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, 'x' * SIZE_LIMIT + 'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1)
d.addCallback(cbRendered)
return d
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
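# Note: the 7-tuples below mirror the pwd.struct_passwd layout
# (pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell), which
# is why index [-2] in the tests reads the user's home directory.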
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent("")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| mit |
carlgao/lenga | images/lenny64-peon/usr/share/python-support/python-django/django/contrib/gis/gdal/srs.py | 30 | 11716 | """
The Spatial Reference class, representing OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print srs
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print srs.ellipsoid
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print srs.projected, srs.geographic
False True
>>> srs.import_epsg(32140)
>>> print srs.name
NAD83 / Texas South Central
"""
import re
from ctypes import byref, c_char_p, c_int, c_void_p
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL website,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
buf = c_char_p('')
srs_type = 'user'
if isinstance(srs_input, basestring):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, unicode):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, (int, long)):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984", ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print srs['GEOGCS']
WGS 84
>>> print srs['DATUM']
WGS_1984
>>> print srs['AUTHORITY']
EPSG
>>> print srs['AUTHORITY', 1] # The authority value
4326
>>> print srs['TOWGS84', 4] # the fourth value in this wkt
0
        >>> print srs['UNIT|AUTHORITY'] # For the units authority, you have to use the pipe symbol.
EPSG
>>> print srs['UNIT|AUTHORITY', 1] # The authority value for the untis
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, basestring) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, target, index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, target)
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, target)
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
and will automatically determines whether to return the linear
or angular units.
"""
if self.projected or self.local:
return capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
return capi.angular_units(self.ptr, byref(c_char_p()))
else:
return (None, None)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, user_input)
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
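# Illustrative sketch (assumes GDAL and these bindings are importable; the
# import path, EPSG code and coordinates are examples, not part of the
# module): transforming a WGS84 point into NAD83 / Texas South Central.
if __name__ == "__main__":
    from django.contrib.gis.gdal.geometries import OGRGeometry
    ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(32140))
    pt = OGRGeometry('POINT (-95.3 29.7)')
    pt.transform(ct)
    print pt.wkt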
| mit |
matsv339/Cast-Away | assets/libs/bower_components/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
| mit |
ESSolutions/ESSArch_Core | ESSArch_Core/fixity/validation/backends/xml.py | 1 | 22555 | import copy
import logging
import os
from os import walk
import click
from django.utils import timezone
from lxml import etree, isoschematron
from ESSArch_Core.essxml.util import (
find_files,
find_pointers,
validate_against_schema,
)
from ESSArch_Core.exceptions import ValidationError
from ESSArch_Core.fixity.checksum import calculate_checksum
from ESSArch_Core.fixity.models import Validation
from ESSArch_Core.fixity.validation.backends.base import BaseValidator
from ESSArch_Core.util import normalize_path, win_to_posix
logger = logging.getLogger('essarch.fixity.validation.xml')
class DiffCheckValidator(BaseValidator):
"""
Validates the file against a given XML to see if is an entirely new file or
if it has been changed or renamed/moved.
The post validation checks if there are files that has been deleted after
the XML was generated.
"""
file_validator = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.context:
raise ValueError('A context (xml) is required')
self.context = normalize_path(self.context)
self.rootdir = self.options.get('rootdir')
self.recursive = self.options.get('recursive', True)
self.default_algorithm = self.options.get('default_algorithm', 'SHA-256')
self.initial_present = {} # Map checksum -> fname
self.initial_deleted = {} # Map checksum -> fname
self.sizes = {} # Map fname -> size
self.checksums = {} # Map fname -> checksum
self.checksum_algorithms = {} # Map fname -> checksum algorithm
self._get_files()
for logical in self.logical_files:
            if self.rootdir is not None:
                # NOTE: os.path.join() with a single argument is a no-op, so
                # both branches currently yield logical.path unchanged.
                logical_path = os.path.join(logical.path)
            else:
                logical_path = logical.path
logical_path = win_to_posix(logical_path)
try:
self.initial_deleted[logical.checksum].append(logical_path)
except KeyError:
self.initial_deleted[logical.checksum] = [logical_path]
try:
self.initial_present[logical.checksum].append(logical_path)
except KeyError:
self.initial_present[logical.checksum] = [logical_path]
self.checksums[logical_path] = logical.checksum
self.checksum_algorithms[logical_path] = logical.checksum_type
self.sizes[logical_path] = logical.size
def _reset_dicts(self):
self.present = copy.deepcopy(self.initial_present)
self.deleted = copy.deepcopy(self.initial_deleted)
def _reset_counters(self):
self.confirmed = 0
self.added = 0
self.changed = 0
self.renamed = 0
def _get_files(self):
self.logical_files = find_files(self.context, rootdir=self.rootdir, recursive=self.recursive)
def _create_obj(self, filename, passed, msg):
return Validation(
filename=filename,
time_started=timezone.now(),
time_done=timezone.now(),
validator=self.__class__.__name__,
required=self.required,
task=self.task,
information_package_id=self.ip,
responsible=self.responsible,
message=msg,
passed=passed,
specification={
'context': self.context,
'options': self.options,
}
)
def _pop_checksum_dict(self, d, checksum, filepath):
checksum_list = d[checksum]
checksum_list.remove(filepath)
if not len(checksum_list):
d.pop(checksum)
def _get_filepath(self, input_file):
return input_file
def _get_checksum(self, input_file, relpath=None):
path = relpath or input_file
algorithm = self.checksum_algorithms.get(path) or self.default_algorithm
return calculate_checksum(input_file, algorithm=algorithm)
def _get_size(self, input_file):
return os.path.getsize(input_file)
def _validate(self, filepath):
relpath = normalize_path(os.path.relpath(self._get_filepath(filepath), self.rootdir))
newhash = self._get_checksum(filepath, relpath=relpath)
newsize = self._get_size(filepath)
try:
self._pop_checksum_dict(self.deleted, newhash, relpath)
except (KeyError, ValueError):
pass
if newhash in self.present:
try:
self._pop_checksum_dict(self.present, newhash, relpath)
except ValueError:
self.present[newhash].append(relpath)
return
else:
self.present[newhash] = [relpath]
if relpath not in self.checksums:
return
oldhash = self.checksums[relpath]
if oldhash is None:
self._pop_checksum_dict(self.deleted, oldhash, relpath)
self._pop_checksum_dict(self.present, oldhash, relpath)
self._pop_checksum_dict(self.present, newhash, relpath)
elif oldhash != newhash:
self.deleted.pop(oldhash, None)
self.changed += 1
msg = '{f} checksum has been changed: {old} != {new}'.format(f=relpath, old=oldhash, new=newhash)
logger.error(msg)
self._pop_checksum_dict(self.present, oldhash, relpath)
self._pop_checksum_dict(self.present, newhash, relpath)
return self._create_obj(relpath, False, msg)
oldsize = self.sizes[relpath]
if oldsize is not None and newsize is not None and oldsize != newsize:
self.deleted.pop(oldhash, None)
self.changed += 1
msg = '{f} size has been changed: {old} != {new}'.format(f=relpath, old=oldsize, new=newsize)
logger.error(msg)
return self._create_obj(relpath, False, msg)
self.confirmed += 1
msg = '{f} confirmed in xml'.format(f=relpath)
logger.debug(msg)
return self._create_obj(relpath, True, msg)
def _validate_deleted_files(self, objs):
delete_count = 0
for deleted_hash, deleted_hash_files in self.deleted.items():
present_hash_files = self.present.get(deleted_hash, [])
for f in present_hash_files[:]:
if f not in deleted_hash_files:
try:
old = deleted_hash_files.pop()
self.renamed += 1
msg = '{old} has been renamed to {new}'.format(old=old, new=f)
logger.error(msg)
objs.append(self._create_obj(old, False, msg))
present_hash_files.remove(old)
present_hash_files.remove(f)
except IndexError:
pass
for f in deleted_hash_files:
msg = '{file} has been deleted'.format(file=f)
logger.error(msg)
objs.append(self._create_obj(f, False, msg))
delete_count += 1
present_hash_files.remove(f)
if not len(present_hash_files):
self.present.pop(deleted_hash, None)
return delete_count
def _validate_present_files(self, objs):
for _present_hash, present_hash_files in self.present.items():
for f in present_hash_files:
self.added += 1
msg = '{f} is missing from {xml}'.format(f=f, xml=self.context)
logger.error(msg)
objs.append(self._create_obj(f, False, msg))
def validate(self, path, expected=None):
xmlfile = self.context
objs = []
self._reset_dicts()
self._reset_counters()
logger.debug('Validating {path} against {xml}'.format(path=path, xml=xmlfile))
if os.path.isdir(path):
for root, _dirs, files in walk(path):
for f in files:
filepath = normalize_path(os.path.join(root, f))
if filepath in self.exclude or filepath == xmlfile:
continue
objs.append(self._validate(filepath))
else:
objs.append(self._validate(path))
delete_count = self._validate_deleted_files(objs)
self._validate_present_files(objs)
objs = [o for o in objs if o is not None]
Validation.objects.bulk_create(objs, batch_size=100)
if delete_count + self.added + self.changed + self.renamed > 0:
msg = ('Diff-check validation of {path} against {xml} failed: '
'{cfmd} confirmed, {a} added, {c} changed, {r} renamed, {d} deleted').format(
path=path, xml=self.context, cfmd=self.confirmed, a=self.added, c=self.changed, r=self.renamed,
d=delete_count)
logger.warning(msg)
raise ValidationError(msg)
logger.info("Successful diff-check validation of {path} against {xml}".format(path=path, xml=self.context))
class XMLComparisonValidator(DiffCheckValidator):
def _get_files(self):
skip_files = [p.path for p in find_pointers(self.context)]
self.logical_files = find_files(
self.context,
rootdir=self.rootdir,
skip_files=skip_files,
recursive=self.recursive,
)
def _get_filepath(self, input_file):
return normalize_path(os.path.join(self.rootdir, input_file.path))
def _get_checksum(self, input_file, relpath=None):
return input_file.checksum
def _get_size(self, input_file):
return input_file.size
def validate(self, path, expected=None):
xmlfile = self.context
objs = []
self._reset_dicts()
self._reset_counters()
logger.debug('Validating {path} against {xml}'.format(path=path, xml=xmlfile))
checksum_in_context_file = self.checksums.get(path)
if checksum_in_context_file:
try:
self._pop_checksum_dict(self.deleted, checksum_in_context_file, path)
self._pop_checksum_dict(self.present, checksum_in_context_file, path)
except (KeyError, ValueError):
pass
skip_files = [os.path.relpath(xmlfile, self.rootdir)]
skip_files.extend([p.path for p in find_pointers(path)])
skip_files = list(map(normalize_path, skip_files))
for f in find_files(path, rootdir=self.rootdir, skip_files=skip_files, recursive=self.recursive):
if f in self.exclude:
continue
objs.append(self._validate(f))
delete_count = self._validate_deleted_files(objs)
self._validate_present_files(objs)
if checksum_in_context_file:
try:
self.deleted[checksum_in_context_file].append(path)
except KeyError:
self.deleted[checksum_in_context_file] = [path]
try:
self.present[checksum_in_context_file].append(path)
except KeyError:
self.present[checksum_in_context_file] = [path]
objs = [o for o in objs if o is not None]
Validation.objects.bulk_create(objs, batch_size=100)
if delete_count + self.added + self.changed + self.renamed > 0:
msg = ('Comparison of {path} against {xml} failed: '
'{cfmd} confirmed, {a} added, {c} changed, {r} renamed, {d} deleted').format(
path=path, xml=self.context, cfmd=self.confirmed, a=self.added, c=self.changed, r=self.renamed,
d=delete_count)
logger.warning(msg)
raise ValidationError(msg)
logger.info("Successful comparison of {path} against {xml}".format(path=path, xml=self.context))
class XMLSchemaValidator(BaseValidator):
def validate(self, filepath, expected=None):
if self.context:
logger.debug('Validating schema of {xml} against {schema}'.format(xml=filepath, schema=self.context))
else:
logger.debug('Validating schema of {xml}'.format(xml=filepath))
rootdir = self.options.get('rootdir')
etree.clear_error_log()
started = timezone.now()
relpath = os.path.relpath(filepath, rootdir)
try:
validate_against_schema(filepath, self.context, rootdir)
except etree.DocumentInvalid as e:
msg = 'Schema validation of {xml} failed'.format(xml=filepath)
logger.exception(msg)
done = timezone.now()
validation_objs = []
for error in e.error_log:
message = '{line}: {msg}'.format(line=error.line, msg=error.message)
validation_objs.append(Validation(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=message,
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
))
Validation.objects.bulk_create(validation_objs, 100)
raise ValidationError(msg, errors=[o.message for o in validation_objs])
except Exception as e:
msg = 'Unknown error during schema validation of {xml}'.format(xml=filepath)
logger.exception(msg)
done = timezone.now()
Validation.objects.create(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=str(e),
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
)
raise
Validation.objects.create(
passed=True,
validator=self.__class__.__name__,
filename=relpath,
time_started=started,
time_done=timezone.now(),
information_package_id=self.ip,
task=self.task,
)
logger.info("Successful schema validation of {xml}".format(xml=filepath))
@staticmethod
@click.command()
@click.option('--schema', metavar='INPUT', type=click.Path(exists=True), default=None)
@click.argument('path', metavar='INPUT', type=click.Path(exists=True))
def cli(path, schema):
validator = XMLSchemaValidator(context=schema)
try:
validator.validate(path)
click.echo('success!')
except ValidationError as e:
click.echo(e, err=True)
for error in e.errors:
click.echo(error, err=True)
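# Hedged programmatic sketch mirroring the CLI above. 'schema.xsd' and
# 'mets.xml' are hypothetical paths, passing options= to the constructor is
# an assumption inferred from the self.options.get('rootdir') usage above,
# and recording results requires the surrounding Django models/DB. Defined
# as a function so nothing runs at import time.
def _example_schema_validation():
    validator = XMLSchemaValidator(context='schema.xsd', options={'rootdir': '.'})
    try:
        validator.validate('mets.xml')
    except ValidationError as e:
        return e.errors  # per-line schema errors collected above
    return []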
class XMLSyntaxValidator(BaseValidator):
def validate(self, filepath, expected=None):
logger.debug('Validating syntax of {xml}'.format(xml=filepath))
etree.clear_error_log()
started = timezone.now()
try:
etree.parse(filepath)
except etree.XMLSyntaxError as e:
msg = 'Syntax validation of {xml} failed'.format(xml=filepath)
logger.exception(msg)
done = timezone.now()
validation_objs = []
for error in e.error_log:
message = '{line}: {msg}'.format(line=error.line, msg=error.message)
validation_objs.append(Validation(
passed=False,
validator=self.__class__.__name__,
filename=filepath,
message=message,
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
))
Validation.objects.bulk_create(validation_objs, 100)
raise ValidationError(msg, errors=[o.message for o in validation_objs])
except Exception as e:
logger.exception('Unknown error during syntax validation of {xml}'.format(xml=filepath))
done = timezone.now()
Validation.objects.create(
passed=False,
validator=self.__class__.__name__,
filename=filepath,
message=str(e),
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
)
raise
Validation.objects.create(
passed=True,
validator=self.__class__.__name__,
filename=filepath,
time_started=started,
time_done=timezone.now(),
information_package_id=self.ip,
task=self.task,
)
logger.info("Successful syntax validation of {xml}".format(xml=filepath))
@staticmethod
@click.command()
@click.argument('path', metavar='INPUT', type=click.Path(exists=True))
def cli(path):
validator = XMLSyntaxValidator()
try:
validator.validate(path)
except ValidationError as e:
click.echo(e, err=True)
for error in e.errors:
click.echo(error, err=True)
class XMLSchematronValidator(BaseValidator):
def validate(self, filepath, expected=None):
logger.debug('Validating {xml} against {schema}'.format(xml=filepath, schema=self.context))
rootdir = self.options.get('rootdir')
etree.clear_error_log()
started = timezone.now()
relpath = os.path.relpath(filepath, rootdir)
try:
self._validate_schematron(filepath)
except etree.DocumentInvalid as e:
logger.exception(
'Schematron validation of {xml} against {schema} failed'.format(
xml=filepath, schema=self.context
)
)
done = timezone.now()
validation_objs = []
for error in e.error_log:
message = '{line}: {msg}'.format(line=error.line, msg=error.message)
validation_objs.append(Validation(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=message,
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
))
Validation.objects.bulk_create(validation_objs, 100)
raise
except Exception as e:
logger.exception(
'Unknown error during schematron validation of {xml} against {schema}'.format(
xml=filepath, schema=self.context
)
)
done = timezone.now()
Validation.objects.create(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=str(e),
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
)
raise
Validation.objects.create(
passed=True,
validator=self.__class__.__name__,
filename=relpath,
time_started=started,
time_done=timezone.now(),
information_package_id=self.ip,
task=self.task,
)
logger.info(
"Successful schematron validation of {xml} against {schema}".format(
xml=filepath, schema=self.context
)
)
def _validate_schematron(self, filepath):
sct_doc = etree.parse(self.context)
schematron = etree.Schematron(sct_doc)
schematron.assertValid(etree.parse(filepath))
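# Hedged standalone sketch of the lxml Schematron pattern used in
# _validate_schematron above; 'rules.sch' and 'doc.xml' are hypothetical
# files. Unlike assertValid(), validate() returns a bool instead of raising.
def _schematron_check(schema_path='rules.sch', xml_path='doc.xml'):
    schematron = etree.Schematron(etree.parse(schema_path))
    return schematron.validate(etree.parse(xml_path))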
class XMLISOSchematronValidator(BaseValidator):
def validate(self, filepath, expected=None):
logger.debug('Validating {xml} against {schema}'.format(xml=filepath, schema=self.context))
rootdir = self.options.get('rootdir')
etree.clear_error_log()
started = timezone.now()
relpath = os.path.relpath(filepath, rootdir)
try:
self._validate_isoschematron(filepath)
except etree.DocumentInvalid as e:
logger.exception(
'ISO-Schematron validation of {xml} against {schema} failed'.format(
xml=filepath, schema=self.context
)
)
done = timezone.now()
validation_objs = []
for error in e.error_log:
message = '{line}: {msg}'.format(line=error.line, msg=error.message)
validation_objs.append(Validation(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=message,
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
))
Validation.objects.bulk_create(validation_objs, 100)
raise
except Exception as e:
logger.exception(
'Unknown error during iso-schematron validation of {xml} against {schema}'.format(
xml=filepath, schema=self.context
)
)
done = timezone.now()
Validation.objects.create(
passed=False,
validator=self.__class__.__name__,
filename=relpath,
message=str(e),
time_started=started,
time_done=done,
information_package_id=self.ip,
task=self.task,
)
raise
Validation.objects.create(
passed=True,
validator=self.__class__.__name__,
filename=relpath,
time_started=started,
time_done=timezone.now(),
information_package_id=self.ip,
task=self.task,
)
logger.info(
"Successful iso-schematron validation of {xml} against {schema}".format(
xml=filepath, schema=self.context
)
)
def _validate_isoschematron(self, filepath):
sct_doc = etree.parse(self.context)
schematron = isoschematron.Schematron(sct_doc)
schematron.assertValid(etree.parse(filepath))
| gpl-3.0 |
sanjeevtripurari/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/discoverdata.py | 87 | 4109 | """Module to make discovery data test cases available"""
import urlparse
import os.path
from openid.yadis.discover import DiscoveryResult, DiscoveryFailure
from openid.yadis.constants import YADIS_HEADER_NAME
tests_dir = os.path.dirname(__file__)
data_path = os.path.join(tests_dir, 'data')
testlist = [
# success, input_name, id_name, result_name
(True, "equiv", "equiv", "xrds"),
(True, "header", "header", "xrds"),
(True, "lowercase_header", "lowercase_header", "xrds"),
(True, "xrds", "xrds", "xrds"),
(True, "xrds_ctparam", "xrds_ctparam", "xrds_ctparam"),
(True, "xrds_ctcase", "xrds_ctcase", "xrds_ctcase"),
(False, "xrds_html", "xrds_html", "xrds_html"),
(True, "redir_equiv", "equiv", "xrds"),
(True, "redir_header", "header", "xrds"),
(True, "redir_xrds", "xrds", "xrds"),
(False, "redir_xrds_html", "xrds_html", "xrds_html"),
(True, "redir_redir_equiv", "equiv", "xrds"),
(False, "404_server_response", None, None),
(False, "404_with_header", None, None),
(False, "404_with_meta", None, None),
(False, "201_server_response", None, None),
(False, "500_server_response", None, None),
]
def getDataName(*components):
sanitized = []
for part in components:
if part in ['.', '..']:
raise ValueError
elif part:
sanitized.append(part)
if not sanitized:
raise ValueError
return os.path.join(data_path, *sanitized)
def getExampleXRDS():
filename = getDataName('example-xrds.xml')
return file(filename).read()
example_xrds = getExampleXRDS()
default_test_file = getDataName('test1-discover.txt')
discover_tests = {}
def readTests(filename):
data = file(filename).read()
tests = {}
for case in data.split('\f\n'):
(name, content) = case.split('\n', 1)
tests[name] = content
return tests
def getData(filename, name):
global discover_tests
try:
file_tests = discover_tests[filename]
except KeyError:
file_tests = discover_tests[filename] = readTests(filename)
return file_tests[name]
def fillTemplate(test_name, template, base_url, example_xrds):
mapping = [
('URL_BASE/', base_url),
('<XRDS Content>', example_xrds),
('YADIS_HEADER', YADIS_HEADER_NAME),
('NAME', test_name),
]
for k, v in mapping:
template = template.replace(k, v)
return template
def generateSample(test_name, base_url,
example_xrds=example_xrds,
filename=default_test_file):
try:
template = getData(filename, test_name)
except IOError, why:
import errno
if why[0] == errno.ENOENT:
raise KeyError(filename)
else:
raise
return fillTemplate(test_name, template, base_url, example_xrds)
def generateResult(base_url, input_name, id_name, result_name, success):
input_url = urlparse.urljoin(base_url, input_name)
# If id_name is None, the fixture expects discovery to fail; that
# case is signalled by returning the DiscoveryFailure class
if id_name is None:
assert result_name is None
return input_url, DiscoveryFailure
result = generateSample(result_name, base_url)
headers, content = result.split('\n\n', 1)
header_lines = headers.split('\n')
for header_line in header_lines:
if header_line.startswith('Content-Type:'):
_, ctype = header_line.split(':', 1)
ctype = ctype.strip()
break
else:
ctype = None
id_url = urlparse.urljoin(base_url, id_name)
result = DiscoveryResult(input_url)
result.normalized_uri = id_url
if success:
result.xrds_uri = urlparse.urljoin(base_url, result_name)
result.content_type = ctype
result.response_text = content
return input_url, result
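# Hedged sketch of how the fixtures above are typically consumed; the
# base URL is a hypothetical placeholder.
def _iter_expected_results(base_url='http://unittest.example/'):
    for success, input_name, id_name, result_name in testlist:
        yield generateResult(base_url, input_name, id_name, result_name, success)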
| apache-2.0 |
Huyuwei/tvm | topi/python/topi/mali/dense.py | 2 | 3966 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""dense schedule on ARM Mali GPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import autotvm
from .. import generic, nn
from ..util import traverse_inline
autotvm.register_topi_compute(nn.dense, 'mali', 'direct', nn.dense.fdefault)
@autotvm.register_topi_schedule(generic.schedule_dense, 'mali', 'direct')
def schedule_dense(cfg, outs):
"""Schedule for dense operator.
Parameters
----------
cfg: ConfigEntity
The config entity for this template
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for dense.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == 'dense':
vec_size = [1, 2, 4, 8, 16]
max_unroll = 32
dense = op.output(0)
output = outs[0]
y, x = s[output].op.axis
c = s[dense].op.reduce_axis[0]
##### space definition begin #####
cfg.define_split('tile_y', y, num_outputs=3)
cfg.define_split('tile_x', x, num_outputs=3)
cfg.define_split('c_unroll', c, num_outputs=2, max_factor=64)
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
'mali', 'rk3399', 'dense', 'direct')
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
if dense.op in s.outputs:
dense = s.cache_write(output, 'local')
by, ty, yi = cfg['tile_y'].apply(s, output, y)
bx, tx, xi = cfg['tile_x'].apply(s, output, x)
s[output].bind(by, tvm.thread_axis('blockIdx.y'))
s[output].bind(bx, tvm.thread_axis('blockIdx.x'))
s[output].bind(ty, tvm.thread_axis('threadIdx.y'))
s[output].bind(tx, tvm.thread_axis('threadIdx.x'))
if cfg['tile_y'].size[-1] < max_unroll:
s[output].unroll(yi)
if cfg['tile_x'].size[-1] in vec_size:
s[output].vectorize(xi)
s[dense].compute_at(s[output], tx)
k = s[dense].op.reduce_axis[0]
y, x = s[dense].op.axis
k, k_unroll = cfg['c_unroll'].apply(s, dense, k)
s[dense].reorder(k, k_unroll, y, x)
s[dense].unroll(k_unroll)
if cfg['tile_y'].size[-1] < max_unroll:
s[dense].unroll(y)
if cfg['tile_x'].size[-1] in vec_size:
s[dense].vectorize(x)
traverse_inline(s, outs[0].op, _callback)
return s
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
""" fuse all the axis and bind to GPU threads """
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
bx, tx = s[tensor].split(fused, num_thread)
s[tensor].bind(bx, tvm.thread_axis("blockIdx.x"))
s[tensor].bind(tx, tvm.thread_axis("threadIdx.x"))
return bx, tx
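# Hedged usage sketch for fuse_and_bind; the 64x64 shape and thread count
# are illustrative assumptions, not values used by the schedule above.
def _fuse_and_bind_example():
    A = tvm.placeholder((64, 64), name='A')
    B = tvm.compute(A.shape, lambda i, j: A[i, j] * 2, name='B')
    s = tvm.create_schedule(B.op)
    # fuse both axes of B, split by 64 threads, bind to GPU block/thread
    fuse_and_bind(s, B, num_thread=64)
    return s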
| apache-2.0 |
certik/sympy-oldcore | sympy/numerics/evalf_.py | 1 | 3671 | from sympy import *
from float_ import Float, ComplexFloat
import functions
import constants
from utils_ import bitcount
def polyfunc(expr, derivative=False):
"""
Convert a SymPy expression representing a univariate polynomial
into a function for numerical evaluation using Floats /
ComplexFloats.
>>> x = Symbol('x')
>>> polyfunc(x**3 + 4)(2)
Float('12')
If derivative=True is set, the evaluation function evaluates both
the polynomial and its derivative at the given point and returns
the two values as a tuple.
"""
poly = Polynomial(expr)
degree = poly.coeffs[0][1]
coeffs = [0] * int(degree + 1)
for c, e in poly.coeffs:
coeffs[int(e)] = evalf(c)
def g(x):
x = evalf(x)
p = coeffs[int(degree)]
q = 0
for i in xrange(degree-1, -1, -1):
if derivative:
q = p + x*q
p = coeffs[i] + x*p
if derivative:
return p, q
else:
return p
return g
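# Hedged sketch of derivative=True, continuing the docstring example above
# (defined as a function so importing this module stays side-effect free):
def _polyfunc_derivative_example():
    x = Symbol('x')
    p, q = polyfunc(x**3 + 4, derivative=True)(2)
    return p, q  # p == Float('12') (value), q == Float('12') (3*x**2 at x=2)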
def evalf(expr):
"""
evalf(expr) attempts to evaluate a SymPy expression to a Float or
ComplexFloat with an error smaller than 10**(-Float.getdps())
"""
if isinstance(expr, (Float, ComplexFloat)):
return expr
elif isinstance(expr, (int, float)):
return Float(expr)
elif isinstance(expr, complex):
return ComplexFloat(expr)
expr = Basic.sympify(expr)
if isinstance(expr, (Rational)):
y = Float(expr)
elif isinstance(expr, Real):
y = Float(str(expr))
elif expr is I:
y = ComplexFloat(0,1)
elif expr is pi:
y = constants.pi_float()
elif expr is E:
y = functions.exp(1)
elif isinstance(expr, Mul):
factors = expr[:]
workprec = Float.getprec() + 1 + len(factors)
Float.store()
Float.setprec(workprec)
y = Float(1)
for f in factors:
y *= evalf(f)
Float.revert()
elif isinstance(expr, Pow):
base, expt = expr[:]
workprec = Float.getprec() + 8 # may need more
Float.store()
Float.setprec(workprec)
base = evalf(base)
expt = evalf(expt)
if expt == 0.5:
y = functions.sqrt(base)
else:
y = functions.exp(functions.log(base) * expt)
Float.revert()
elif isinstance(expr, Basic.exp):
Float.store()
Float.setprec(Float.getprec() + 3)
#XXX: how is it possible that this works:
x = evalf(expr[0])
#and this too:
#x = evalf(expr[1])
#?? (Try to uncomment it and you'll see)
y = functions.exp(x)
Float.revert()
elif isinstance(expr, Add):
# TODO: this doesn't yet work as it should.
# We need some way to handle sums whose results are
# very close to 0, and when necessary, repeat the
# summation with higher precision
reqprec = Float.getprec()
Float.store()
Float.setprec(10)
terms = expr[:]
approxterms = [abs(evalf(x)) for x in terms]
min_mag = min(x.exp for x in approxterms)
max_mag = max(x.exp+bitcount(x.man) for x in approxterms)
Float.setprec(reqprec - 10 + max_mag - min_mag + 1 + len(terms))
workprec = Float.getdps()
y = 0
for t in terms:
y += evalf(t)
Float.revert()
else:
# print expr, expr.__class__
raise NotImplementedError
# print expr, y
return +y
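# Hedged sketch of evalf on a product, exercising the Mul branch above;
# the value shown in the comment is approximate.
def _evalf_example():
    return evalf(2 * pi)  # ~ Float('6.2831853071795864769')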
| bsd-3-clause |
Validus-Kernel/android_kernel_moto_shamu | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
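# Illustrative (hypothetical) lines from `readelf -u` output that the two
# patterns above are meant to match:
#   <my_func>: [0x4000000000000040-0x40000000000000c0]
#   ...rlen=24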
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
kidaa/pyamg | pyamg/krylov/tests/test_krylov.py | 2 | 10226 | from pyamg.krylov import bicgstab, cg, cgne, cgnr, cr, fgmres, gmres
from pyamg.krylov._gmres_householder import gmres_householder
from pyamg.krylov._gmres_mgs import gmres_mgs
from numpy import array, zeros, ones
from scipy import mat, random
from scipy.linalg import solve
from pyamg.util.linalg import norm
import pyamg
from numpy.testing import TestCase, assert_array_almost_equal, assert_equal
class TestKrylov(TestCase):
def setUp(self):
self.cases = []
self.spd_cases = []
self.symm_cases = []
# self.oblique = [gmres, fgmres, cgnr,
# krylov._gmres_householder.gmres_householder,
# krylov._gmres_mgs.gmres_mgs]
self.oblique = [gmres_householder, gmres_mgs, gmres, fgmres, cgnr]
self.symm_oblique = [cr]
self.orth = [cgne]
self.inexact = [bicgstab]
self.spd_orth = [cg]
# 1x1
A = mat([[1.2]])
b = array([3.9]).reshape(-1, 1)
x0 = zeros((1, 1))
self.cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 1, 'reduction_factor': 1e-10})
self.spd_cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 1, 'reduction_factor': 1e-10})
self.symm_cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 1, 'reduction_factor': 1e-10})
# 4x4
A = mat([[1.2, 0., 0., 0.],
[0., 4., 2., 6.],
[0., 0., 9.3, -2.31],
[-4., 0., 0., -11.]])
b = array([1., 3.9, 0., -1.23]).reshape(-1, 1)
x0 = zeros((4, 1))
self.cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
self.spd_cases.append({'A': A.T*A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
self.symm_cases.append({'A': A.T + A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
# 4x4 Imaginary
A = mat(A, dtype=complex)
A[0, 0] += 3.1j
A[3, 3] -= 1.34j
A[1, 3] *= 1.0j
A[1, 2] += 1.0j
b = array([1. - 1.0j, 2.0 - 3.9j, 0., -1.23]).reshape(-1, 1)
x0 = ones((4, 1))
self.cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
self.spd_cases.append({'A': A.H*A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
self.symm_cases.append({'A': A.H + A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 4, 'reduction_factor': 1e-10})
# 10x10
A = mat([[-1.1, 0., 0., 0., 3.9, 0., 0., 11., -1., 0.],
[0., 4., 2.9, 0., 0., 6.8, 0., 0., 0., 0.],
[0., 0., 9.0, 0., 0., 0.8, 1., -2.2, 0., 9.],
[-4., 0., 0.0, 0., 0., 0.0, 2., 2.2, 0., 0.],
[0., 0., 0.0, 21., 0., 0.1, 0., 0., 0., 0.],
[0., 0., 0.0, 0., -4.7, 0.0, 0., 0., 0., 0.],
[2.1, 7., 22.0, 0., 0., 0.0, 0., 0., 0., 0.],
[0., 0., 0.0, 34., 0., 0.0, 0., 0., -12.3, 0.],
[0., 3.4, 0.0, 0., 0., -0.3, 0., 0., 0., 0.],
[9., 0., 0.0, 0., 87., 0.0, 0., 0., 0., -11.2]])
b = array([1., 0., 0.2, 8., 0., -1.9,
11.3, 0.0, 0.1, 0.0]).reshape(-1, 1)
x0 = zeros((10, 1))
x0[4] = 11.1
x0[7] = -2.
self.cases.append({'A': A, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 2, 'reduction_factor': 0.98})
self.symm_cases.append({'A': A + A.T, 'b': b, 'x0': x0, 'tol': 1e-16,
'maxiter': 2, 'reduction_factor': 0.98})
self.spd_cases.append({'A':
mat(pyamg.gallery.poisson((10,)).todense()),
'b': b, 'x0': x0, 'tol': 1e-16, 'maxiter': 2,
'reduction_factor': 0.98})
def test_gmres(self):
# Ensure repeatability
random.seed(0)
# For these small matrices, Householder and MGS GMRES should give the
# same result, and for symmetric (but possibly indefinite) matrices CR
# and GMRES should give same result
for maxiter in [1, 2, 3]:
for case, symm_case in zip(self.cases, self.symm_cases):
A = case['A']
b = case['b']
x0 = case['x0']
A_symm = symm_case['A']
b_symm = symm_case['b']
x0_symm = symm_case['x0']
# Test agreement between Householder and GMRES
(x, flag) = gmres_householder(A, b, x0=x0,
maxiter=min(A.shape[0], maxiter))
(x2, flag2) = gmres_mgs(A, b, x0=x0, maxiter=min(A.shape[0],
maxiter))
assert_array_almost_equal(x/norm(x), x2/norm(x2),
err_msg='Householder GMRES and MGS\
GMRES gave different\
results for small matrix')
assert_equal(flag, flag2,
err_msg='Householder GMRES and MGS GMRES returned\
different convergence flags for small\
matrix')
# Test agreement between GMRES and CR
if A_symm.shape[0] > 1:
residuals2 = []
(x2, flag2) = gmres_mgs(A_symm, b_symm, x0=x0_symm,
maxiter=min(A.shape[0], maxiter),
residuals=residuals2)
residuals3 = []
(x3, flag2) = cr(A_symm, b_symm, x0=x0_symm,
maxiter=min(A.shape[0], maxiter),
residuals=residuals3)
residuals2 = array(residuals2)
residuals3 = array(residuals3)
assert_array_almost_equal(residuals3/norm(residuals3),
residuals2/norm(residuals2),
err_msg='CR and GMRES yield\
different residual\
vectors')
assert_array_almost_equal(x2/norm(x2), x3/norm(x3),
err_msg='CR and GMRES yield\
different answers')
def test_krylov(self):
# Oblique projectors reduce the residual
for method in self.oblique:
for case in self.cases:
A = case['A']
b = case['b']
x0 = case['x0']
(xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
maxiter=case['maxiter'])
xNew = xNew.reshape(-1, 1)
assert_equal((norm(b - A*xNew)/norm(b - A*x0)) <
case['reduction_factor'], True,
err_msg='Oblique Krylov Method Failed Test')
# Oblique projectors reduce the residual, here we consider oblique
# projectors for symmetric matrices
for method in self.symm_oblique:
for case in self.symm_cases:
A = case['A']
b = case['b']
x0 = case['x0']
(xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
maxiter=case['maxiter'])
xNew = xNew.reshape(-1, 1)
assert_equal((norm(b - A*xNew)/norm(b - A*x0)) <
case['reduction_factor'], True,
err_msg='Symmetric oblique Krylov Method Failed\
Test')
# Orthogonal projectors reduce the error
for method in self.orth:
for case in self.cases:
A = case['A']
b = case['b']
x0 = case['x0']
(xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
maxiter=case['maxiter'])
xNew = xNew.reshape(-1, 1)
soln = solve(A, b)
assert_equal((norm(soln - xNew)/norm(soln - x0)) <
case['reduction_factor'], True,
err_msg='Orthogonal Krylov Method Failed Test')
# SPD Orthogonal projectors reduce the error
for method in self.spd_orth:
for case in self.spd_cases:
A = case['A']
b = case['b']
x0 = case['x0']
(xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
maxiter=case['maxiter'])
xNew = xNew.reshape(-1, 1)
soln = solve(A, b)
assert_equal((norm(soln - xNew)/norm(soln - x0)) <
case['reduction_factor'], True,
err_msg='Orthogonal Krylov Method Failed Test')
# Assume that Inexact Methods reduce the residual for these examples
for method in self.inexact:
for case in self.cases:
A = case['A']
b = case['b']
x0 = case['x0']
(xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
maxiter=A.shape[0])
xNew = xNew.reshape(-1, 1)
assert_equal((norm(b - A*xNew)/norm(b - A*x0)) < 0.15, True,
err_msg='Inexact Krylov Method Failed Test')
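# Hedged standalone sketch (not part of the test suite): solving a small
# SPD Poisson system with cg, mirroring the spd_orth cases above.
def _cg_poisson_example():
    A = pyamg.gallery.poisson((10,), format='csr')
    b = ones((A.shape[0],))
    x, flag = cg(A, b, tol=1e-8, maxiter=50)
    return norm(b - A * x), flag  # residual norm and convergence flag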
| bsd-3-clause |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/test/test_import.py | 60 | 40179 | # We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import unittest.mock as mock
import textwrap
import errno
import shutil
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE)
from test import script_helper
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyo",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with script_helper.temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
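# Hedged usage sketch for the _ready_to_import() helper above; the module
# name and source line are illustrative.
def _ready_to_import_example():
    with _ready_to_import('spam_example', 'value = 42') as (name, path):
        mod = __import__(name)  # tempdir is on sys.path inside the block
        assert mod.value == 42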
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
pyo = TESTFN + ".pyo"
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc/.pyo files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
if __debug__:
bytecode_only = path + "c"
else:
bytecode_only = path + "o"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(12)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from . import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
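# Hedged standalone sketch of the __import__-override pattern tested above,
# using only helpers already imported in this file.
def _override_import_example():
    with swap_attr(builtins, '__import__', lambda *args, **kwargs: 'stubbed'):
        return __import__('anything')  # -> 'stubbed'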
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147 related behaviors.
tag = sys.implementation.cache_tag
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertTrue(os.path.exists(os.path.join(
'__pycache__', '{}.{}.py{}'.format(
TESTFN, self.tag, 'c' if __debug__ else 'o'))))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertFalse(os.path.exists(os.path.join(
'__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.FileFinder, mod.FileFinder)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap._get_sourcefile() as used by the C API.
Because of the peculiar way this function is used by the C API, these are
knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
| lgpl-3.0 |
cancro7/gem5 | src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py | 43 | 8074 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop IRET_REAL {
.serializing
panic "Real mode iret isn't implemented!"
};
def macroop IRET_PROT {
.serializing
.adjust_env oszIn64Override
# Check for a nested task. This isn't supported at the moment.
rflag t1, 14; #NT bit
panic "Task switching with iret is unimplemented!", flags=(nCEZF,)
#t1 = temp_RIP
#t2 = temp_CS
#t3 = temp_RFLAGS
#t4 = handy m5 register
# Pop temp_RIP, temp_CS, and temp_RFLAGS
ld t1, ss, [1, t0, rsp], "0 * env.stackSize", dataSize=ssz
ld t2, ss, [1, t0, rsp], "1 * env.stackSize", dataSize=ssz
ld t3, ss, [1, t0, rsp], "2 * env.stackSize", dataSize=ssz
# Read the handy m5 register for use later
rdm5reg t4
###
### Handle if we're returning to virtual 8086 mode.
###
#IF ((temp_RFLAGS.VM=1) && (CPL=0) && (LEGACY_MODE))
# IRET_FROM_PROTECTED_TO_VIRTUAL
#temp_RFLAGS.VM != 1
rcri t0, t3, 18, flags=(ECF,)
br label("protToVirtFallThrough"), flags=(nCECF,)
#CPL=0
andi t0, t4, 0x30, flags=(EZF,)
br label("protToVirtFallThrough"), flags=(nCEZF,)
#(LEGACY_MODE)
rcri t0, t4, 1, flags=(ECF,)
br label("protToVirtFallThrough"), flags=(nCECF,)
panic "iret to virtual mode not supported"
protToVirtFallThrough:
#temp_CPL = temp_CS.rpl
andi t5, t2, 0x3
###
### Read in the info for the new CS segment.
###
#CS = READ_DESCRIPTOR (temp_CS, iret_chk)
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
br label("processCSDescriptor"), flags=(CEZF,)
andi t6, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br label("globalCSDescriptor"), flags=(CEZF,)
ld t8, tsl, [1, t0, t6], dataSize=8, atCPL0=True
br label("processCSDescriptor")
globalCSDescriptor:
ld t8, tsg, [1, t0, t6], dataSize=8, atCPL0=True
processCSDescriptor:
chks t2, t6, dataSize=8
###
### Get the new stack pointer and stack segment off the old stack if necessary,
### and piggyback on the logic to check the new RIP value.
###
#IF ((64BIT_MODE) || (temp_CPL!=CPL))
#{
#(64BIT_MODE)
andi t0, t4, 0xE, flags=(EZF,)
# Since we just found out we're in 64 bit mode, take advantage and
# do the appropriate RIP checks.
br label("doPopStackStuffAndCheckRIP"), flags=(CEZF,)
# Here, we know we're -not- in 64 bit mode, so we should do the
# appropriate/other RIP checks.
# if temp_RIP > CS.limit throw #GP(0)
rdlimit t6, cs, dataSize=8
sub t0, t1, t6, flags=(ECF,)
fault "std::make_shared<GeneralProtection>(0)", flags=(CECF,)
#(temp_CPL!=CPL)
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
br label("doPopStackStuff"), flags=(nCEZF,)
    # We can modify user visible state here because we know
# we're done with things that can fault.
addi rsp, rsp, "3 * env.stackSize"
br label("fallThroughPopStackStuff")
doPopStackStuffAndCheckRIP:
# Check if the RIP is canonical.
srai t7, t1, 47, flags=(EZF,), dataSize=ssz
# if t7 isn't 0 or -1, it wasn't canonical.
br label("doPopStackStuff"), flags=(CEZF,)
addi t0, t7, 1, flags=(EZF,), dataSize=ssz
fault "std::make_shared<GeneralProtection>(0)", flags=(nCEZF,)
doPopStackStuff:
# POP.v temp_RSP
ld t6, ss, [1, t0, rsp], "3 * env.dataSize", dataSize=ssz
# POP.v temp_SS
ld t9, ss, [1, t0, rsp], "4 * env.dataSize", dataSize=ssz
# SS = READ_DESCRIPTOR (temp_SS, ss_chk)
andi t0, t9, 0xFC, flags=(EZF,), dataSize=2
br label("processSSDescriptor"), flags=(CEZF,)
andi t7, t9, 0xF8, dataSize=8
andi t0, t9, 0x4, flags=(EZF,), dataSize=2
br label("globalSSDescriptor"), flags=(CEZF,)
ld t7, tsl, [1, t0, t7], dataSize=8, atCPL0=True
br label("processSSDescriptor")
globalSSDescriptor:
ld t7, tsg, [1, t0, t7], dataSize=8, atCPL0=True
processSSDescriptor:
chks t9, t7, dataSize=8
# This actually updates state which is wrong. It should wait until we know
# we're not going to fault. Unfortunately, that's hard to do.
wrdl ss, t7, t9
wrsel ss, t9
###
### From this point downwards, we can't fault. We can update user visible state.
###
# RSP.s = temp_RSP
mov rsp, rsp, t6, dataSize=ssz
#}
fallThroughPopStackStuff:
# Update CS
wrdl cs, t8, t2
wrsel cs, t2
#CPL = temp_CPL
#IF (changing CPL)
#{
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
br label("skipSegmentSquashing"), flags=(CEZF,)
# The attribute register needs to keep track of more info before this will
# work the way it needs to.
# FOR (seg = ES, DS, FS, GS)
# IF ((seg.attr.dpl < cpl && ((seg.attr.type = 'data')
# || (seg.attr.type = 'non-conforming-code')))
# {
# seg = NULL
# }
#}
skipSegmentSquashing:
# Ignore this for now.
#RFLAGS.v = temp_RFLAGS
wrflags t0, t3
# VIF,VIP,IOPL only changed if (old_CPL = 0)
# IF only changed if (old_CPL <= old_RFLAGS.IOPL)
# VM unchanged
# RF cleared
#RIP = temp_RIP
wrip t0, t1, dataSize=ssz
};
def macroop IRET_VIRT {
panic "Virtual mode iret isn't implemented!"
};
def macroop INT3 {
limm t1, 0x03, dataSize=8
rdip t7
# Are we in long mode?
rdm5reg t5
andi t0, t5, 0x1, flags=(EZF,)
br rom_label("longModeSoftInterrupt"), flags=(CEZF,)
br rom_label("legacyModeInterrupt")
};
def macroop INT_I {
    # Load the byte-sized interrupt vector specified in the instruction
.adjust_imm trimImm(8)
limm t1, imm, dataSize=8
rdip t7
# Are we in long mode?
rdm5reg t5
andi t0, t5, 0x1, flags=(EZF,)
br rom_label("longModeSoftInterrupt"), flags=(CEZF,)
br rom_label("legacyModeInterrupt")
};
'''
#let {{
# class INT(Inst):
# "GenFault ${new UnimpInstFault}"
# class INTO(Inst):
# "GenFault ${new UnimpInstFault}"
#}};
| bsd-3-clause |
suqinhuang/virt-test | tools/download_manager.py | 3 | 2223 | #!/usr/bin/python
"""
Download helper for blobs needed for virt testing.
Downloads blobs defined in assets. Assets are .ini files that contain the
following config keys:
title = Title string to display in the download progress bar.
url = URL of the resource
sha1_url = URL with SHA1 information for the resource, in the form
sha1sum file_basename
destination = Location of your file relative to the data directory
(TEST_SUITE_ROOT/shared/data)
destination_uncompressed (optional) = Location of the uncompressed file
relative to the data directory (TEST_SUITE_ROOT/shared/data)
uncompress_cmd (optional) = Command that needs to be executed with the
compressed file as a parameter
@copyright: Red Hat 2012
"""
import glob, os, sys, logging, time
import common
from autotest.client.shared import logging_manager
from virttest import asset, utils_misc
def download_assets():
all_assets = asset.get_all_assets()
if all_assets:
logging.info("Available download assets:")
logging.info("")
for asset_info in all_assets:
asset_keys = asset_info.keys()
logging.info("%d - %s" % (all_assets.index(asset_info) + 1,
asset_info['title']))
asset_keys.pop(asset_keys.index('title'))
asset_keys.sort()
for k in asset_keys:
logging.info(" %s = %s" % (k, asset_info[k]))
logging.info("")
indexes = raw_input("%s INFO | Type the index for the assets you want to "
"download (comma separated): " %
time.strftime("%H:%M:%S", time.localtime()))
index_list = []
for idx in indexes.split(","):
try:
index = int(idx) - 1
index_list.append(index)
all_assets[index]
except (ValueError, IndexError):
logging.error("Invalid index(es), aborting...")
sys.exit(1)
for idx in index_list:
asset_info = all_assets[idx]
asset.download_file(asset_info, interactive=True)
if __name__ == "__main__":
logging_manager.configure_logging(utils_misc.VirtLoggingConfig())
download_assets()
| gpl-2.0 |
kennedyshead/home-assistant | tests/components/lyric/test_config_flow.py | 2 | 6335 | """Test the Honeywell Lyric config flow."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.http import CONF_BASE_URL, DOMAIN as DOMAIN_HTTP
from homeassistant.components.lyric import config_flow
from homeassistant.components.lyric.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
@pytest.fixture()
async def mock_impl(hass):
"""Mock implementation."""
await setup.async_setup_component(hass, "http", {})
impl = config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
CLIENT_ID,
CLIENT_SECRET,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
config_flow.OAuth2FlowHandler.async_register_implementation(hass, impl)
return impl
async def test_abort_if_no_configuration(hass):
"""Check flow abort when no configuration."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_full_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
},
DOMAIN_HTTP: {CONF_BASE_URL: "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.lyric.api.ConfigEntryLyricClient"), patch(
"homeassistant.components.lyric.async_setup_entry", return_value=True
) as mock_setup:
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
assert DOMAIN in hass.config.components
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.state is config_entries.ConfigEntryState.LOADED
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
async def test_abort_if_authorization_timeout(
hass, mock_impl, current_request_with_host
):
"""Check Somfy authorization timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow = config_flow.OAuth2FlowHandler()
flow.hass = hass
with patch.object(
mock_impl, "async_generate_authorize_url", side_effect=asyncio.TimeoutError
):
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "authorize_url_timeout"
async def test_reauthentication_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
):
"""Test reauthentication flow."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
},
DOMAIN_HTTP: {CONF_BASE_URL: "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=DOMAIN,
version=1,
data={"id": "timmo", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.lyric.api.ConfigEntryLyricClient"):
with patch(
"homeassistant.components.lyric.async_setup_entry", return_value=True
) as mock_setup:
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert len(mock_setup.mock_calls) == 1
| apache-2.0 |
jtyuan/racetrack | src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_integer_to_floating_point.py | 91 | 3214 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CVTDQ2PS_XMM_XMM {
cvti2f xmml, xmmlm, size=4, ext=0
cvti2f xmmh, xmmhm, size=4, ext=0
};
def macroop CVTDQ2PS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
cvti2f xmml, ufp1, size=4, ext=0
cvti2f xmmh, ufp2, size=4, ext=0
};
def macroop CVTDQ2PS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
cvti2f xmml, ufp1, size=4, ext=0
cvti2f xmmh, ufp2, size=4, ext=0
};
def macroop CVTDQ2PD_XMM_XMM {
cvti2f xmmh, xmmlm, srcSize=4, destSize=8, ext=2
cvti2f xmml, xmmlm, srcSize=4, destSize=8, ext=0
};
def macroop CVTDQ2PD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=4, destSize=8, ext=0
cvti2f xmmh, ufp1, srcSize=4, destSize=8, ext=2
};
def macroop CVTDQ2PD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=4, destSize=8, ext=0
cvti2f xmmh, ufp1, srcSize=4, destSize=8, ext=2
};
'''
| bsd-3-clause |
jdeblese/ergovolve | proposals.py | 1 | 3554 | #!/usr/bin/env python
ergodoxian = (
("KEY_DeleteBackspace","1x2"),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
("KEY_Tab","1x1.5"))
new1 = (
("KEY_DeleteBackspace","1x2"),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
('SPECIAL_Fn', '1x1.5'))
new2 = (
('KEY_Shift', '1x2'),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
('KEY_Tab', '1x1.5'))
new3 = (
('KEY_Shift', '1x2'),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
('SPECIAL_Fn', '1x1.5'))
new4 = (
('KEY_Shift', '1x2'),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
('SPECIAL_Fn', '1x2'))
new5 = (
#('KEY_Shift', '1x2'),
("KEY_DeleteForward","1x2"),
('KEY_ReturnEnter', '1x2'),
('KEY_Spacebar', '1x2'),
('SPECIAL_Fn', '1x2'),
('KEY_Shift', '1.5x1'),
#('KEY_Shift', '1.5x1'),
("KEY_Dash_Underscore", "1.5x1"),
("KEY_Equal_Plus", "1.5x1"),
('KEY_ReturnEnter', '1.5x1'),
("KEY_Escape", "1.5x1"),
("KEY_DeleteForward","1.5x1"),
('SPECIAL_Fn', '1x1.5'),
("KEY_LeftBracket_LeftBrace", "1x1.5"),
("KEY_RightBracket_RightBrace", "1x1.5"),
("KEY_SingleQuote_DoubleQuote", "1.5x1"),
("KEY_GraveAccent_Tilde", "1.5x1"),
("KEY_Slash_Question", "1.5x1"),
#('SPECIAL_Fn', '1x1.5'),
)
hof = (ergodoxian, new1, new2, new3, new4, new5)
| mit |
karrtikr/ete | examples/nexml/nexml_parser.py | 5 | 2129 | from ete3 import Nexml
# Create an empty Nexml project
nexml_project = Nexml()
# Load content from NeXML file
nexml_project.build_from_file("trees.xml")
# All XML elements are within the project instance. Getter methods
# exist in each element to access their attributes.
print "Loaded Taxa:"
for taxa in nexml_project.get_otus():
for otu in taxa.get_otu():
print "OTU:", otu.id
# Extracts all the collection of trees in the project
tree_collections = nexml_project.get_trees()
# Select the first collection
collection_1 = tree_collections[0]
# print the topology of every tree
for tree in collection_1.get_tree():
# trees contain all the nexml information in their "nexml_node",
# "nexml_tree", and "nexml_edge" attributes.
print "Tree id", tree.nexml_tree.id
print tree
for node in tree.traverse():
print "node", node.nexml_node.id, "is associated with", node.nexml_node.otu, "OTU"
# Output:
# ==========
# Loaded Taxa:
# OTU: t1
# OTU: t2
# OTU: t3
# OTU: t4
# OTU: t5
# Tree id tree1
#
# /-n5(n5)
# /---|
# | \-n6(n6)
# /---|
# | | /-n8(n8)
# ----| \---|
# | \-n9(n9)
# |
# \-n2(n2)
# node n1 is associated with None OTU
# node n3 is associated with None OTU
# node n2 is associated with t1 OTU
# node n4 is associated with None OTU
# node n7 is associated with None OTU
# node n5 is associated with t3 OTU
# node n6 is associated with t2 OTU
# node n8 is associated with t5 OTU
# node n9 is associated with t4 OTU
# Tree id tree2
#
# /-tree2n5(n5)
# /---|
# | \-tree2n6(n6)
# /---|
# | | /-tree2n8(n8)
# ----| \---|
# | \-tree2n9(n9)
# |
# \-tree2n2(n2)
# node tree2n1 is associated with None OTU
# node tree2n3 is associated with None OTU
# node tree2n2 is associated with t1 OTU
# node tree2n4 is associated with None OTU
# node tree2n7 is associated with None OTU
# node tree2n5 is associated with t3 OTU
# node tree2n6 is associated with t2 OTU
# node tree2n8 is associated with t5 OTU
# node tree2n9 is associated with t4 OTU
| gpl-3.0 |
programadorjc/django | tests/view_tests/tests/test_i18n.py | 188 | 13917 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import gettext
import json
import os
import unittest
from os import path
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, modify_settings,
override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY, override
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NTests(TestCase):
""" Tests django views in django/views/i18n.py """
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
for lang_code, lang_name in settings.LANGUAGES:
post_data = dict(language=lang_code, next='/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code, lang_name = settings.LANGUAGES[0]
post_data = dict(language=lang_code, next='//unsafe/redirection/')
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
# we force saving language to a cookie rather than a session
# by excluding session middleware and those which do require it
test_settings = dict(
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'],
LANGUAGE_COOKIE_NAME='mylanguage',
LANGUAGE_COOKIE_AGE=3600 * 7 * 2,
LANGUAGE_COOKIE_DOMAIN='.example.com',
LANGUAGE_COOKIE_PATH='/test/',
)
with self.settings(**test_settings):
post_data = dict(language='pl', next='/views/')
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.middleware.locale.LocaleMiddleware',
})
def test_lang_from_translated_i18n_pattern(self):
response = self.client.post(
'/i18n/setlang/', data={'language': 'nl'},
follow=True, HTTP_REFERER='/en/translated/'
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'nl')
self.assertRedirects(response, '/nl/vertaald/')
# And reverse
response = self.client.post(
'/i18n/setlang/', data={'language': 'en'},
follow=True, HTTP_REFERER='/nl/vertaald/'
)
self.assertRedirects(response, '/en/translated/')
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
if six.PY3:
trans_txt = catalog.gettext('this is to be translated')
else:
trans_txt = catalog.ugettext('this is to be translated')
response = self.client.get('/jsi18n/')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, '"month name\\u0004May": "mai"', 1)
def test_jsoni18n(self):
"""
The json_catalog returns the language catalog and settings as JSON.
"""
with override('de'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode('utf-8'))
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog']['month name\x04May'], 'Mai')
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertEqual(data['plural'], '(n != 1)')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTests(SimpleTestCase):
"""
Tests django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE.
"""
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsoni18n_with_missing_en_files(self):
"""
Same as above for the json_catalog view. Here we also check for the
expected JSON format.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode('utf-8'))
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog'], {})
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertIsNone(data['plural'])
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
def test_i18n_language_non_english_default(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
is English and there is not 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
        # Force a language via GET; otherwise the gettext functions are a no-op!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JsI18NTestsMultiPackage(SimpleTestCase):
"""
Tests for django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE and merge JS translation from several packages.
"""
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, 'il faut traduire cette cha\\u00eene de caract\\u00e8res de app1')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
Similar to above but with neither default or requested language being
English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + [
path.join(
path.dirname(path.dirname(path.abspath(upath(__file__)))),
'app3',
'locale',
),
]
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response,
'este texto de app3 debe ser traducido')
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)
@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
@override_settings(ROOT_URLCONF='view_tests.urls')
class JavascriptI18nTests(LiveServerTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
try:
cls.selenium = import_string(cls.webdriver_class)()
except Exception as e:
raise unittest.SkipTest('Selenium webdriver "%s" not installed or '
'not operational: %s' % (cls.webdriver_class, str(e)))
super(JavascriptI18nTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(JavascriptI18nTests, cls).tearDownClass()
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
@override_settings(LANGUAGE_CODE='fr')
def test_multiple_catalogs(self):
self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_multi_catalogs/'))
elem = self.selenium.find_element_by_id('app1string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app1')
elem = self.selenium.find_element_by_id('app2string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app2')
class JavascriptI18nChromeTests(JavascriptI18nTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class JavascriptI18nIETests(JavascriptI18nTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause |
shakamunyi/nova | nova/db/sqlalchemy/migrate_repo/versions/268_add_host_in_compute_node.py | 14 | 2188 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData, Table, Column, String
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Add a new column host
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
    # NOTE(sbauza) : Old compute nodes can report stats without this field, so
    # we need to set the column as nullable
host = Column('host', String(255), nullable=True)
if not hasattr(compute_nodes.c, 'host'):
compute_nodes.create_column(host)
if not hasattr(shadow_compute_nodes.c, 'host'):
shadow_compute_nodes.create_column(host.copy())
    # NOTE(sbauza) : Populating the host field with the value from the
    # services table will be done at the ComputeNode object level on save()
ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
name="uniq_compute_nodes0host0hypervisor_hostname")
ukey.create()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Remove the new column
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
name="uniq_compute_nodes0host0hypervisor_hostname")
ukey.drop()
compute_nodes.drop_column('host')
shadow_compute_nodes.drop_column('host')
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/django/utils/unittest/result.py | 570 | 6105 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_moduleSetUpFailed = False
def __init__(self):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return (len(self.failures) + len(self.errors) == 0)
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
| apache-2.0 |
axinging/chromium-crosswalk | chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_page.py | 4 | 4188 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
import utils
from telemetry import page
from telemetry import story
from telemetry.core import exceptions
class CastPage(page.Page):
"""Abstract Cast page for Media Router Telemetry tests."""
def ChooseSink(self, tab, sink_name):
"""Chooses a specific sink in the list."""
tab.ExecuteJavaScript(
'var sinks = window.document.getElementById("media-router-container").'
' shadowRoot.getElementById("sink-list").getElementsByTagName("span");'
'for (var i=0; i<sinks.length; i++) {'
' if(sinks[i].textContent.trim() == "%s") {'
' sinks[i].click();'
' break;'
'}}' % sink_name);
def CloseDialog(self, tab):
"""Closes media router dialog."""
try:
tab.ExecuteJavaScript(
'window.document.getElementById("media-router-container").' +
'shadowRoot.getElementById("container-header").shadowRoot.' +
'getElementById("close-button").click();')
except exceptions.DevtoolsTargetCrashException:
# Ignore the crash exception, this exception is caused by the js
# code which closes the dialog, it is expected.
pass
def CloseExistingRoute(self, action_runner, sink_name):
"""Closes the existing route if it exists, otherwise does nothing."""
action_runner.TapElement(selector='#start_session_button')
action_runner.Wait(5)
for tab in action_runner.tab.browser.tabs:
if tab.url == 'chrome://media-router/':
if self.CheckIfExistingRoute(tab, sink_name):
self.ChooseSink(tab, sink_name)
tab.ExecuteJavaScript(
"window.document.getElementById('media-router-container')."
"shadowRoot.getElementById('route-details').shadowRoot."
"getElementById('close-route-button').click();")
self.CloseDialog(tab)
# Wait for 5s to make sure the route is closed.
action_runner.Wait(5)
def CheckIfExistingRoute(self, tab, sink_name):
""""Checks if there is existing route for the specific sink."""
tab.ExecuteJavaScript(
"var sinks = window.document.getElementById('media-router-container')."
" allSinks;"
"var sink_id = null;"
"for (var i=0; i<sinks.length; i++) {"
" if (sinks[i].name == '%s') {"
" console.info('sink id: ' + sinks[i].id); "
" sink_id = sinks[i].id;"
" break;"
" }"
"}"
"var routes = window.document.getElementById('media-router-container')."
" routeList;"
"for (var i=0; i<routes.length; i++) {"
" if (!!sink_id && routes[i].sinkId == sink_id) {"
" window.__telemetry_route_id = routes[i].id;"
" break;"
" }"
"}" % sink_name)
route = tab.EvaluateJavaScript('!!window.__telemetry_route_id')
logging.info('Is there existing route? ' + str(route))
return route
def ExecuteAsyncJavaScript(self, action_runner, script, verify_func,
error_message, timeout=5):
"""Executes async javascript function and waits until it finishes."""
action_runner.ExecuteJavaScript(script)
self._WaitForResult(action_runner, verify_func, error_message,
timeout=timeout)
def _WaitForResult(self, action_runner, verify_func, error_message,
timeout=5):
"""Waits until the function finishes or timeout."""
start_time = time.time()
while (not verify_func() and
time.time() - start_time < timeout):
action_runner.Wait(1)
if not verify_func():
raise page.page_test.Failure(error_message)
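  # Hypothetical usage sketch (the JS snippet and flag name are assumptions,
  # not part of this page set): kick off an asynchronous JS action, then poll
  # until a completion flag flips, failing the test after the timeout.
  #
  #   self.ExecuteAsyncJavaScript(
  #       action_runner,
  #       'window.__cast_done = false;'
  #       'startCast(function() { window.__cast_done = true; });',
  #       lambda: action_runner.EvaluateJavaScript('window.__cast_done'),
  #       'Timed out waiting for cast to start', timeout=10)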
def _GetDeviceName(self):
"""Gets device name from environment variable RECEIVER_NAME."""
if 'RECEIVER_IP' not in os.environ or not os.environ.get('RECEIVER_IP'):
raise page.page_test.Failure(
'Your test machine is not set up correctly, '
          'RECEIVER_IP environment variable is missing.')
return utils.GetDeviceName(os.environ.get('RECEIVER_IP'))
| bsd-3-clause |
Vangreen/android_kernel_lge_msm8226 | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += "		\" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0
    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            process_fo = 1
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())

    p.close()
    return
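# Illustrative sketch (not part of the original script): the '\(\*' pattern
# used above is what picks function-pointer members out of the struct, e.g.
#
#   re.search('\(\*', 'int (*write_pending)(struct se_cmd *);')  # match -> kept
#   re.search('\(\*', 'char *fabric_name;')                      # None -> skipped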
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
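# For each function-pointer line collected by tcm_mod_scan_fabric_ops(), emit
# a matching no-op stub into <mod>_fabric.c (buf) and its prototype into
# <mod>_fabric.h (bufi).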
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option '%s' is missing\n" % m
parser.print_help()
sys.exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
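# Example invocation (illustrative; assumes the script is run from
# Documentation/target/ inside a kernel tree, and the module name and
# protocol are placeholders):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ containing the *_base.h,
# *_fabric.{c,h} and *_configfs.c stubs plus Kconfig and Makefile, and then
# offers to wire the new module into drivers/target/{Makefile,Kconfig}.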
| gpl-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/faker/providers/address/en_GB/__init__.py | 4 | 5719 | from __future__ import unicode_literals
from collections import OrderedDict
from ..en import Provider as AddressProvider
class Provider(AddressProvider):
city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port')
city_suffixes = (
'town', 'ton', 'land', 'ville', 'berg', 'burgh', 'borough', 'bury', 'view', 'port', 'mouth', 'stad', 'furt',
'chester', 'mouth', 'fort', 'haven', 'side', 'shire',
)
building_number_formats = ('#', '##', '###')
street_suffixes = (
'alley', 'avenue', 'branch', 'bridge', 'brook', 'brooks', 'burg', 'burgs', 'bypass', 'camp', 'canyon', 'cape',
'causeway', 'center', 'centers', 'circle', 'circles', 'cliff', 'cliffs', 'club', 'common', 'corner', 'corners',
'course', 'court', 'courts', 'cove', 'coves', 'creek', 'crescent', 'crest', 'crossing', 'crossroad', 'curve',
'dale', 'dam', 'divide', 'drive', 'drive', 'drives', 'estate', 'estates', 'expressway', 'extension',
'extensions',
'fall', 'falls', 'ferry', 'field', 'fields', 'flat', 'flats', 'ford', 'fords', 'forest', 'forge', 'forges',
'fork',
'forks', 'fort', 'freeway', 'garden', 'gardens', 'gateway', 'glen', 'glens', 'green', 'greens', 'grove',
'groves',
'harbor', 'harbors', 'haven', 'heights', 'highway', 'hill', 'hills', 'hollow', 'inlet', 'inlet', 'island',
'island',
'islands', 'islands', 'isle', 'isle', 'junction', 'junctions', 'key', 'keys', 'knoll', 'knolls', 'lake',
'lakes',
'land', 'landing', 'lane', 'light', 'lights', 'loaf', 'lock', 'locks', 'locks', 'lodge', 'lodge', 'loop',
'mall',
'manor', 'manors', 'meadow', 'meadows', 'mews', 'mill', 'mills', 'mission', 'mission', 'motorway', 'mount',
'mountain', 'mountain', 'mountains', 'mountains', 'neck', 'orchard', 'oval', 'overpass', 'park', 'parks',
'parkway',
'parkways', 'pass', 'passage', 'path', 'pike', 'pine', 'pines', 'place', 'plain', 'plains', 'plains', 'plaza',
'plaza', 'point', 'points', 'port', 'port', 'ports', 'ports', 'prairie', 'prairie', 'radial', 'ramp', 'ranch',
'rapid', 'rapids', 'rest', 'ridge', 'ridges', 'river', 'road', 'road', 'roads', 'roads', 'route', 'row', 'rue',
'run', 'shoal', 'shoals', 'shore', 'shores', 'skyway', 'spring', 'springs', 'springs', 'spur', 'spurs',
'square',
'square', 'squares', 'squares', 'station', 'station', 'stravenue', 'stravenue', 'stream', 'stream', 'street',
'street', 'streets', 'summit', 'summit', 'terrace', 'throughway', 'trace', 'track', 'trafficway', 'trail',
'trail',
'tunnel', 'tunnel', 'turnpike', 'turnpike', 'underpass', 'union', 'unions', 'valley', 'valleys', 'via',
'viaduct',
'view', 'views', 'village', 'village', 'villages', 'ville', 'vista', 'vista', 'walk', 'walks', 'wall', 'way',
'ways', 'well', 'wells')
POSTAL_ZONES = (
'AB', 'AL', 'B' , 'BA', 'BB', 'BD', 'BH', 'BL', 'BN', 'BR',
'BS', 'BT', 'CA', 'CB', 'CF', 'CH', 'CM', 'CO', 'CR', 'CT',
'CV', 'CW', 'DA', 'DD', 'DE', 'DG', 'DH', 'DL', 'DN', 'DT',
'DY', 'E' , 'EC', 'EH', 'EN', 'EX', 'FK', 'FY', 'G' , 'GL',
'GY', 'GU', 'HA', 'HD', 'HG', 'HP', 'HR', 'HS', 'HU', 'HX',
'IG', 'IM', 'IP', 'IV', 'JE', 'KA', 'KT', 'KW', 'KY', 'L' ,
'LA', 'LD', 'LE', 'LL', 'LN', 'LS', 'LU', 'M' , 'ME', 'MK',
'ML', 'N' , 'NE', 'NG', 'NN', 'NP', 'NR', 'NW', 'OL', 'OX',
'PA', 'PE', 'PH', 'PL', 'PO', 'PR', 'RG', 'RH', 'RM', 'S' ,
'SA', 'SE', 'SG', 'SK', 'SL', 'SM', 'SN', 'SO', 'SP', 'SR',
'SS', 'ST', 'SW', 'SY', 'TA', 'TD', 'TF', 'TN', 'TQ', 'TR',
'TS', 'TW', 'UB', 'W' , 'WA', 'WC', 'WD', 'WF', 'WN', 'WR',
'WS', 'WV', 'YO', 'ZE'
)
POSTAL_ZONES_ONE_CHAR = [zone for zone in POSTAL_ZONES if len(zone) == 1]
POSTAL_ZONES_TWO_CHARS = [zone for zone in POSTAL_ZONES if len(zone) == 2]
postcode_formats = (
'AN NEE',
'ANN NEE',
'PN NEE',
'PNN NEE',
'ANC NEE',
'PND NEE',
)
_postcode_sets = OrderedDict((
(' ', ' '),
('N', [str(i) for i in range(0, 10)]),
('A', POSTAL_ZONES_ONE_CHAR),
('B', 'ABCDEFGHKLMNOPQRSTUVWXY'),
('C', 'ABCDEFGHJKSTUW'),
('D', 'ABEHMNPRVWXY'),
('E', 'ABDEFGHJLNPQRSTUWXYZ'),
('P', POSTAL_ZONES_TWO_CHARS),
))
city_formats = (
'{{city_prefix}} {{first_name}}{{city_suffix}}',
'{{city_prefix}} {{first_name}}',
'{{first_name}}{{city_suffix}}',
'{{last_name}}{{city_suffix}}',
)
street_name_formats = (
'{{first_name}} {{street_suffix}}',
'{{last_name}} {{street_suffix}}'
)
street_address_formats = (
'{{building_number}} {{street_name}}',
'{{secondary_address}}\n{{street_name}}',
)
address_formats = (
"{{street_address}}\n{{city}}\n{{postcode}}",
)
secondary_address_formats = ('Flat #', 'Flat ##', 'Flat ##?', 'Studio #', 'Studio ##', 'Studio ##?')
@classmethod
def postcode(cls):
"""
See http://web.archive.org/web/20090930140939/http://www.govtalk.gov.uk/gdsc/html/noframes/PostCode-2-1-Release.htm
"""
postcode = ''
pattern = cls.random_element(cls.postcode_formats)
for placeholder in pattern:
postcode += cls.random_element(cls._postcode_sets[placeholder])
return postcode
@classmethod
def city_prefix(cls):
return cls.random_element(cls.city_prefixes)
@classmethod
def secondary_address(cls):
return cls.bothify(cls.random_element(cls.secondary_address_formats))
| bsd-3-clause |
cslzchen/osf.io | addons/s3/migrations/0003_auto_20170713_1125.py | 22 | 1431 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-13 16:25
from __future__ import unicode_literals
import pytz
import datetime
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('addons_s3', '0002_auto_20170323_1534'),
]
operations = [
migrations.AddField(
model_name='nodesettings',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
preserve_default=False,
),
migrations.AddField(
model_name='nodesettings',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
migrations.AddField(
model_name='usersettings',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
preserve_default=False,
),
migrations.AddField(
model_name='usersettings',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
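# Illustrative (standard Django tooling, not specific to this file): the
# migration above would typically be applied with
#
#   python manage.py migrate addons_s3 0003_auto_20170713_1125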
| apache-2.0 |
Tyler2004/pychess | lib/pychess/Database/PgnImport.py | 20 | 16559 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import zipfile
from datetime import date
from array import array
from .profilehooks import profile
from sqlalchemy import select, Index, func, and_
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.schema import DropIndex
from pychess.compat import unicode
from pychess.Utils.const import *
from pychess.Utils.lutils.LBoard import LBoard
from pychess.Savers.ChessFile import LoadingError
from pychess.Savers.pgnbase import pgn_load
from pychess.Database.dbwalk import walk
from pychess.Database.model import engine, metadata, collection, event,\
site, player, game, annotator, ini_collection
CHUNK = 1000
EVENT, SITE, PLAYER, ANNOTATOR, COLLECTION = range(5)
removeDic = {
ord(u"'"): None,
ord(u","): None,
ord(u"."): None,
ord(u"-"): None,
ord(u" "): None,
}
LBoard_FEN_START = LBoard()
LBoard_FEN_START.applyFen(FEN_START)
class PgnImport():
def __init__(self):
self.conn = engine.connect()
self.ins_collection = collection.insert()
self.ins_event = event.insert()
self.ins_site = site.insert()
self.ins_player = player.insert()
self.ins_annotator = annotator.insert()
self.ins_game = game.insert()
self.collection_dict = {}
self.event_dict = {}
self.site_dict = {}
self.player_dict = {}
self.annotator_dict = {}
self.next_id = [0, 0, 0, 0, 0]
self.next_id[COLLECTION] = self.ini_names(collection, COLLECTION)
self.next_id[EVENT] = self.ini_names(event, EVENT)
self.next_id[SITE] = self.ini_names(site, SITE)
self.next_id[PLAYER] = self.ini_names(player, PLAYER)
self.next_id[ANNOTATOR] = self.ini_names(annotator, ANNOTATOR)
def get_id(self, name, name_table, field):
if not name:
return None
orig_name = name
if field == COLLECTION:
name_dict = self.collection_dict
name_data = self.collection_data
name = os.path.basename(name)[:-4]
elif field == EVENT:
name_dict = self.event_dict
name_data = self.event_data
elif field == SITE:
name_dict = self.site_dict
name_data = self.site_data
elif field == ANNOTATOR:
name_dict = self.annotator_dict
name_data = self.annotator_data
elif field == PLAYER:
name_dict = self.player_dict
name_data = self.player_data
# Some .pgn use country after player names
if name[-4:-3]==" " and name[-3:].isupper():
name = name[:-4]
name = name.title().translate(removeDic)
if name in name_dict:
return name_dict[name]
else:
if field == COLLECTION:
name_data.append({'source': orig_name, 'name': name})
else:
name_data.append({'name': orig_name})
name_dict[name] = self.next_id[field]
self.next_id[field] += 1
return name_dict[name]
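# Illustrative (not part of the importer): keys are normalized before the
# dict lookup above, e.g. u"Kasparov, Garry RUS" first loses the trailing
# federation tag, and u"Kasparov, Garry".title().translate(removeDic) gives
# u"KasparovGarry", so punctuation/spacing variants of one name share an id.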
def ini_names(self, name_table, field):
s = select([name_table])
name_dict = dict([(n.name.title().translate(removeDic), n.id) for n in self.conn.execute(s)])
if field == COLLECTION:
self.collection_dict = name_dict
elif field == EVENT:
self.event_dict = name_dict
elif field == SITE:
self.site_dict = name_dict
elif field == PLAYER:
self.player_dict = name_dict
elif field == ANNOTATOR:
self.annotator_dict = name_dict
s = select([func.max(name_table.c.id).label('maxid')])
maxid = self.conn.execute(s).scalar()
if maxid is None:
next_id = 1
else:
next_id = maxid + 1
return next_id
#@profile
def do_import(self, filename):
print(filename)
# collect new names not in they dict yet
self.collection_data = []
self.event_data = []
self.site_data = []
self.player_data = []
self.annotator_data = []
# collect new games and commit them in big chunks for speed
self.game_data = []
if filename.lower().endswith(".zip") and zipfile.is_zipfile(filename):
zf = zipfile.ZipFile(filename, "r")
files = [f for f in zf.namelist() if f.lower().endswith(".pgn")]
else:
zf = None
files = [filename]
for pgnfile in files:
if zf is None:
cf = pgn_load(open(pgnfile, "rU"))
else:
cf = pgn_load(zf.open(pgnfile, "rU"))
# use transaction to avoid autocommit slowness
trans = self.conn.begin()
try:
for i, game in enumerate(cf.games):
#print i+1#, cf.get_player_names(i)
movelist = array("H")
comments = []
cf.error = None
fenstr = cf._getTag(i, "FEN")
variant = cf.get_variant(i)
# Fixes for some non-standard Chess960 .pgn files
if variant == 0 and (fenstr is not None) and "Chess960" in cf._getTag(i, "Event"):
    cf.tagcache[i]["Variant"] = "Fischerandom"
    variant = 1
    parts = fenstr.split()
    parts[0] = parts[0].replace(".", "/").replace("0", "")
    if len(parts) == 1:
        parts.append("w")
        parts.append("-")
        parts.append("-")
    fenstr = " ".join(parts)
if variant:
    board = LBoard(FISCHERRANDOMCHESS)
else:
    board = LBoard()

if fenstr:
    try:
        board.applyFen(fenstr)
    except SyntaxError as e:
        print(_("The game #%s can't be loaded, because of an error parsing FEN") % (i + 1), e.args[0])
        continue
else:
    board = LBoard_FEN_START.clone()
boards = [board]
movetext = cf.get_movetext(i)
boards = cf.parse_string(movetext, boards[0], -1)
if cf.error is not None:
print("ERROR in game #%s" % (i+1), cf.error.args[0])
continue
walk(boards[0], movelist, comments)
if not movelist:
if (not comments) and (cf._getTag(i, 'White') is None) and (cf._getTag(i, 'Black') is None):
print("empty game")
continue
event_id = self.get_id(cf._getTag(i, 'Event'), event, EVENT)
site_id = self.get_id(cf._getTag(i, 'Site'), site, SITE)
game_date = cf._getTag(i, 'Date')
if game_date and '?' not in game_date:
    ymd = game_date.split('.')
    if len(ymd) == 3:
        game_year, game_month, game_day = map(int, ymd)
    else:
        game_year, game_month, game_day = int(game_date[:4]), None, None
elif game_date and '?' not in game_date[:4]:
    game_year, game_month, game_day = int(game_date[:4]), None, None
else:
    game_year, game_month, game_day = None, None, None
game_round = cf._getTag(i, 'Round')
white, black = cf.get_player_names(i)
white_id = self.get_id(white, player, PLAYER)
black_id = self.get_id(black, player, PLAYER)
result = cf.get_result(i)
white_elo = cf._getTag(i, 'WhiteElo')
white_elo = int(white_elo) if white_elo and white_elo.isdigit() else None
black_elo = cf._getTag(i, 'BlackElo')
black_elo = int(black_elo) if black_elo and black_elo.isdigit() else None
ply_count = cf._getTag(i, "PlyCount")
event_date = cf._getTag(i, 'EventDate')
eco = cf._getTag(i, "ECO")
eco = eco[:3] if eco else None
fen = cf._getTag(i, "FEN")
variant = cf.get_variant(i)
board = cf._getTag(i, "Board")
annot = cf._getTag(i, "Annotator")
annotator_id = self.get_id(annot, annotator, ANNOTATOR)
collection_id = self.get_id(unicode(pgnfile), collection, COLLECTION)
self.game_data.append({
'event_id': event_id,
'site_id': site_id,
'date_year': game_year,
'date_month': game_month,
'date_day': game_day,
'round': game_round,
'white_id': white_id,
'black_id': black_id,
'result': result,
'white_elo': white_elo,
'black_elo': black_elo,
'ply_count': ply_count,
'eco': eco,
'fen': fen,
'variant': variant,
'board': board,
'annotator_id': annotator_id,
'collection_id': collection_id,
'movelist': movelist.tostring(),
'comments': u"|".join(comments),
})
if len(self.game_data) >= CHUNK:
if self.collection_data:
self.conn.execute(self.ins_collection, self.collection_data)
self.collection_data = []
if self.event_data:
self.conn.execute(self.ins_event, self.event_data)
self.event_data = []
if self.site_data:
self.conn.execute(self.ins_site, self.site_data)
self.site_data = []
if self.player_data:
self.conn.execute(self.ins_player, self.player_data)
self.player_data = []
if self.annotator_data:
self.conn.execute(self.ins_annotator, self.annotator_data)
self.annotator_data = []
self.conn.execute(self.ins_game, self.game_data)
self.game_data = []
print(pgnfile, i+1)
if self.collection_data:
self.conn.execute(self.ins_collection, self.collection_data)
self.collection_data = []
if self.event_data:
self.conn.execute(self.ins_event, self.event_data)
self.event_data = []
if self.site_data:
self.conn.execute(self.ins_site, self.site_data)
self.site_data = []
if self.player_data:
self.conn.execute(self.ins_player, self.player_data)
self.player_data = []
if self.annotator_data:
self.conn.execute(self.ins_annotator, self.annotator_data)
self.annotator_data = []
if self.game_data:
self.conn.execute(self.ins_game, self.game_data)
self.game_data = []
print(pgnfile, i+1)
trans.commit()
except ProgrammingError as e:
trans.rollback()
print("Importing %s failed! %s" % (pgnfile, e))
def import_FIDE_players(self):
#print 'drop index'
#idx = Index('ix_player_name', player.c.name)
#self.conn.execute(DropIndex(idx))
#print 'import FIDE players'
#import_players()
#print 'create index'
#idx = Index('ix_player_name', player.c.name)
#idx.create(engine)
ins_player = player.insert()
player_data = []
with open("players_list.txt") as f:
# use transaction to avoid autocommit slowness
trans = self.conn.begin()
try:
for i, line in enumerate(f):
if i==0:
continue
elo = line[53:58].rstrip()
elo = int(elo) if elo else None
born = line[64:68].rstrip()
born = int(born) if born else None
title = line[44:46].rstrip()
title = title if title else None
player_data.append({
"fideid": int(line[:8]),
"name": line[10:42].rstrip(),
"title": title,
"fed": line[48:51],
"elo": elo,
"born": born,
})
if len(player_data) >= CHUNK:
self.conn.execute(ins_player, player_data)
player_data = []
print(i)
if player_data:
self.conn.execute(ins_player, player_data)
print(i+1)
trans.commit()
except:
trans.rollback()
raise
def print_db(self):
a1 = event.alias()
a2 = site.alias()
a3 = player.alias()
a4 = player.alias()
s = select([game.c.id, a1.c.name.label('event'), a2.c.name.label('site'), a3.c.name.label('white'), a4.c.name.label('black'),
game.c.date_year, game.c.date_month, game.c.date_day, game.c.eco,
game.c.result, game.c.white_elo, game.c.black_elo],
and_(
game.c.event_id==a1.c.id,
game.c.site_id==a2.c.id,
game.c.white_id==a3.c.id,
game.c.black_id==a4.c.id)).where(and_(a3.c.name.startswith(u"Réti"), a4.c.name.startswith(u"Van Nüss")))
result = self.conn.execute(s)
games = result.fetchall()
for g in games:
print("%s %s %s %s %s %s %s %s %s %s %s %s" % (g['id'], g['event'], g['site'], g['white'], g['black'],
g[5], g[6], g[7], g['eco'], reprResult[g['result']], g['white_elo'], g['black_elo']))
if __name__ == "__main__":
if 1:
metadata.drop_all(engine)
metadata.create_all(engine)
imp = PgnImport()
from .timer import Timer
if len(sys.argv) > 1:
arg = sys.argv[1]
with Timer() as t:
if arg[-4:].lower() in (".pgn", ".zip"):
if os.path.isfile(arg):
imp.do_import(arg)
elif os.path.exists(arg):
for file in sorted(os.listdir(arg)):
if file[-4:].lower() in (".pgn", ".zip"):
imp.do_import(os.path.join(arg, file))
print("Elapsed time (secs): %s" % t.elapsed_secs)
else:
path = os.path.abspath(os.path.dirname(__file__))
with Timer() as t:
imp.do_import(os.path.join('../../../testing/gamefiles', "annotated.pgn"))
imp.do_import(os.path.join('../../../testing/gamefiles', "world_matches.pgn"))
imp.do_import(os.path.join('../../../testing/gamefiles', "dortmund.pgn"))
imp.do_import(os.path.join('../../../testing/gamefiles', "twic923.pgn"))
print("Elapsed time (secs): %s" % t.elapsed_secs)
print("Old: 28.68")
imp.print_db()
| gpl-3.0 |
peerster/CouchPotatoServer | libs/tornado/autoreload.py | 66 | 12031 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Automatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module depends on `.IOLoop`, so it will not work in WSGI applications
and Google App Engine. It also will not work correctly when `.HTTPServer`'s
multi-process mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
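# Hedged usage sketch (not part of this module; 'config.yml' is a placeholder
# file name):
#
#   from tornado import autoreload, ioloop
#   autoreload.start()              # watch every imported module for changes
#   autoreload.watch('config.yml')  # watch one extra, non-module file
#   ioloop.IOLoop.current().start()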
# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by setting the $PYTHONPATH environment
# variable before re-execution so the new process will see the correct
# path. We attempt to address the latter problem when tornado.autoreload
# is run as __main__, although we can't fix the general case because
# we cannot reliably reconstruct the original command line
# (http://bugs.python.org/issue14208).
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
# as possible - if we introduced a tornado.sys or tornado.os
# module we'd be in trouble), or else our imports would become
# relative again despite the future import.
#
# There is a separate __main__ block at the end of the file to call main().
if sys.path[0] == os.path.dirname(__file__):
del sys.path[0]
import functools
import logging
import os
import pkgutil
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None
_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary()
def start(io_loop=None, check_time=500):
"""Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
io_loop = io_loop or ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
add_reload_hook(functools.partial(io_loop.close, all_fds=True))
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
scheduler.start()
def wait():
"""Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
start(io_loop)
io_loop.start()
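# Illustrative sketch (assumed test-runner script; names are placeholders):
#
#   if __name__ == '__main__':
#       unittest.main(exit=False)  # run the suite once
#       autoreload.wait()          # block, then restart on any source change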
def watch(filename):
"""Add a file to the watch list.
All imported modules are watched by default.
"""
_watched_files.add(filename)
def add_reload_hook(fn):
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
"""
_reload_hooks.append(fn)
def _reload_on_update(modify_times):
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
if process.task_id() is not None:
# We're in a child process created by fork_processes. If child
# processes restarted themselves, they'd all restart and then
# all call fork_processes again.
return
for module in sys.modules.values():
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
# module.
if not isinstance(module, types.ModuleType):
continue
path = getattr(module, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
_check_file(modify_times, path)
for path in _watched_files:
_check_file(modify_times, path)
def _check_file(modify_times, path):
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload():
global _reload_attempted
_reload_attempted = True
for fn in _reload_hooks:
fn()
if hasattr(signal, "setitimer"):
# Clear the alarm signal set by
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If sys.path[0] is an empty
# string, we were (probably) invoked with -m and the effective path
# is about to change on re-exec. Add the current directory to $PYTHONPATH
# to ensure that the new process sees the same path we did.
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
        not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
    os.environ["PYTHONPATH"] = (path_prefix +
                                os.environ.get("PYTHONPATH", ""))
if sys.platform == 'win32':
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
subprocess.Popen([sys.executable] + sys.argv)
sys.exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + sys.argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# ideal since the new process is detached from the parent
# terminal and thus cannot easily be killed with ctrl-C,
# but it's better than not being able to autoreload at
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable,
[sys.executable] + sys.argv)
sys.exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main():
"""Command-line wrapper to re-run a script whenever its source changes.
Scripts may be specified by filename or module name::
python -m tornado.autoreload -m tornado.test.runtests
python -m tornado.autoreload tornado/test/runtests.py
Running a script with this wrapper is similar to calling
`tornado.autoreload.wait` at the end of the script, but this wrapper
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
original_argv = sys.argv
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
module = sys.argv[2]
del sys.argv[1:3]
elif len(sys.argv) >= 2:
mode = "script"
script = sys.argv[1]
sys.argv = sys.argv[1:]
else:
print(_USAGE, file=sys.stderr)
sys.exit(1)
try:
if mode == "module":
import runpy
runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script":
with open(script) as f:
global __file__
__file__ = script
# Use globals as our "locals" dictionary so that
# something that tries to import __main__ (e.g. the unittest
# module) will see the right things.
exec_in(f.read(), globals(), globals())
except SystemExit as e:
logging.basicConfig()
gen_log.info("Script exited with status %s", e.code)
except Exception as e:
logging.basicConfig()
gen_log.warning("Script exited with uncaught exception", exc_info=True)
# If an exception occurred at import time, the file with the error
# never made it into sys.modules and so we won't know to watch it.
# Just to make sure we've covered everything, walk the stack trace
# from the exception and watch every file.
for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
watch(filename)
if isinstance(e, SyntaxError):
# SyntaxErrors are special: their innermost stack frame is fake
# so extract_tb won't see it and we have to get the filename
# from the exception object.
watch(e.filename)
else:
logging.basicConfig()
gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == 'module':
# runpy did a fake import of the module as __main__, but now it's
# no longer in sys.modules. Figure out where it is and watch it.
loader = pkgutil.get_loader(module)
if loader is not None:
watch(loader.get_filename())
wait()
if __name__ == "__main__":
# See also the other __main__ block at the top of the file, which modifies
# sys.path before our imports
main()
| gpl-3.0 |
muzixing/ryu | ryu/contrib/ovs/db/parser.py | 55 | 3331 | # Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ovs.db import error
class Parser(object):
def __init__(self, json, name):
self.name = name
self.json = json
if type(json) != dict:
self.__raise_error("Object expected.")
self.used = set()
def __get(self, name, types, optional, default=None):
if name in self.json:
self.used.add(name)
member = float_to_int(self.json[name])
if is_identifier(member) and "id" in types:
return member
if len(types) and type(member) not in types:
self.__raise_error("Type mismatch for member '%s'." % name)
return member
else:
if not optional:
self.__raise_error("Required '%s' member is missing." % name)
return default
def get(self, name, types):
return self.__get(name, types, False)
def get_optional(self, name, types, default=None):
return self.__get(name, types, True, default)
def __raise_error(self, message):
raise error.Error("Parsing %s failed: %s" % (self.name, message),
self.json)
def finish(self):
missing = set(self.json) - set(self.used)
if missing:
name = missing.pop()
if len(missing) > 1:
present = "and %d other members are" % len(missing)
elif missing:
present = "and 1 other member are"
else:
present = "is"
self.__raise_error("Member '%s' %s present but not allowed here" %
(name, present))
def float_to_int(x):
# XXX still needed?
if type(x) == float:
integer = int(x)
if integer == x and -2 ** 53 <= integer < 2 ** 53:
return integer
return x
id_re = re.compile("[_a-zA-Z][_a-zA-Z0-9]*$")
def is_identifier(s):
return type(s) in [str, unicode] and id_re.match(s)
def json_type_to_string(type_):
    if type_ is None:
return "null"
elif type_ == bool:
return "boolean"
elif type_ == dict:
return "object"
elif type_ == list:
return "array"
elif type_ in [int, long, float]:
return "number"
elif type_ in [str, unicode]:
return "string"
else:
return "<invalid>"
def unwrap_json(json, name, types, desc):
if (type(json) not in (list, tuple) or len(json) != 2 or json[0] != name or
type(json[1]) not in types):
raise error.Error('expected ["%s", <%s>]' % (name, desc), json)
return json[1]
def parse_json_pair(json):
if type(json) != list or len(json) != 2:
raise error.Error("expected 2-element array", json)
return json
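# --- Hedged usage sketch (illustrative; not part of the upstream file) ---
# Demonstrates the Parser lifecycle implemented above: fetch required and
# optional members from a JSON object, then call finish() so any leftover,
# unrecognized members raise error.Error.  The member names are made up.
def _example_parser_usage():
    spec = {"type": "integer", "minInteger": 0}
    parser = Parser(spec, "example column spec")
    type_name = parser.get("type", ["id"])  # "id" accepts identifier strings
    minimum = parser.get_optional("minInteger", [int], 0)
    parser.finish()  # would raise if spec contained an unknown member
    return type_name, minimum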
| apache-2.0 |
gmoothart/validictory | validictory/tests/test_disallow_unknown_properties.py | 9 | 2556 | from unittest import TestCase
import validictory
class TestDisallowUnknownProperties(TestCase):
def setUp(self):
self.data_simple = {"name": "john doe", "age": 42}
self.schema_simple = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
},
}
self.data_complex = {
"inv_number": "123",
"rows": [
{
"sku": "ab-456",
"desc": "a description",
"price": 100.45
},
{
"sku": "xy-123",
"desc": "another description",
"price": 999.00
}
]
}
self.schema_complex = {
"type": "object",
"properties": {
"inv_number": {"type": "string"},
"rows": {
"type": "array",
"items": {
"sku": {"type": "string"},
"desc": {"type": "string"},
"price": {"type": "number"}
},
}
}
}
def test_disallow_unknown_properties_pass(self):
try:
validictory.validate(self.data_simple, self.schema_simple,
disallow_unknown_properties=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_disallow_unknown_properties_fail(self):
self.data_simple["sex"] = "male"
self.assertRaises(validictory.SchemaError, validictory.validate,
self.data_simple, self.schema_simple,
disallow_unknown_properties=True)
def test_disallow_unknown_properties_complex_pass(self):
try:
validictory.validate(self.data_complex, self.schema_complex,
disallow_unknown_properties=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_disallow_unknown_properties_complex_fail(self):
newrow = {"sku": "789", "desc": "catch me if you can", "price": 1,
"rice": 666}
self.data_complex["rows"].append(newrow)
self.assertRaises(validictory.SchemaError, validictory.validate,
self.data_complex, self.schema_complex,
disallow_unknown_properties=True)
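# --- Hedged standalone sketch (illustrative; mirrors the tests above) ---
# With disallow_unknown_properties=True, a property that is absent from the
# schema makes validation raise validictory.SchemaError.
if __name__ == "__main__":
    data = {"name": "john doe", "age": 42, "sex": "male"}
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
    }
    try:
        validictory.validate(data, schema, disallow_unknown_properties=True)
    except validictory.SchemaError as exc:
        print("rejected unknown property: %s" % exc)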
| mit |
sylarcp/anita | venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/base.py | 21 | 89212 | # postgresql/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql
:name: PostgreSQL
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if Postgresql 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the use of RETURNING by default, specify the flag
``implicit_returning=False`` to :func:`.create_engine`.
.. _postgresql_isolation_level:
Transaction Isolation Level
---------------------------
All Postgresql dialects support setting of transaction isolation level
both via a dialect-specific parameter :paramref:`.create_engine.isolation_level`
accepted by :func:`.create_engine`,
as well as the ``isolation_level`` argument as passed to
:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect,
this feature works by issuing the command
``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.
To set isolation level using :func:`.create_engine`::
engine = create_engine(
"postgresql+pg8000://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
The :mod:`~sqlalchemy.dialects.postgresql.psycopg2` and
:mod:`~sqlalchemy.dialects.postgresql.pg8000` dialects also offer the
special level ``AUTOCOMMIT``.
.. seealso::
:ref:`psycopg2_isolation_level`
:ref:`pg8000_isolation_level`
.. _postgresql_schema_reflection:
Remote-Schema Table Introspection and Postgresql search_path
------------------------------------------------------------
The Postgresql dialect can reflect tables from any schema. The
:paramref:`.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`.Table` objects
will in all cases retain this ``.schema`` attribute as was specified.
However, with regards to tables which these :class:`.Table` objects refer to
via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
`Postgresql search path
<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
returns a sample definition for a particular foreign key constraint,
omitting the referenced schema name from that definition when the name is
also in the Postgresql schema search path. The interaction below
illustrates this behavior::
test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
CREATE TABLE
test=> CREATE TABLE referring(
test(> id INTEGER PRIMARY KEY,
test(> referred_id INTEGER REFERENCES test_schema.referred(id));
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
pg_get_constraintdef
---------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
Above, we created a table ``referred`` as a member of the remote schema
``test_schema``, however when we added ``test_schema`` to the
PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
test=> SET search_path TO public;
SET
The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
---------------------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
(1 row)
SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
in order to determine the remote schema name. That is, if our ``search_path``
were set to include ``test_schema``, and we invoked a table
reflection process as follows::
>>> from sqlalchemy import Table, MetaData, create_engine
>>> engine = create_engine("postgresql://scott:tiger@localhost/test")
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta,
... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
The above process would deliver to the :attr:`.MetaData.tables` collection
the ``referred`` table, named **without** the schema::
>>> meta.tables['referred'].schema is None
True
To alter the behavior of reflection such that the referred schema is
maintained regardless of the ``search_path`` setting, use the
``postgresql_ignore_search_path`` option, which can be specified as a
dialect-specific argument to both :class:`.Table` as well as
:meth:`.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta, autoload=True,
... autoload_with=conn,
... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
We will now have ``test_schema.referred`` stored as schema-qualified::
>>> meta.tables['test_schema.referred'].schema
'test_schema'
.. sidebar:: Best Practices for Postgresql Schema reflection
The description of Postgresql schema reflection behavior is complex, and
is the product of many years of dealing with widely varied use cases and
user preferences. But in fact, there's no need to understand any of it if
you just stick to the simplest use pattern: leave the ``search_path`` set
to its default of ``public`` only, never refer to the name ``public`` as
an explicit schema name otherwise, and refer to all other schema names
explicitly when building up a :class:`.Table` object. The options
described here are only for those users who can't, or prefer not to, stay
within these guidelines.
Note that **in all cases**, the "default" schema is always reflected as
``None``. The "default" schema on Postgresql is that which is returned by the
Postgresql ``current_schema()`` function. On a typical Postgresql
installation, this is the name ``public``. So a table that refers to another
which is in the ``public`` (i.e. default) schema will always have the
``.schema`` attribute set to ``None``.
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
dialect-level option accepted by :class:`.Table` and
:meth:`.MetaData.reflect`.
.. seealso::
`The Schema Search Path
<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
- on the Postgresql website.
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo')
print result.fetchall()
.. _postgresql_match:
Full Text Search
----------------
SQLAlchemy makes available the Postgresql ``@@`` operator via the
:meth:`.ColumnElement.match` method on any textual column expression.
On a Postgresql dialect, an expression like the following::
select([sometable.c.text.match("search string")])
will emit to the database::
SELECT text @@ to_tsquery('search string') FROM table
The Postgresql text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
explicitly using the standard :data:`.func` construct. For example::
select([
func.to_tsvector('fat cats ate rats').match('cat & rat')
])
Emits the equivalent of::
SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
select([cast("some text", TSVECTOR)])
produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in Postgresql are influenced by a combination of: the
PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
during a query.
When performing a Full Text Search against a column that has a GIN or
GiST index that is already pre-computed (which is common on full text
searches) one may need to explicitly pass in a particular PostgreSQL
``regconfig`` value to ensure the query-planner utilizes the index and does
not re-compute the column on demand.
In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
)
Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
One can also specifically pass in a `'regconfig'` value to the
``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
        func.to_tsvector('english', mytable.c.title)\
.match('somestring', postgresql_regconfig='english')
)
produces a statement equivalent to::
SELECT mytable.id FROM mytable
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
PostgreSQL to ensure that you are generating queries with SQLAlchemy that
take full advantage of any indexes you may have created for full text search.
FROM ONLY ...
------------------------
The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
table in an inheritance hierarchy. This can be used to produce the
``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
syntaxes. It uses SQLAlchemy's hints mechanism::
# SELECT ... FROM ONLY ...
result = table.select().with_hint(table, 'ONLY', 'postgresql')
print result.fetchall()
# UPDATE ONLY ...
table.update(values=dict(foo='bar')).with_hint('ONLY',
dialect_name='postgresql')
# DELETE FROM ONLY ...
table.delete().with_hint('ONLY', dialect_name='postgresql')
.. _postgresql_indexes:
Postgresql-Specific Index Options
---------------------------------
Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.
Partial Indexes
^^^^^^^^^^^^^^^^
Partial indexes add criterion to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
Operator Classes
^^^^^^^^^^^^^^^^^
PostgreSQL allows the specification of an *operator class* for each column of
an index (see
http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the
``postgresql_ops`` keyword argument::
Index('my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
.. versionadded:: 0.7.2
``postgresql_ops`` keyword argument to :class:`.Index` construct.
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`.Column`, i.e. the name used to access it from the ``.c``
collection of :class:`.Table`, which can be configured to be different than
the actual name of the column as expressed in the database.
Index Types
^^^^^^^^^^^^
PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
as the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
Index('my_index', my_table.c.data, postgresql_using='gin')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
.. _postgresql_index_concurrently:
Indexes with CONCURRENTLY
^^^^^^^^^^^^^^^^^^^^^^^^^
The Postgresql index option CONCURRENTLY is supported by passing the
flag ``postgresql_concurrently`` to the :class:`.Index` construct::
tbl = Table('testtbl', m, Column('data', Integer))
idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
The above index construct will render SQL as::
CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
.. versionadded:: 0.9.9
"""
from collections import defaultdict
import re
from ... import sql, schema, exc, util
from ...engine import default, reflection
from ...sql import compiler, expression, operators
from ... import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
CHAR, TEXT, FLOAT, NUMERIC, \
DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
"asymmetric", "both", "case", "cast", "check", "collate", "column",
"constraint", "create", "current_catalog", "current_date",
"current_role", "current_time", "current_timestamp", "current_user",
"default", "deferrable", "desc", "distinct", "do", "else", "end",
"except", "false", "fetch", "for", "foreign", "from", "grant", "group",
"having", "in", "initially", "intersect", "into", "leading", "limit",
"localtime", "localtimestamp", "new", "not", "null", "of", "off",
"offset", "old", "on", "only", "or", "order", "placing", "primary",
"references", "returning", "select", "session_user", "some", "symmetric",
"table", "then", "to", "trailing", "true", "union", "unique", "user",
"using", "variadic", "when", "where", "window", "with", "authorization",
"between", "binary", "cross", "current_schema", "freeze", "full",
"ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
"notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = 'BYTEA'
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = 'DOUBLE_PRECISION'
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class OID(sqltypes.TypeEngine):
"""Provide the Postgresql OID type.
.. versionadded:: 0.9.5
"""
__visit_name__ = "OID"
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
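    A minimal, illustrative column declaration::
        from sqlalchemy import Column, Table, MetaData
        t = Table('events', MetaData(),
                  Column('duration', INTERVAL(precision=6)))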
"""
__visit_name__ = 'INTERVAL'
def __init__(self, precision=None):
self.precision = precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""Postgresql UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = 'UUID'
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
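        A minimal, illustrative declaration that round-trips Python
        ``uuid.UUID`` objects::
            from sqlalchemy import Column, Table, MetaData
            t = Table('t', MetaData(), Column('id', UUID(as_uuid=True)))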
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support "
"the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = util.text_type(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class TSVECTOR(sqltypes.TypeEngine):
"""The :class:`.postgresql.TSVECTOR` type implements the Postgresql
text search type TSVECTOR.
It can be used to do full text queries on natural language
documents.
.. versionadded:: 0.9.0
.. seealso::
:ref:`postgresql_match`
"""
__visit_name__ = 'TSVECTOR'
class _Slice(expression.ColumnElement):
__visit_name__ = 'slice'
type = sqltypes.NULLTYPE
def __init__(self, slice_, source_comparator):
self.start = source_comparator._check_literal(
source_comparator.expr,
operators.getitem, slice_.start)
self.stop = source_comparator._check_literal(
source_comparator.expr,
operators.getitem, slice_.stop)
class Any(expression.ColumnElement):
"""Represent the clause ``left operator ANY (right)``. ``right`` must be
an array expression.
.. seealso::
:class:`.postgresql.ARRAY`
:meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method
"""
__visit_name__ = 'any'
def __init__(self, left, right, operator=operators.eq):
self.type = sqltypes.Boolean()
self.left = expression._literal_as_binds(left)
self.right = right
self.operator = operator
class All(expression.ColumnElement):
"""Represent the clause ``left operator ALL (right)``. ``right`` must be
an array expression.
.. seealso::
:class:`.postgresql.ARRAY`
:meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method
"""
__visit_name__ = 'all'
def __init__(self, left, right, operator=operators.eq):
self.type = sqltypes.Boolean()
self.left = expression._literal_as_binds(left)
self.right = right
self.operator = operator
class array(expression.Tuple):
"""A Postgresql ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select([
array([1,2]) + array([3,4,5])
])
print stmt.compile(dialect=postgresql.dialect())
Produces the SQL::
SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
An instance of :class:`.array` will always have the datatype
:class:`.ARRAY`. The "inner" type of the array is inferred from
the values present, unless the ``type_`` keyword argument is passed::
array(['foo', 'bar'], type_=CHAR)
.. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
See also:
:class:`.postgresql.ARRAY`
"""
__visit_name__ = 'array'
def __init__(self, clauses, **kw):
super(array, self).__init__(*clauses, **kw)
self.type = ARRAY(self.type)
def _bind_param(self, operator, obj):
return array([
expression.BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
for o in obj
])
def self_group(self, against=None):
return self
class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
"""Postgresql ARRAY type.
Represents values as Python lists.
An :class:`.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer))
)
The above type represents an N-dimensional array,
meaning Postgresql will interpret values with any number
of dimensions automatically. To produce an INSERT
construct that passes in a 1-dimensional array of integers::
connection.execute(
mytable.insert(),
data=[1,2,3]
)
The :class:`.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, dimensions=2))
)
This has the effect of the :class:`.ARRAY` type
specifying that number of bracketed blocks when a :class:`.Table`
is used in a CREATE TABLE statement, or when the type is used
within a :func:`.expression.cast` construct; it also causes
the bind parameter and result set processing of the type
to optimize itself to expect exactly that number of dimensions.
Note that Postgresql itself still allows N dimensions with such a type.
SQL expressions of type :class:`.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Note that Postgresql arrays default
to 1-based indexing. The operator produces binary expression
constructs which will produce the appropriate SQL, both for
SELECT statements::
select([mytable.c.data[5], mytable.c.data[2:7]])
as well as UPDATE statements when the :meth:`.Update.values` method
is used::
mytable.update().values({
mytable.c.data[5]: 7,
mytable.c.data[2:7]: [1, 2, 3]
})
:class:`.ARRAY` provides special methods for containment operations,
e.g.::
mytable.c.data.contains([1, 2])
For a full list of special methods see :class:`.ARRAY.Comparator`.
.. versionadded:: 0.8 Added support for index and slice operations
to the :class:`.ARRAY` type, including support for UPDATE
statements, and special array containment operations.
The :class:`.ARRAY` type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
See also:
:class:`.postgresql.array` - produce a literal array value.
"""
__visit_name__ = 'ARRAY'
class Comparator(sqltypes.Concatenable.Comparator):
"""Define comparison operations for :class:`.ARRAY`."""
def __getitem__(self, index):
shift_indexes = 1 if self.expr.type.zero_indexes else 0
if isinstance(index, slice):
if shift_indexes:
index = slice(
index.start + shift_indexes,
index.stop + shift_indexes,
index.step
)
index = _Slice(index, self)
return_type = self.type
else:
index += shift_indexes
return_type = self.type.item_type
return self._binary_operate(self.expr, operators.getitem, index,
result_type=return_type)
def any(self, other, operator=operators.eq):
"""Return ``other operator ANY (array)`` clause.
        Argument places are switched, because ANY requires the array
        expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select([table.c.data]).where(
table.c.data.any(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:class:`.postgresql.Any`
:meth:`.postgresql.ARRAY.Comparator.all`
"""
return Any(other, self.expr, operator=operator)
def all(self, other, operator=operators.eq):
"""Return ``other operator ALL (array)`` clause.
        Argument places are switched, because ALL requires the array
        expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select([table.c.data]).where(
table.c.data.all(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:class:`.postgresql.All`
:meth:`.postgresql.ARRAY.Comparator.any`
"""
return All(other, self.expr, operator=operator)
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
"""
return self.expr.op('@>')(other)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.expr.op('<@')(other)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.expr.op('&&')(other)
def _adapt_expression(self, op, other_comparator):
if isinstance(op, operators.custom_op):
if op.opstring in ['@>', '<@', '&&']:
return op, sqltypes.Boolean
return sqltypes.Concatenable.Comparator.\
_adapt_expression(self, op, other_comparator)
comparator_factory = Comparator
def __init__(self, item_type, as_tuple=False, dimensions=None,
zero_indexes=False):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This will cause the DDL emitted for this
ARRAY to include the exact number of bracket clauses ``[]``,
and will also optimize the performance of the type overall.
Note that PG arrays are always implicitly "non-dimensioned",
meaning they can store any number of dimensions no matter how
they were declared.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and Postgresql one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
.. versionadded:: 0.9.5
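        An illustrative declaration enabling the index conversion::
            Column('data', ARRAY(Integer, zero_indexes=True))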
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _proc_array(self, arr, itemproc, dim, collection):
if dim is None:
arr = list(arr)
if dim == 1 or dim is None and (
# this has to be (list, tuple), or at least
# not hasattr('__iter__'), since Py3K strings
# etc. have __iter__
not arr or not isinstance(arr[0], (list, tuple))):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
self._proc_array(
x, itemproc,
dim - 1 if dim is not None else None,
collection)
for x in arr
)
def bind_processor(self, dialect):
item_proc = self.item_type.\
dialect_impl(dialect).\
bind_processor(dialect)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
list)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.\
dialect_impl(dialect).\
result_processor(dialect, coltype)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
tuple if self.as_tuple else list)
return process
PGArray = ARRAY
class ENUM(sqltypes.Enum):
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
support for PG's ``CREATE TYPE``.
:class:`~.postgresql.ENUM` is used automatically when
using the :class:`.types.Enum` type on PG assuming
    the ``native_enum`` flag is left as ``True``. However, the
:class:`~.postgresql.ENUM` class can also be instantiated
directly in order to access some additional Postgresql-specific
options, namely finer control over whether or not
``CREATE TYPE`` should be emitted.
Note that both :class:`.types.Enum` as well as
:class:`~.postgresql.ENUM` feature create/drop
methods; the base :class:`.types.Enum` type ultimately
delegates to the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods present here.
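    A hedged usage sketch (names are illustrative)::
        from sqlalchemy import Table, Column, Integer, MetaData
        status = ENUM('new', 'open', 'closed', name='ticket_status')
        ticket = Table('ticket', MetaData(),
                       Column('id', Integer, primary_key=True),
                       Column('status', status))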
"""
def __init__(self, *enums, **kw):
"""Construct an :class:`~.postgresql.ENUM`.
Arguments are the same as that of
:class:`.types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
.. versionadded:: 0.7.4
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql CREATE TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
not bind.dialect.has_type(
bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql DROP TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if '_ddl_runner' in kw:
ddl_runner = kw['_ddl_runner']
if '_pg_enums' in ddl_runner.memo:
pg_enums = ddl_runner.memo['_pg_enums']
else:
pg_enums = ddl_runner.memo['_pg_enums'] = set()
present = self.name in pg_enums
pg_enums.add(self.name)
return present
else:
return False
def _on_table_create(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_drop(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
colspecs = {
sqltypes.Interval: INTERVAL,
sqltypes.Enum: ENUM,
}
ischema_names = {
'integer': INTEGER,
'bigint': BIGINT,
'smallint': SMALLINT,
'character varying': VARCHAR,
'character': CHAR,
'"char"': sqltypes.String,
'name': sqltypes.String,
'text': TEXT,
'numeric': NUMERIC,
'float': FLOAT,
'real': REAL,
'inet': INET,
'cidr': CIDR,
'uuid': UUID,
'bit': BIT,
'bit varying': BIT,
'macaddr': MACADDR,
'oid': OID,
'double precision': DOUBLE_PRECISION,
'timestamp': TIMESTAMP,
'timestamp with time zone': TIMESTAMP,
'timestamp without time zone': TIMESTAMP,
'time with time zone': TIME,
'time without time zone': TIME,
'date': DATE,
'time': TIME,
'bytea': BYTEA,
'boolean': BOOLEAN,
'interval': INTERVAL,
'interval year to month': INTERVAL,
'interval day to second': INTERVAL,
'tsvector': TSVECTOR
}
class PGCompiler(compiler.SQLCompiler):
def visit_array(self, element, **kw):
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
def visit_slice(self, element, **kw):
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_any(self, element, **kw):
return "%s%sANY (%s)" % (
self.process(element.left, **kw),
compiler.OPERATORS[element.operator],
self.process(element.right, **kw)
)
def visit_all(self, element, **kw):
return "%s%sALL (%s)" % (
self.process(element.left, **kw),
compiler.OPERATORS[element.operator],
self.process(element.right, **kw)
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
binary.modifiers['postgresql_regconfig'],
sqltypes.STRINGTYPE)
if regconfig:
return "%s @@ to_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
self.process(binary.right, **kw)
)
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def visit_sequence(self, seq):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select):
text = ""
if select._limit is not None:
text += " \n LIMIT " + self.process(sql.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(sql.literal(select._offset))
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
if hint.upper() != 'ONLY':
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
def get_select_precolumns(self, select):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return "DISTINCT ON (" + ', '.join(
[self.process(col) for col in select._distinct]
) + ") "
else:
return "DISTINCT ON (" + self.process(select._distinct) + ") "
else:
return ""
def for_update_clause(self, select):
if select._for_update_arg.read:
tmp = " FOR SHARE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tables = util.OrderedSet(
c.table if isinstance(c, expression.ColumnClause)
else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
self.process(table, ashint=True)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
return tmp
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2], **kw)
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if column.primary_key and \
column is column.table._autoincrement_column and \
(
self.dialect.supports_smallserial or
not isinstance(impl_type, sqltypes.SmallInteger)
) and (
column.default is None or
(
isinstance(column.default, schema.Sequence) and
column.default.optional
)):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
elif isinstance(impl_type, sqltypes.SmallInteger):
colspec += " SMALLSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (
self.preparer.format_type(type_)
)
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
self._verify_index_table(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
concurrently = index.dialect_options['postgresql']['concurrently']
if concurrently:
text += "CONCURRENTLY "
text += "%s ON %s " % (
self._prepared_index_name(index,
include_schema=False),
preparer.format_table(index.table)
)
using = index.dialect_options['postgresql']['using']
if using:
text += "USING %s " % preparer.quote(using)
ops = index.dialect_options["postgresql"]["ops"]
text += "(%s)" \
% (
', '.join([
self.sql_compiler.process(
expr.self_group()
if not isinstance(expr, expression.ColumnClause)
else expr,
include_table=False, literal_binds=True) +
(
(' ' + ops[expr.key])
if hasattr(expr, 'key')
and expr.key in ops else ''
)
for expr in index.expressions
])
)
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False,
literal_binds=True)
text += " WHERE " + where_compiled
return text
def visit_exclude_constraint(self, constraint):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
elements = []
for c in constraint.columns:
op = constraint.operators[c.name]
elements.append(self.preparer.quote(c.name) + ' WITH ' + op)
text += "EXCLUDE USING %s (%s)" % (constraint.using,
', '.join(elements))
if constraint.where is not None:
text += ' WHERE (%s)' % self.sql_compiler.process(
constraint.where,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
    def visit_TSVECTOR(self, type_):
return "TSVECTOR"
def visit_INET(self, type_):
return "INET"
def visit_CIDR(self, type_):
return "CIDR"
def visit_MACADDR(self, type_):
return "MACADDR"
def visit_OID(self, type_):
return "OID"
def visit_FLOAT(self, type_):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': type_.precision}
def visit_DOUBLE_PRECISION(self, type_):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_HSTORE(self, type_):
return "HSTORE"
def visit_JSON(self, type_):
return "JSON"
def visit_JSONB(self, type_):
return "JSONB"
def visit_INT4RANGE(self, type_):
return "INT4RANGE"
def visit_INT8RANGE(self, type_):
return "INT8RANGE"
def visit_NUMRANGE(self, type_):
return "NUMRANGE"
def visit_DATERANGE(self, type_):
return "DATERANGE"
def visit_TSRANGE(self, type_):
return "TSRANGE"
def visit_TSTZRANGE(self, type_):
return "TSTZRANGE"
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_enum(self, type_):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_)
else:
return self.visit_ENUM(type_)
def visit_ENUM(self, type_):
return self.dialect.identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_TIME(self, type_):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_INTERVAL(self, type_):
if type_.precision is not None:
return "INTERVAL(%d)" % type_.precision
else:
return "INTERVAL"
def visit_BIT(self, type_):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_):
return "UUID"
def visit_large_binary(self, type_):
return self.visit_BYTEA(type_)
def visit_BYTEA(self, type_):
return "BYTEA"
def visit_ARRAY(self, type_):
return self.process(type_.item_type) + ('[]' * (type_.dimensions
if type_.dimensions
is not None else 1))
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.CompileError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name)
if not self.omit_schema and use_schema and type_.schema is not None:
name = self.quote_schema(type_.schema) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the oid from `table_name` and `schema`."""
return self.dialect.get_table_oid(self.bind, table_name, schema,
info_cache=self.info_cache)
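# A hedged usage sketch (illustrative; connection details are placeholders):
# obtaining the dialect-specific inspector and fetching a table oid.
#
#     from sqlalchemy import create_engine, inspect
#     engine = create_engine("postgresql://scott:tiger@localhost/test")
#     insp = inspect(engine)  # yields PGInspector under this dialect
#     oid = insp.get_table_oid("mytable", schema="public")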
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar((
"select nextval('%s')" %
self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
if column.primary_key and \
column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
column.server_default.arg,
column.type)
elif (column.default is None or
(column.default.is_sequence and
column.default.optional)):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
# generates server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
                tab = column.table.name
                col = column.name
                # truncate the table and column names so that the
                # generated "<table>_<column>_seq" name stays within
                # Postgresql's 63-character identifier limit
                tab = tab[0:29 + max(0, (29 - len(col)))]
                col = col[0:29 + max(0, (29 - len(tab)))]
                name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
sch = column.table.schema
if sch is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
(sch, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
return self._execute_scalar(exc, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
class PGDialect(default.DefaultDialect):
name = 'postgresql'
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_smallserial = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_default_values = True
supports_empty_insert = False
supports_multivalues_insert = True
default_paramstyle = 'pyformat'
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": {},
"concurrently": False,
}),
(schema.Table, {
"ignore_search_path": False
})
]
reflection_options = ('postgresql_ignore_search_path', )
_backslash_escapes = True
def __init__(self, isolation_level=None, json_serializer=None,
json_deserializer=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
self._json_serializer = json_serializer
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
# http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
self._backslash_escapes = self.server_version_info < (8, 2) or \
connection.scalar(
"show standard_conforming_strings"
) == 'off'
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('show transaction isolation level')
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
# FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
                # Must find a way to make the dbapi not
                # open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
query = ("select nspname from pg_namespace "
"where lower(nspname)=:schema")
cursor = connection.execute(
sql.text(
query,
bindparams=[
sql.bindparam(
'schema', util.text_type(schema.lower()),
type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=current_schema() "
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(table_name),
type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name",
bindparams=[
sql.bindparam('name',
util.text_type(table_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
util.text_type(schema),
type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
util.text_type(schema),
type_=sqltypes.Unicode)
]
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
query = sql.text(query)
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
query = sql.text(query)
query = query.bindparams(
sql.bindparam('typname',
util.text_type(type_name), type_=sqltypes.Unicode),
)
if schema is not None:
query = query.bindparams(
sql.bindparam('nspname',
util.text_type(schema), type_=sqltypes.Unicode),
)
cursor = connection.execute(query)
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(
'.*(?:PostgreSQL|EnterpriseDB) '
'(\d+)\.(\d+)(?:\.(\d+))?(?:\.\d+)?(?:devel)?',
v)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
        Several reflection methods require the table oid; this method lets
        it be fetched once and cached for subsequent calls.
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = """
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in ('r','v')
""" % schema_where_clause
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = util.text_type(table_name)
if schema is not None:
schema = util.text_type(schema)
s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
s = s.columns(oid=sqltypes.Integer)
if schema:
s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode))
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
s = """
SELECT nspname
FROM pg_namespace
ORDER BY nspname
"""
rp = connection.execute(s)
# what about system tables?
if util.py2k:
schema_names = [row[0].decode(self.encoding) for row in rp
if not row[0].startswith('pg_')]
else:
schema_names = [row[0] for row in rp
if not row[0].startswith('pg_')]
return schema_names
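# Hedged usage sketch (not part of the original module): these reflection
# methods are normally reached through the public Inspector facade rather
# than called directly; the connection URL below is a placeholder.
#
#     from sqlalchemy import create_engine, inspect
#     engine = create_engine('postgresql://user:pass@localhost/mydb')
#     insp = inspect(engine)
#     insp.get_schema_names()                 # e.g. ['public', ...]
#     insp.get_table_names(schema='public')   # delegates to the method below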
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
result = connection.execute(
sql.text("SELECT relname FROM pg_class c "
"WHERE relkind = 'r' "
"AND '%s' = (select nspname from pg_namespace n "
"where n.oid = c.relnamespace) " %
current_schema,
typemap={'relname': sqltypes.Unicode}
)
)
return [row[0] for row in result]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT relname
FROM pg_class c
WHERE relkind = 'v'
AND '%(schema)s' = (select nspname from pg_namespace n
where n.oid = c.relnamespace)
""" % dict(schema=current_schema)
if util.py2k:
view_names = [row[0].decode(self.encoding)
for row in connection.execute(s)]
else:
view_names = [row[0] for row in connection.execute(s)]
return view_names
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT definition FROM pg_views
WHERE schemaname = :schema
AND viewname = :view_name
"""
rp = connection.execute(sql.text(s),
view_name=view_name, schema=current_schema)
if rp:
if util.py2k:
view_def = rp.scalar().decode(self.encoding)
else:
view_def = rp.scalar()
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
bindparams=[
sql.bindparam('table_oid', type_=sqltypes.Integer)],
typemap={
'attname': sqltypes.Unicode,
'default': sqltypes.Unicode}
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
enums = self._load_enums(connection)
# format columns
columns = []
for name, format_type, default, notnull, attnum, table_oid in rows:
column_info = self._get_column_info(
name, format_type, default, notnull, domains, enums, schema)
columns.append(column_info)
return columns
def _get_column_info(self, name, format_type, default,
notnull, domains, enums, schema):
# strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r'\(.*\)', '', format_type)
# strip '[]' from integer[], etc.
attype = re.sub(r'\[\]', '', attype)
nullable = not notnull
is_array = format_type.endswith('[]')
charlen = re.search('\(([\d,]+)\)', format_type)
if charlen:
charlen = charlen.group(1)
args = re.search('\((.*)\)', format_type)
if args and args.group(1):
args = tuple(re.split('\s*,\s*', args.group(1)))
else:
args = ()
kwargs = {}
if attype == 'numeric':
if charlen:
prec, scale = charlen.split(',')
args = (int(prec), int(scale))
else:
args = ()
elif attype == 'double precision':
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
'time with time zone'):
kwargs['timezone'] = True
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype in ('timestamp without time zone',
'time without time zone', 'time'):
kwargs['timezone'] = False
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype == 'bit varying':
kwargs['varying'] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype in ('interval', 'interval year to month',
'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif charlen:
args = (int(charlen),)
while True:
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif attype in enums:
enum = enums[attype]
coltype = ENUM
if "." in attype:
kwargs['schema'], kwargs['name'] = attype.split('.')
else:
kwargs['name'] = attype
args = tuple(enum['labels'])
break
elif attype in domains:
domain = domains[attype]
attype = domain['attype']
# A table can't override whether the domain is nullable.
nullable = domain['nullable']
if domain['default'] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain['default']
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
coltype = sqltypes.NULLTYPE
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
autoincrement = True
# the default is related to a Sequence
sch = schema
if '.' not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
('"%s"' % sch) + '.' + \
match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
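# Standalone sketch (illustration only, not in the original source): how the
# regexes in _get_column_info() decompose a PostgreSQL format_type() string.
import re
_demo_format_type = 'character varying(30)[]'
_demo_attype = re.sub(r'\(.*\)', '', _demo_format_type)  # -> 'character varying[]'
_demo_attype = re.sub(r'\[\]', '', _demo_attype)         # -> 'character varying'
assert _demo_attype == 'character varying'
assert _demo_format_type.endswith('[]')                  # reflected as ARRAY
assert re.search('\(([\d,]+)\)', _demo_format_type).group(1) == '30'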
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
if self.server_version_info < (8, 4):
PK_SQL = """
SELECT a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_attribute a
on t.oid=a.attrelid AND %s
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
""" % self._pg_index_any("a.attnum", "ix.indkey")
else:
# unnest() and generate_subscripts() both introduced in
# version 8.4
PK_SQL = """
SELECT a.attname
FROM pg_attribute a JOIN (
SELECT unnest(ix.indkey) attnum,
generate_subscripts(ix.indkey, 1) ord
FROM pg_index ix
WHERE ix.indrelid = :table_oid AND ix.indisprimary
) k ON a.attnum=k.attnum
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
cols = [r[0] for r in c.fetchall()]
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {'constrained_columns': cols, 'name': name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None,
postgresql_ignore_search_path=False, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
ORDER BY 1
"""
# http://www.postgresql.org/docs/9.0/static/sql-createtable.html
FK_REGEX = re.compile(
r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
r'[\s]?(ON UPDATE '
r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(ON DELETE '
r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
)
t = sql.text(FK_SQL, typemap={
'conname': sqltypes.Unicode,
'condef': sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns, \
_, match, _, onupdate, _, ondelete, \
deferrable, _, initially = m
if deferrable is not None:
deferrable = True if deferrable == 'DEFERRABLE' else False
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(
r'\s*,\s*', constrained_columns)]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
# provided it isn't the "default" schema
if conschema != self.default_schema_name:
referred_schema = conschema
else:
referred_schema = schema
elif referred_schema:
# referred_schema is the schema that we regexp'ed from
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = \
preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
for x in
re.split(r'\s*,\s', referred_columns)]
fkey_d = {
'name': conname,
'constrained_columns': constrained_columns,
'referred_schema': referred_schema,
'referred_table': referred_table,
'referred_columns': referred_columns,
'options': {
'onupdate': onupdate,
'ondelete': ondelete,
'deferrable': deferrable,
'initially': initially,
'match': match
}
}
fkeys.append(fkey_d)
return fkeys
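# Standalone sketch (illustration only, not in the original source): the core
# of FK_REGEX applied to a made-up pg_get_constraintdef() string.
import re
_demo_condef = ('FOREIGN KEY (user_id) REFERENCES public.users(id) '
                'ON DELETE CASCADE')
_demo_groups = re.search(
    r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)',
    _demo_condef).groups()
assert _demo_groups == ('user_id', 'public', 'users', 'id')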
def _pg_index_any(self, col, compare_to):
if self.server_version_info < (8, 1):
# http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us
# "In CVS tip you could replace this with "attnum = ANY (indkey)".
# Unfortunately, most array support doesn't work on int2vector in
# pre-8.1 releases, so I think you're kinda stuck with the above
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
"%s[%d] = %s" % (compare_to, ind, col)
for ind in range(0, 10)
)
else:
return "%s = ANY(%s)" % (col, compare_to)
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, ix.indkey%s
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid=ix.indexrelid
left outer join
pg_attribute a
on t.oid=a.attrelid and %s
WHERE
t.relkind = 'r'
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
# version 8.3 here was based on observing the
# cast does not work in PG 8.2.4, does work in 8.3.0.
# nothing in PG changelogs regarding this.
"::varchar" if self.server_version_info >= (8, 3) else "",
self._pg_index_any("a.attnum", "ix.indkey")
)
t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
indexes = defaultdict(lambda: defaultdict(dict))
sv_idx_name = None
for row in c.fetchall():
idx_name, unique, expr, prd, col, col_num, idx_key = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s"
% idx_name)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name)
sv_idx_name = idx_name
index = indexes[idx_name]
if col is not None:
index['cols'][col_num] = col
index['key'] = [int(k.strip()) for k in idx_key.split()]
index['unique'] = unique
return [
{'name': name,
'unique': idx['unique'],
'column_names': [idx['cols'][i] for i in idx['key']]}
for name, idx in indexes.items()
]
@reflection.cache
def get_unique_constraints(self, connection, table_name,
schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
UNIQUE_SQL = """
SELECT
cons.conname as name,
cons.conkey as key,
a.attnum as col_num,
a.attname as col_name
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
on cons.conrelid = a.attrelid AND
a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
"""
t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
uniques = defaultdict(lambda: defaultdict(dict))
for row in c.fetchall():
uc = uniques[row.name]
uc["key"] = row.key
uc["cols"][row.col_num] = row.col_name
return [
{'name': name,
'column_names': [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
def _load_enums(self, connection):
if not self.supports_native_enum:
return {}
# Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
ORDER BY "name", e.oid -- e.oid gives us label order
"""
s = sql.text(SQL_ENUMS, typemap={
'attname': sqltypes.Unicode,
'label': sqltypes.Unicode})
c = connection.execute(s)
enums = {}
for enum in c.fetchall():
if enum['visible']:
# 'visible' just means whether or not the enum is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = enum['name']
else:
name = "%s.%s" % (enum['schema'], enum['name'])
if name in enums:
enums[name]['labels'].append(enum['label'])
else:
enums[name] = {
'labels': [enum['label']],
}
return enums
def _load_domains(self, connection):
# Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
# strip (30) from character varying(30)
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = domain['name']
else:
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
'attype': attype,
'nullable': domain['nullable'],
'default': domain['default']
}
return domains
| mit |
esttorhe/mergepbx | src/plist/antlr/runtime/antlr3/recognizers.py | 4 | 51730 | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import sys
import inspect
from . import runtime_version, runtime_version_str
from .constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \
EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
from .exceptions import RecognitionException, MismatchedTokenException, \
MismatchedRangeException, MismatchedTreeNodeException, \
NoViableAltException, EarlyExitException, MismatchedSetException, \
MismatchedNotSetException, FailedPredicateException, \
BacktrackingFailed, UnwantedTokenException, MissingTokenException
from .tokens import CommonToken, EOF_TOKEN, SKIP_TOKEN
from .compat import set, frozenset, reversed
class RecognizerSharedState(object):
"""
The set of fields needed by an abstract recognizer to recognize input
and recover from errors etc... As a separate state object, it can be
shared among multiple grammars; e.g., when one grammar imports another.
These fields are publicly visible but the actual state pointer per
parser is protected.
"""
def __init__(self):
# Track the set of token types that can follow any rule invocation.
# Stack grows upwards.
self.following = []
# This is true when we see an error and before having successfully
# matched a token. Prevents generation of more than one error message
# per error.
self.errorRecovery = False
# The index into the input stream where the last error occurred.
# This is used to prevent infinite loops where an error is found
# but no token is consumed during recovery...another error is found,
# ad nauseam. This is a failsafe mechanism to guarantee that at least
# one token/tree node is consumed for two errors.
self.lastErrorIndex = -1
# If 0, no backtracking is going on. Safe to exec actions etc...
# If >0 then it's the level of backtracking.
self.backtracking = 0
# An array[size num rules] of Map<Integer,Integer> that tracks
# the stop token index for each rule. ruleMemo[ruleIndex] is
# the memoization table for ruleIndex. For key ruleStartIndex, you
# get back the stop token for associated rule or MEMO_RULE_FAILED.
#
# This is only used if rule memoization is on (which it is by default).
self.ruleMemo = None
## Did the recognizer encounter a syntax error? Track how many.
self.syntaxErrors = 0
# LEXER FIELDS (must be in same state object to avoid casting
# constantly in generated code and Lexer object) :(
## The goal of all lexer rules/methods is to create a token object.
# This is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return this object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set this to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self.token = None
## What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self.tokenStartCharIndex = -1
## The line on which the first character of the token resides
self.tokenStartLine = None
## The character position of first character within the line
self.tokenStartCharPositionInLine = None
## The channel number for the current token
self.channel = None
## The token type for the current token
self.type = None
## You can set the text for the current token to override what is in
# the input char buffer. Use setText() or can set this instance var.
self.text = None
class BaseRecognizer(object):
"""
@brief Common recognizer functionality.
A generic recognizer that can handle recognizers generated from
lexer, parser, and tree grammars. This is all the parsing
support code essentially; most of it is error recovery stuff and
backtracking.
"""
MEMO_RULE_FAILED = -2
MEMO_RULE_UNKNOWN = -1
# copies from Token object for convenience in actions
DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL
# for convenience in actions
HIDDEN = HIDDEN_CHANNEL
# overridden by generated subclasses
tokenNames = None
# The antlr_version attribute has been introduced in 3.1. If it is not
# overwritten in the generated recognizer, we assume a default of 3.0.1.
antlr_version = (3, 0, 1, 0)
antlr_version_str = "3.0.1"
def __init__(self, state=None):
# Input stream of the recognizer. Must be initialized by a subclass.
self.input = None
## State of a lexer, parser, or tree parser are collected into a state
# object so the state can be shared. This sharing is needed to
# have one grammar import others and share same error variables
# and other state variables. It's a kind of explicit multiple
# inheritance via delegation of methods and shared state.
if state is None:
state = RecognizerSharedState()
self._state = state
if self.antlr_version > runtime_version:
raise RuntimeError(
"ANTLR version mismatch: "
"The recognizer has been generated by V%s, but this runtime "
"is V%s. Please use the V%s runtime or higher."
% (self.antlr_version_str,
runtime_version_str,
self.antlr_version_str))
elif (self.antlr_version < (3, 1, 0, 0) and
self.antlr_version != runtime_version):
# FIXME: make the runtime compatible with 3.0.1 codegen
# and remove this block.
raise RuntimeError(
"ANTLR version mismatch: "
"The recognizer has been generated by V%s, but this runtime "
"is V%s. Please use the V%s runtime."
% (self.antlr_version_str,
runtime_version_str,
self.antlr_version_str))
# this one only exists to shut up pylint :(
def setInput(self, input):
self.input = input
def reset(self):
"""
reset the parser's state; subclasses must rewind the input stream
"""
# wack everything related to error recovery
if self._state is None:
# no shared state work to do
return
self._state.following = []
self._state.errorRecovery = False
self._state.lastErrorIndex = -1
self._state.syntaxErrors = 0
# wack everything related to backtracking and memoization
self._state.backtracking = 0
if self._state.ruleMemo is not None:
self._state.ruleMemo = {}
def match(self, input, ttype, follow):
"""
Match current input symbol against ttype. Attempt
single token insertion or deletion error recovery. If
that fails, throw MismatchedTokenException.
To turn off single token insertion or deletion error
recovery, override recoverFromMismatchedToken() and have it
throw an exception. See TreeParser.recoverFromMismatchedToken().
This way any error in a rule will cause an exception and
immediate exit from rule. Rule would recover by resynchronizing
to the set of symbols that can follow rule ref.
"""
matchedSymbol = self.getCurrentInputSymbol(input)
if self.input.LA(1) == ttype:
self.input.consume()
self._state.errorRecovery = False
return matchedSymbol
if self._state.backtracking > 0:
# FIXME: need to return matchedSymbol here as well. damn!!
raise BacktrackingFailed
matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
return matchedSymbol
def matchAny(self, input):
"""Match the wildcard: in a symbol"""
self._state.errorRecovery = False
self.input.consume()
def mismatchIsUnwantedToken(self, input, ttype):
return input.LA(2) == ttype
def mismatchIsMissingToken(self, input, follow):
if follow is None:
# we have no information about the follow; we can only consume
# a single token and hope for the best
return False
# compute what can follow this grammar element reference
if EOR_TOKEN_TYPE in follow:
viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
follow = follow | viableTokensFollowingThisRule
if len(self._state.following) > 0:
# remove EOR if we're not the start symbol
follow = follow - set([EOR_TOKEN_TYPE])
# if current token is consistent with what could come after set
# then we know we're missing a token; error recovery is free to
# "insert" the missing token
if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
return True
return False
def reportError(self, e):
"""Report a recognition problem.
This method sets errorRecovery to indicate the parser is recovering
not parsing. Once in recovery mode, no errors are generated.
To get out of recovery mode, the parser must successfully match
a token (after a resync). So it will go:
1. error occurs
2. enter recovery mode, report error
3. consume until token found in resynch set
4. try to resume parsing
5. next match() will reset errorRecovery mode
If you override, make sure to update syntaxErrors if you care about
that.
"""
# if we've already reported an error and have not matched a token
# yet successfully, don't report any errors.
if self._state.errorRecovery:
return
self._state.syntaxErrors += 1 # don't count spurious
self._state.errorRecovery = True
self.displayRecognitionError(self.tokenNames, e)
def displayRecognitionError(self, tokenNames, e):
hdr = self.getErrorHeader(e)
msg = self.getErrorMessage(e, tokenNames)
self.emitErrorMessage(hdr+" "+msg)
def getErrorMessage(self, e, tokenNames):
"""
What error message should be generated for the various
exception types?
Not very object-oriented code, but I like having all error message
generation within one method rather than spread among all of the
exception classes. This also makes it much easier for the exception
handling because the exception classes do not have to have pointers back
to this object to access utility routines and so on. Also, changing
the message for an exception type would be difficult because you
would have to subclass the exception, but then somehow get ANTLR
to make those kinds of exception objects instead of the default.
This looks weird, but trust me--it makes the most sense in terms
of flexibility.
For grammar debugging, you will want to override this to add
more information such as the stack frame with
getRuleInvocationStack(e, this.getClass().getName()) and,
for no viable alts, the decision description and state etc...
Override this to change the message generated for one or more
exception types.
"""
if isinstance(e, UnwantedTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "extraneous input %s expecting %s" % (
self.getTokenErrorDisplay(e.getUnexpectedToken()),
tokenName
)
elif isinstance(e, MissingTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "missing %s at %s" % (
tokenName, self.getTokenErrorDisplay(e.token)
)
elif isinstance(e, MismatchedTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting " \
+ tokenName
elif isinstance(e, MismatchedTreeNodeException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "mismatched tree node: %s expecting %s" \
% (e.node, tokenName)
elif isinstance(e, NoViableAltException):
msg = "no viable alternative at input " \
+ self.getTokenErrorDisplay(e.token)
elif isinstance(e, EarlyExitException):
msg = "required (...)+ loop did not match anything at input " \
+ self.getTokenErrorDisplay(e.token)
elif isinstance(e, MismatchedSetException):
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedNotSetException):
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, FailedPredicateException):
msg = "rule " \
+ e.ruleName \
+ " failed predicate: {" \
+ e.predicateText \
+ "}?"
else:
msg = str(e)
return msg
def getNumberOfSyntaxErrors(self):
"""
Get number of recognition errors (lexer, parser, tree parser). Each
recognizer tracks its own number. So parser and lexer each have
separate count. Does not count the spurious errors found between
an error and next valid token match
See also reportError()
"""
return self._state.syntaxErrors
def getErrorHeader(self, e):
"""
What is the error header, normally line/character position information?
"""
return "line %d:%d" % (e.line, e.charPositionInLine)
def getTokenErrorDisplay(self, t):
"""
How should a token be displayed in an error message? The default
is to display just the text, but during development you might
want to have a lot of information spit out. Override in that case
to use t.toString() (which, for CommonToken, dumps everything about
the token). This is better than forcing you to override a method in
your token objects because you don't have to go modify your lexer
so that it creates a new Java type.
"""
s = t.text
if s is None:
if t.type == EOF:
s = "<EOF>"
else:
s = "<"+t.type+">"
return repr(s)
def emitErrorMessage(self, msg):
"""Override this method to change where error messages go"""
sys.stderr.write(msg + '\n')
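# Hedged customization sketch (illustration only, not part of the runtime):
# collecting errors instead of writing to stderr, as the docstring above
# suggests; "MyParser" stands in for a hypothetical generated parser class.
#
#     class CollectingParser(MyParser):
#         def __init__(self, *args, **kwargs):
#             MyParser.__init__(self, *args, **kwargs)
#             self.error_list = []
#         def emitErrorMessage(self, msg):
#             self.error_list.append(msg)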
def recover(self, input, re):
"""
Recover from an error found on the input stream. This is
for NoViableAlt and mismatched symbol exceptions. If you enable
single token insertion and deletion, this will usually not
handle mismatched symbol exceptions but there could be a mismatched
token that the match() routine could not recover from.
"""
# PROBLEM? what if input stream is not the same as last time
# perhaps make lastErrorIndex a member of input
if self._state.lastErrorIndex == input.index():
# uh oh, another error at same token index; must be a case
# where LT(1) is in the recovery token set so nothing is
# consumed; consume a single token so at least to prevent
# an infinite loop; this is a failsafe.
input.consume()
self._state.lastErrorIndex = input.index()
followSet = self.computeErrorRecoverySet()
self.beginResync()
self.consumeUntil(input, followSet)
self.endResync()
def beginResync(self):
"""
A hook to listen in on the token consumption during error recovery.
The DebugParser subclasses this to fire events to the listener.
"""
pass
def endResync(self):
"""
A hook to listen in on the token consumption during error recovery.
The DebugParser subclasses this to fire events to the listener.
"""
pass
def computeErrorRecoverySet(self):
"""
Compute the error recovery set for the current rule. During
rule invocation, the parser pushes the set of tokens that can
follow that rule reference on the stack; this amounts to
computing FIRST of what follows the rule reference in the
enclosing rule. This local follow set only includes tokens
from within the rule; i.e., the FIRST computation done by
ANTLR stops at the end of a rule.
EXAMPLE
When you find a "no viable alt exception", the input is not
consistent with any of the alternatives for rule r. The best
thing to do is to consume tokens until you see something that
can legally follow a call to r *or* any rule that called r.
You don't want the exact set of viable next tokens because the
input might just be missing a token--you might consume the
rest of the input looking for one of the missing tokens.
Consider grammar:
a : '[' b ']'
| '(' b ')'
;
b : c '^' INT ;
c : ID
| INT
;
At each rule invocation, the set of tokens that could follow
that rule is pushed on a stack. Here are the various "local"
follow sets:
FOLLOW(b1_in_a) = FIRST(']') = ']'
FOLLOW(b2_in_a) = FIRST(')') = ')'
FOLLOW(c_in_b) = FIRST('^') = '^'
Upon erroneous input "[]", the call chain is
a -> b -> c
and, hence, the follow context stack is:
depth local follow set after call to rule
0 \<EOF> a (from main())
1 ']' b
3 '^' c
Notice that ')' is not included, because b would have to have
been called from a different context in rule a for ')' to be
included.
For error recovery, we cannot consider FOLLOW(c)
(context-sensitive or otherwise). We need the combined set of
all context-sensitive FOLLOW sets--the set of all tokens that
could follow any reference in the call chain. We need to
resync to one of those tokens. Note that FOLLOW(c)='^' and if
we resync'd to that token, we'd consume until EOF. We need to
sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
In this case, for input "[]", LA(1) is in this set so we would
not consume anything and after printing an error rule c would
return normally. It would not find the required '^' though.
At this point, it gets a mismatched token error and throws an
exception (since LA(1) is not in the viable following token
set). The rule exception handler tries to recover, but finds
the same recovery set and doesn't consume anything. Rule b
exits normally returning to rule a. Now it finds the ']' (and
with the successful match exits errorRecovery mode).
So, you can see that the parser walks up the call chain looking
for the token that was a member of the recovery set.
Errors are not generated in errorRecovery mode.
ANTLR's error recovery mechanism is based upon original ideas:
"Algorithms + Data Structures = Programs" by Niklaus Wirth
and
"A note on error recovery in recursive descent parsers":
http://portal.acm.org/citation.cfm?id=947902.947905
Later, Josef Grosch had some good ideas:
"Efficient and Comfortable Error Recovery in Recursive Descent
Parsers":
ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
Like Grosch I implemented local FOLLOW sets that are combined
at run-time upon error to avoid overhead during parsing.
"""
return self.combineFollows(False)
def computeContextSensitiveRuleFOLLOW(self):
"""
Compute the context-sensitive FOLLOW set for current rule.
This is set of token types that can follow a specific rule
reference given a specific call chain. You get the set of
viable tokens that can possibly come next (lookahead depth 1)
given the current call chain. Contrast this with the
definition of plain FOLLOW for rule r:
FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
where x in T* and alpha, beta in V*; T is set of terminals and
V is the set of terminals and nonterminals. In other words,
FOLLOW(r) is the set of all tokens that can possibly follow
references to r in *any* sentential form (context). At
runtime, however, we know precisely which context applies as
we have the call chain. We may compute the exact (rather
than covering superset) set of following tokens.
For example, consider grammar:
stat : ID '=' expr ';' // FOLLOW(stat)=={EOF}
| "return" expr '.'
;
expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'}
atom : INT // FOLLOW(atom)=={'+',')',';','.'}
| '(' expr ')'
;
The FOLLOW sets are all inclusive whereas context-sensitive
FOLLOW sets are precisely what could follow a rule reference.
For input input "i=(3);", here is the derivation:
stat => ID '=' expr ';'
=> ID '=' atom ('+' atom)* ';'
=> ID '=' '(' expr ')' ('+' atom)* ';'
=> ID '=' '(' atom ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ';'
At the "3" token, you'd have a call chain of
stat -> expr -> atom -> expr -> atom
What can follow that specific nested ref to atom? Exactly ')'
as you can see by looking at the derivation of this specific
input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
You want the exact viable token set when recovering from a
token mismatch. Upon token mismatch, if LA(1) is member of
the viable next token set, then you know there is most likely
a missing token in the input stream. "Insert" one by just not
throwing an exception.
"""
return self.combineFollows(True)
def combineFollows(self, exact):
followSet = set()
for idx, localFollowSet in reversed(list(enumerate(self._state.following))):
followSet |= localFollowSet
if exact:
# can we see end of rule?
if EOR_TOKEN_TYPE in localFollowSet:
# Only leave EOR in set if at top (start rule); this lets
# us know if have to include follow(start rule); i.e., EOF
if idx > 0:
followSet.remove(EOR_TOKEN_TYPE)
else:
# can't see end of rule, quit
break
return followSet
def recoverFromMismatchedToken(self, input, ttype, follow):
"""Attempt to recover from a single missing or extra token.
EXTRA TOKEN
LA(1) is not what we are looking for. If LA(2) has the right token,
however, then assume LA(1) is some extra spurious token. Delete it
and LA(2) as if we were doing a normal match(), which advances the
input.
MISSING TOKEN
If current token is consistent with what could come after
ttype then it is ok to 'insert' the missing token, else throw
exception For example, Input 'i=(3;' is clearly missing the
')'. When the parser returns from the nested call to expr, it
will have call chain:
stat -> expr -> atom
and it will be trying to match the ')' at this point in the
derivation:
=> ID '=' '(' INT ')' ('+' atom)* ';'
^
match() will see that ';' doesn't match ')' and report a
mismatched token error. To recover, it sees that LA(1)==';'
is in the set of tokens that can follow the ')' token
reference in rule atom. It can assume that you forgot the ')'.
"""
e = None
# if next token is what we are looking for then "delete" this token
if self.mismatchIsUnwantedToken(input, ttype):
e = UnwantedTokenException(ttype, input)
self.beginResync()
input.consume() # simply delete extra token
self.endResync()
# report after consuming so AW sees the token in the exception
self.reportError(e)
# we want to return the token we're actually matching
matchedSymbol = self.getCurrentInputSymbol(input)
# move past ttype token as if all were ok
input.consume()
return matchedSymbol
# can't recover with single token deletion, try insertion
if self.mismatchIsMissingToken(input, follow):
inserted = self.getMissingSymbol(input, e, ttype, follow)
e = MissingTokenException(ttype, input, inserted)
# report after inserting so AW sees the token in the exception
self.reportError(e)
return inserted
# even that didn't work; must throw the exception
e = MismatchedTokenException(ttype, input)
raise e
def recoverFromMismatchedSet(self, input, e, follow):
"""Not currently used"""
if self.mismatchIsMissingToken(input, follow):
self.reportError(e)
# we don't know how to conjure up a token for sets yet
return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
# TODO do single token deletion like above for Token mismatch
raise e
def getCurrentInputSymbol(self, input):
"""
Match needs to return the current input symbol, which gets put
into the label for the associated token ref; e.g., x=ID. Token
and tree parsers need to return different objects. Rather than test
for input stream type or change the IntStream interface, I use
a simple method to ask the recognizer to tell me what the current
input symbol is.
This is ignored for lexers.
"""
return None
def getMissingSymbol(self, input, e, expectedTokenType, follow):
"""Conjure up a missing token during error recovery.
The recognizer attempts to recover from single missing
symbols. But, actions might refer to that missing symbol.
For example, x=ID {f($x);}. The action clearly assumes
that there has been an identifier matched previously and that
$x points at that token. If that token is missing, but
the next token in the stream is what we want we assume that
this token is missing and we keep going. Because we
have to return some token to replace the missing token,
we have to conjure one up. This method gives the user control
over the tokens returned for missing tokens. Mostly,
you will want to create something special for identifier
tokens. For literals such as '{' and ',', the default
action in the parser or tree parser works. It simply creates
a CommonToken of the appropriate type. The text will be the token.
If you change what tokens must be created by the lexer,
override this method to create the appropriate tokens.
"""
return None
## def recoverFromMissingElement(self, input, e, follow):
## """
## This code is factored out from mismatched token and mismatched set
## recovery. It handles "single token insertion" error recovery for
## both. No tokens are consumed to recover from insertions. Return
## true if recovery was possible else return false.
## """
## if self.mismatchIsMissingToken(input, follow):
## self.reportError(e)
## return True
## # nothing to do; throw exception
## return False
def consumeUntil(self, input, tokenTypes):
"""
Consume tokens until one matches the given token or token set
tokenTypes can be a single token type or a set of token types
"""
if not isinstance(tokenTypes, (set, frozenset)):
tokenTypes = frozenset([tokenTypes])
ttype = input.LA(1)
while ttype != EOF and ttype not in tokenTypes:
input.consume()
ttype = input.LA(1)
def getRuleInvocationStack(self):
"""
Return List<String> of the rules in your parser instance
leading up to a call to this method. You could override if
you want more details such as the file/line info of where
in the parser java code a rule is invoked.
This is very useful for error messages and for context-sensitive
error recovery.
You must be careful if you subclass a generated recognizer.
The default implementation will only search the module of self
for rules, but the subclass will not contain any rules.
You probably want to override this method to look like
def getRuleInvocationStack(self):
return self._getRuleInvocationStack(<class>.__module__)
where <class> is the class of the generated recognizer, e.g.
the superclass of self.
"""
return self._getRuleInvocationStack(self.__module__)
def _getRuleInvocationStack(cls, module):
"""
A more general version of getRuleInvocationStack where you can
pass in, for example, a RecognitionException to get its rule
stack trace. This routine is shared with all recognizers, hence,
static.
TODO: move to a utility class or something; weird having lexer call
this
"""
# mmmhhh,... perhaps look at the first argument
# (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
# requested recognizer...
rules = []
for frame in reversed(inspect.stack()):
code = frame[0].f_code
codeMod = inspect.getmodule(code)
if codeMod is None:
continue
# skip frames not in requested module
if codeMod.__name__ != module:
continue
# skip some unwanted names
if code.co_name in ('nextToken', '<module>'):
continue
rules.append(code.co_name)
return rules
_getRuleInvocationStack = classmethod(_getRuleInvocationStack)
def getBacktrackingLevel(self):
return self._state.backtracking
def setBacktrackingLevel(self, n):
self._state.backtracking = n
def failed(self):
"""Return whether or not a backtracking attempt failed."""
return self._state.failed
def getGrammarFileName(self):
"""For debugging and other purposes, might want the grammar name.
Have ANTLR generate an implementation for this method.
"""
return self.grammarFileName
def getSourceName(self):
raise NotImplementedError
def toStrings(self, tokens):
"""A convenience method for use most often with template rewrites.
Convert a List<Token> to List<String>
"""
if tokens is None:
return None
return [token.text for token in tokens]
def getRuleMemoization(self, ruleIndex, ruleStartIndex):
"""
Given a rule number and a start token index number, return
MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
start index. If this rule has parsed input starting from the
start index before, then return where the rule stopped parsing.
It returns the index of the last token matched by the rule.
"""
if ruleIndex not in self._state.ruleMemo:
self._state.ruleMemo[ruleIndex] = {}
return self._state.ruleMemo[ruleIndex].get(
ruleStartIndex, self.MEMO_RULE_UNKNOWN
)
def alreadyParsedRule(self, input, ruleIndex):
"""
Has this rule already parsed input at the current index in the
input stream? Return the stop token index or MEMO_RULE_UNKNOWN.
If we attempted but failed to parse properly before, return
MEMO_RULE_FAILED.
This method has a side-effect: if we have seen this input for
this rule and successfully parsed before, then seek ahead to
1 past the stop token matched for this rule last time.
"""
stopIndex = self.getRuleMemoization(ruleIndex, input.index())
if stopIndex == self.MEMO_RULE_UNKNOWN:
return False
if stopIndex == self.MEMO_RULE_FAILED:
raise BacktrackingFailed
else:
input.seek(stopIndex + 1)
return True
def memoize(self, input, ruleIndex, ruleStartIndex, success):
"""
Record whether or not this rule parsed the input at this position
successfully.
"""
if success:
stopTokenIndex = input.index() - 1
else:
stopTokenIndex = self.MEMO_RULE_FAILED
if ruleIndex in self._state.ruleMemo:
self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex
def traceIn(self, ruleName, ruleIndex, inputSymbol):
sys.stdout.write("enter %s %s" % (ruleName, inputSymbol))
if self._state.backtracking > 0:
sys.stdout.write(" backtracking=%s" % self._state.backtracking)
sys.stdout.write('\n')
def traceOut(self, ruleName, ruleIndex, inputSymbol):
sys.stdout.write("exit %s %s" % (ruleName, inputSymbol))
if self._state.backtracking > 0:
sys.stdout.write(" backtracking=%s" % self._state.backtracking)
if self._state.failed:
sys.stdout.write(" failed")
else:
sys.stdout.write(" succeeded")
sys.stdout.write('\n')
class TokenSource(object):
"""
@brief Abstract baseclass for token producers.
A source of tokens must provide a sequence of tokens via nextToken()
and also must reveal its source of characters; CommonToken's text is
computed from a CharStream; it only stores indices into the char stream.
Errors from the lexer are never passed to the parser. Either you want
to keep going or you do not upon token recognition error. If you do not
want to continue lexing then you do not want to continue parsing. Just
throw an exception not under RecognitionException and Java will naturally
toss you all the way out of the recognizers. If you want to continue
lexing then you should not throw an exception to the parser--it has already
requested a token. Keep lexing until you get a valid one. Just report
errors and keep going, looking for a valid token.
"""
def nextToken(self):
"""Return a Token object from your input stream (usually a CharStream).
Do not fail/return upon lexing error; keep chewing on the characters
until you get a good one; errors are not passed through to the parser.
"""
raise NotImplementedError
def __iter__(self):
"""The TokenSource is an interator.
The iteration will not include the final EOF token, see also the note
for the next() method.
"""
return self
def next(self):
"""Return next token or raise StopIteration.
Note that this will raise StopIteration when hitting the EOF token,
so EOF will not be part of the iteration.
"""
token = self.nextToken()
if token is None or token.type == EOF:
raise StopIteration
return token
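# Hedged usage sketch (not part of the runtime): because TokenSource
# implements the iterator protocol above, a generated lexer can be drained
# with a plain for-loop; "MyLexer" and the input text are hypothetical
# stand-ins for a grammar-specific lexer emitted by ANTLR.
#
#     from antlr3 import ANTLRStringStream
#     lexer = MyLexer(ANTLRStringStream("a = b + c;"))
#     for token in lexer:            # stops before the EOF token
#         print token.type, token.text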
class Lexer(BaseRecognizer, TokenSource):
"""
@brief Baseclass for generated lexer classes.
A lexer is a recognizer that draws input symbols from a character stream.
Lexer grammars result in a subclass of this object. A Lexer object
uses simplified match() and error recovery mechanisms in the interest
of speed.
"""
def __init__(self, input, state=None):
BaseRecognizer.__init__(self, state)
TokenSource.__init__(self)
# Where is the lexer drawing characters from?
self.input = input
def reset(self):
BaseRecognizer.reset(self) # reset all recognizer state variables
if self.input is not None:
# rewind the input
self.input.seek(0)
if self._state is None:
# no shared state work to do
return
# wack Lexer state variables
self._state.token = None
self._state.type = INVALID_TOKEN_TYPE
self._state.channel = DEFAULT_CHANNEL
self._state.tokenStartCharIndex = -1
self._state.tokenStartLine = -1
self._state.tokenStartCharPositionInLine = -1
self._state.text = None
def nextToken(self):
"""
Return a token from this source; i.e., match a token on the char
stream.
"""
while 1:
self._state.token = None
self._state.channel = DEFAULT_CHANNEL
self._state.tokenStartCharIndex = self.input.index()
self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
self._state.tokenStartLine = self.input.line
self._state.text = None
if self.input.LA(1) == EOF:
return EOF_TOKEN
try:
self.mTokens()
if self._state.token is None:
self.emit()
elif self._state.token == SKIP_TOKEN:
continue
return self._state.token
except NoViableAltException, re:
self.reportError(re)
self.recover(re) # throw out current char and try again
except RecognitionException, re:
self.reportError(re)
# match() routine has already called recover()
def skip(self):
"""
Instruct the lexer to skip creating a token for current lexer rule
and look for another token. nextToken() knows to keep looking when
a lexer rule finishes with token set to SKIP_TOKEN. Recall that
if token==null at end of any token rule, it creates one for you
and emits it.
"""
self._state.token = SKIP_TOKEN
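# Hedged usage note (illustration only, not from this module): a generated
# lexer rule action can invoke skip() so that nextToken() keeps scanning,
# e.g. an ANTLR3 grammar rule along the lines of
#     WS : (' ' | '\t' | '\r' | '\n')+ { self.skip() } ;
# never surfaces whitespace tokens to the parser.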
def mTokens(self):
"""This is the lexer entry point that sets instance var 'token'"""
# abstract method
raise NotImplementedError
def setCharStream(self, input):
"""Set the char stream and reset the lexer"""
self.input = None
self.reset()
self.input = input
def getSourceName(self):
return self.input.getSourceName()
def emit(self, token=None):
"""
The standard method called to automatically emit a token at the
outermost lexical rule. The token object should point into the
char buffer start..stop. If there is a text override in 'text',
use that to set the token's text. Override this method to emit
custom Token objects.
If you are building trees, then you should also override
Parser or TreeParser.getMissingSymbol().
"""
if token is None:
token = CommonToken(
input=self.input,
type=self._state.type,
channel=self._state.channel,
start=self._state.tokenStartCharIndex,
stop=self.getCharIndex()-1
)
token.line = self._state.tokenStartLine
token.text = self._state.text
token.charPositionInLine = self._state.tokenStartCharPositionInLine
self._state.token = token
return token
def match(self, s):
if isinstance(s, basestring):
for c in s:
if self.input.LA(1) != ord(c):
if self._state.backtracking > 0:
raise BacktrackingFailed
mte = MismatchedTokenException(c, self.input)
self.recover(mte)
raise mte
self.input.consume()
else:
if self.input.LA(1) != s:
if self._state.backtracking > 0:
raise BacktrackingFailed
mte = MismatchedTokenException(unichr(s), self.input)
self.recover(mte) # don't really recover; just consume in lexer
raise mte
self.input.consume()
def matchAny(self):
self.input.consume()
def matchRange(self, a, b):
if self.input.LA(1) < a or self.input.LA(1) > b:
if self._state.backtracking > 0:
raise BacktrackingFailed
mre = MismatchedRangeException(unichr(a), unichr(b), self.input)
self.recover(mre)
raise mre
self.input.consume()
def getLine(self):
return self.input.line
def getCharPositionInLine(self):
return self.input.charPositionInLine
def getCharIndex(self):
"""What is the index of the current character of lookahead?"""
return self.input.index()
def getText(self):
"""
Return the text matched so far for the current token or any
text override.
"""
if self._state.text is not None:
return self._state.text
return self.input.substring(
self._state.tokenStartCharIndex,
self.getCharIndex()-1
)
def setText(self, text):
"""
Set the complete text of this token; it wipes any previous
changes to the text.
"""
self._state.text = text
text = property(getText, setText)
def reportError(self, e):
## TODO: not thought about recovery in lexer yet.
## # if we've already reported an error and have not matched a token
## # yet successfully, don't report any errors.
## if self.errorRecovery:
## #System.err.print("[SPURIOUS] ");
## return;
##
## self.errorRecovery = True
self.displayRecognitionError(self.tokenNames, e)
def getErrorMessage(self, e, tokenNames):
msg = None
if isinstance(e, MismatchedTokenException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting " \
+ self.getCharErrorDisplay(e.expecting)
elif isinstance(e, NoViableAltException):
msg = "no viable alternative at character " \
+ self.getCharErrorDisplay(e.c)
elif isinstance(e, EarlyExitException):
msg = "required (...)+ loop did not match anything at character " \
+ self.getCharErrorDisplay(e.c)
elif isinstance(e, MismatchedNotSetException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedSetException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedRangeException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ self.getCharErrorDisplay(e.a) \
+ ".." \
+ self.getCharErrorDisplay(e.b)
else:
msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)
return msg
def getCharErrorDisplay(self, c):
if c == EOF:
c = '<EOF>'
return repr(c)
def recover(self, re):
"""
Lexers can normally match any char in its vocabulary after matching
a token, so do the easy thing and just kill a character and hope
it all works out. You can instead use the rule invocation stack
to do sophisticated error recovery if you are in a fragment rule.
"""
self.input.consume()
def traceIn(self, ruleName, ruleIndex):
inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
self.getLine(),
self.getCharPositionInLine()
)
BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol)
def traceOut(self, ruleName, ruleIndex):
inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
self.getLine(),
self.getCharPositionInLine()
)
BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol)
class Parser(BaseRecognizer):
"""
@brief Baseclass for generated parser classes.
"""
def __init__(self, lexer, state=None):
BaseRecognizer.__init__(self, state)
self.setTokenStream(lexer)
def reset(self):
BaseRecognizer.reset(self) # reset all recognizer state variables
if self.input is not None:
self.input.seek(0) # rewind the input
def getCurrentInputSymbol(self, input):
return input.LT(1)
def getMissingSymbol(self, input, e, expectedTokenType, follow):
if expectedTokenType == EOF:
tokenText = "<missing EOF>"
else:
tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
t = CommonToken(type=expectedTokenType, text=tokenText)
current = input.LT(1)
if current.type == EOF:
current = input.LT(-1)
if current is not None:
t.line = current.line
t.charPositionInLine = current.charPositionInLine
t.channel = DEFAULT_CHANNEL
return t
def setTokenStream(self, input):
"""Set the token stream and reset the parser"""
self.input = None
self.reset()
self.input = input
def getTokenStream(self):
return self.input
def getSourceName(self):
return self.input.getSourceName()
def traceIn(self, ruleName, ruleIndex):
BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
def traceOut(self, ruleName, ruleIndex):
BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
class RuleReturnScope(object):
"""
Rules can return start/stop info as well as possible trees and templates.
"""
def getStart(self):
"""Return the start token or tree."""
return None
def getStop(self):
"""Return the stop token or tree."""
return None
def getTree(self):
"""Has a value potentially if output=AST."""
return None
def getTemplate(self):
"""Has a value potentially if output=template."""
return None
class ParserRuleReturnScope(RuleReturnScope):
"""
Rules that return more than a single value must return an object
containing all the values. Besides the properties defined in
RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
return values. This class simply defines the minimum properties that
are always defined and methods to access the others that might be
available depending on output option such as template and tree.
Note text is not an actual property of the return value, it is computed
from start and stop using the input stream's toString() method. I
could add a ctor to this so that we can pass in and store the input
stream, but I'm not sure we want to do that. It would seem to be undefined
to get the .text property anyway if the rule matches tokens from multiple
input streams.
I do not use getters for fields of objects that are used simply to
group values such as this aggregate. The getters/setters are there to
satisfy the superclass interface.
"""
def __init__(self):
self.start = None
self.stop = None
def getStart(self):
return self.start
def getStop(self):
return self.stop
| gpl-3.0 |
eloquence/unisubs | apps/externalsites/syncing/kaltura.py | 6 | 4831 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
"""externalsites.syncing.kaltura -- Sync subtitles to/from kaltura"""
from xml.dom import minidom
import requests
from externalsites.exceptions import SyncingError
from externalsites.syncing.kaltura_languages import KalturaLanguageMap
KALTURA_API_URL = 'http://www.kaltura.com/api_v3/'
SESSION_TYPE_USER = 0
SESSION_TYPE_ADMIN = 2
CAPTION_TYPE_DFXP = 2
CAPTION_TYPE_SRT = 1
CAPTION_TYPE_WEBVTT = 3
# partnerData value we set for subtitles that we've synced
PARTNER_DATA_TAG = 'synced-from-amara'
def _node_text(node):
return ''.join(child.nodeValue
for child in node.childNodes
if child.nodeType == child.TEXT_NODE)
def _find_child(node, tag_name):
return node.getElementsByTagName(tag_name)[0]
def _has_child(node, tag_name):
return len(node.getElementsByTagName(tag_name)) > 0
def _check_error(result):
"""Checks if we had an error result."""
if _has_child(result, 'error'):
error = _find_child(result, 'error')
code = _node_text(_find_child(error, 'code'))
message = _node_text(_find_child(error, 'message'))
raise SyncingError("%s: %s" % (code, message))
def _make_request(service, action, data):
params = { 'service': service, 'action': action, }
response = requests.post(KALTURA_API_URL, params=params, data=data)
dom = minidom.parseString(response.content)
try:
result = _find_child(dom, 'result')
except IndexError:
return None
_check_error(result)
return result
def _start_session(partner_id, secret):
result = _make_request('session', 'start', {
'secret': secret,
'partnerId': partner_id,
'type': SESSION_TYPE_ADMIN,
})
return _node_text(result)
def _end_session(ks):
_make_request('session', 'end', { 'ks': ks })
def _find_existing_captionset(ks, video_id, language_code):
language = KalturaLanguageMap.get_name(language_code)
result = _make_request('caption_captionasset', 'list', {
'ks': ks,
'filter:entryIdEqual': video_id,
})
objects = _find_child(result, 'objects')
for item in objects.getElementsByTagName('item'):
partner_data = _find_child(item, 'partnerData')
language_node = _find_child(item, 'language')
if (_node_text(partner_data) == PARTNER_DATA_TAG and
_node_text(language_node) == language):
return _node_text(_find_child(item, 'id'))
return None
def _add_captions(ks, video_id, language_code):
language = KalturaLanguageMap.get_name(language_code)
result = _make_request('caption_captionasset', 'add', {
'ks': ks,
'entryId': video_id,
'captionAsset:language': language,
'captionAsset:partnerData': PARTNER_DATA_TAG,
'captionAsset:format': CAPTION_TYPE_SRT,
'captionAsset:fileExt': 'srt',
})
return _node_text(_find_child(result, 'id'))
def _update_caption_content(ks, caption_id, sub_data):
_make_request('caption_captionasset', 'setcontent', {
'ks': ks,
'id': caption_id,
'contentResource:objectType': 'KalturaStringResource',
'contentResource:content': sub_data,
})
def _delete_captions(ks, caption_id):
_make_request('caption_captionasset', 'delete', {
'ks': ks,
'captionAssetId': caption_id,
})
def update_subtitles(partner_id, secret, video_id, language_code,
srt_data):
ks = _start_session(partner_id, secret)
try:
caption_id = _find_existing_captionset(ks, video_id, language_code)
if caption_id is None:
caption_id = _add_captions(ks, video_id, language_code)
_update_caption_content(ks, caption_id, srt_data)
finally:
_end_session(ks)
def delete_subtitles(partner_id, secret, video_id, language_code):
ks = _start_session(partner_id, secret)
try:
caption_id = _find_existing_captionset(ks, video_id, language_code)
if caption_id is not None:
_delete_captions(ks, caption_id)
finally:
_end_session(ks)
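# A minimal usage sketch (hypothetical credentials and video id; srt_data is
# assumed to be an already serialized SRT byte string):
#
#   update_subtitles('123456', 'adminSecret', '0_abcdef12', 'en', srt_data)
#   delete_subtitles('123456', 'adminSecret', '0_abcdef12', 'en')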
| agpl-3.0 |
TI-OpenLink/wl18xx | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
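#
# A typical invocation, assuming a perf.data file was recorded beforehand:
#
#   perf script -s check-perf-trace.py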
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
Cinntax/home-assistant | homeassistant/components/habitica/sensor.py | 13 | 2293 | """Support for Habitica sensors."""
from datetime import timedelta
import logging
from homeassistant.components import habitica
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the habitica platform."""
if discovery_info is None:
return
name = discovery_info[habitica.CONF_NAME]
sensors = discovery_info[habitica.CONF_SENSORS]
sensor_data = HabitipyData(hass.data[habitica.DOMAIN][name])
await sensor_data.update()
async_add_devices(
[HabitipySensor(name, sensor, sensor_data) for sensor in sensors], True
)
class HabitipyData:
"""Habitica API user data cache."""
def __init__(self, api):
"""Habitica API user data cache."""
self.api = api
self.data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def update(self):
"""Get a new fix from Habitica servers."""
self.data = await self.api.user.get()
class HabitipySensor(Entity):
"""A generic Habitica sensor."""
def __init__(self, name, sensor_name, updater):
"""Initialize a generic Habitica sensor."""
self._name = name
self._sensor_name = sensor_name
self._sensor_type = habitica.SENSORS_TYPES[sensor_name]
self._state = None
self._updater = updater
async def async_update(self):
"""Update Condition and Forecast."""
await self._updater.update()
data = self._updater.data
for element in self._sensor_type.path:
data = data[element]
self._state = data
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._sensor_type.icon
@property
def name(self):
"""Return the name of the sensor."""
return f"{habitica.DOMAIN}_{self._name}_{self._sensor_name}"
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._sensor_type.unit
| apache-2.0 |
azumimuo/family-xbmc-addon | plugin.video.SportsDevil/service/oscrypto/_pkcs1.py | 7 | 20306 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import hashlib
import math
import platform
import struct
import os
from asn1crypto.util import int_from_bytes, int_to_bytes
from asn1crypto.keys import PrivateKeyInfo, PublicKeyInfo
from asn1crypto.x509 import Certificate
from . import backend
from ._int import fill_width
from .util import constant_compare, rand_bytes
from ._errors import pretty_message
from ._types import type_name, byte_cls, int_types
if sys.version_info < (3,):
chr_cls = chr
range = xrange # noqa
else:
def chr_cls(num):
return bytes([num])
_backend = backend()
__all__ = [
'add_pss_padding',
'add_pkcs1v15_signature_padding',
'raw_rsa_private_crypt',
'raw_rsa_public_crypt',
'remove_pkcs1v15_encryption_padding',
'remove_pkcs1v15_signature_padding',
'verify_pss_padding',
]
def _is_osx_107():
"""
:return:
A bool if the current machine is running OS X 10.7
"""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
return tuple(map(int, version.split('.')))[0:2] == (10, 7)
def add_pss_padding(hash_algorithm, salt_length, key_length, message):
"""
Pads a byte string using the EMSA-PSS-Encode operation described in PKCS#1
v2.2.
:param hash_algorithm:
The string name of the hash algorithm to use: "sha1", "sha224",
"sha256", "sha384", "sha512"
:param salt_length:
The length of the salt as an integer - typically the same as the length
of the output from the hash_algorithm
:param key_length:
The length of the RSA key, in bits
:param message:
A byte string of the message to pad
:return:
The encoded (passed) message
"""
if _backend != 'winlegacy' and sys.platform != 'darwin':
raise SystemError(pretty_message(
'''
Pure-python RSA PSS signature padding addition code is only for
Windows XP/2003 and OS X
'''
))
if not isinstance(message, byte_cls):
raise TypeError(pretty_message(
'''
message must be a byte string, not %s
''',
type_name(message)
))
if not isinstance(salt_length, int_types):
raise TypeError(pretty_message(
'''
salt_length must be an integer, not %s
''',
type_name(salt_length)
))
if salt_length < 0:
raise ValueError(pretty_message(
'''
salt_length must be 0 or more - is %s
''',
repr(salt_length)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 512:
raise ValueError(pretty_message(
'''
key_length must be 512 or more - is %s
''',
repr(key_length)
))
if hash_algorithm not in set(['sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "sha1", "sha224", "sha256", "sha384",
"sha512", not %s
''',
repr(hash_algorithm)
))
hash_func = getattr(hashlib, hash_algorithm)
# The maximal bit size of a non-negative integer is one less than the bit
# size of the key since the first bit is used to store sign
em_bits = key_length - 1
em_len = int(math.ceil(em_bits / 8))
message_digest = hash_func(message).digest()
hash_length = len(message_digest)
if em_len < hash_length + salt_length + 2:
raise ValueError(pretty_message(
'''
Key is not long enough to use with specified hash_algorithm and
salt_length
'''
))
if salt_length > 0:
salt = os.urandom(salt_length)
else:
salt = b''
m_prime = (b'\x00' * 8) + message_digest + salt
m_prime_digest = hash_func(m_prime).digest()
padding = b'\x00' * (em_len - salt_length - hash_length - 2)
db = padding + b'\x01' + salt
db_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - 1)
masked_db = int_to_bytes(int_from_bytes(db) ^ int_from_bytes(db_mask))
masked_db = fill_width(masked_db, len(db_mask))
zero_bits = (8 * em_len) - em_bits
left_bit_mask = ('0' * zero_bits) + ('1' * (8 - zero_bits))
left_int_mask = int(left_bit_mask, 2)
if left_int_mask != 255:
masked_db = chr_cls(left_int_mask & ord(masked_db[0:1])) + masked_db[1:]
return masked_db + m_prime_digest + b'\xBC'
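# A sketch of how the PSS helpers pair with the raw RSA primitives defined
# below (private_key/certificate_or_public_key are hypothetical oscrypto
# objects for a 2048-bit key):
#
#   padded = add_pss_padding('sha256', 32, 2048, message)
#   signature = raw_rsa_private_crypt(private_key, padded)
#   recovered = raw_rsa_public_crypt(certificate_or_public_key, signature)
#   assert verify_pss_padding('sha256', 32, 2048, message, recovered)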
def verify_pss_padding(hash_algorithm, salt_length, key_length, message, signature):
"""
Verifies the PSS padding on an encoded message
:param hash_algorithm:
The string name of the hash algorithm to use: "sha1", "sha224",
"sha256", "sha384", "sha512"
:param salt_length:
The length of the salt as an integer - typically the same as the length
of the output from the hash_algorithm
:param key_length:
The length of the RSA key, in bits
:param message:
A byte string of the message to pad
:param signature:
The signature to verify
:return:
        A boolean indicating if the signature is valid
"""
if _backend != 'winlegacy' and sys.platform != 'darwin':
raise SystemError(pretty_message(
'''
Pure-python RSA PSS signature padding verification code is only for
Windows XP/2003 and OS X
'''
))
if not isinstance(message, byte_cls):
raise TypeError(pretty_message(
'''
message must be a byte string, not %s
''',
type_name(message)
))
if not isinstance(signature, byte_cls):
raise TypeError(pretty_message(
'''
signature must be a byte string, not %s
''',
type_name(signature)
))
if not isinstance(salt_length, int_types):
raise TypeError(pretty_message(
'''
salt_length must be an integer, not %s
''',
type_name(salt_length)
))
if salt_length < 0:
raise ValueError(pretty_message(
'''
salt_length must be 0 or more - is %s
''',
repr(salt_length)
))
if hash_algorithm not in set(['sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "sha1", "sha224", "sha256", "sha384",
"sha512", not %s
''',
repr(hash_algorithm)
))
hash_func = getattr(hashlib, hash_algorithm)
em_bits = key_length - 1
em_len = int(math.ceil(em_bits / 8))
message_digest = hash_func(message).digest()
hash_length = len(message_digest)
if em_len < hash_length + salt_length + 2:
return False
if signature[-1:] != b'\xBC':
return False
zero_bits = (8 * em_len) - em_bits
masked_db_length = em_len - hash_length - 1
masked_db = signature[0:masked_db_length]
first_byte = ord(masked_db[0:1])
bits_that_should_be_zero = first_byte >> (8 - zero_bits)
if bits_that_should_be_zero != 0:
return False
m_prime_digest = signature[masked_db_length:masked_db_length + hash_length]
db_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - 1)
left_bit_mask = ('0' * zero_bits) + ('1' * (8 - zero_bits))
left_int_mask = int(left_bit_mask, 2)
if left_int_mask != 255:
db_mask = chr_cls(left_int_mask & ord(db_mask[0:1])) + db_mask[1:]
db = int_to_bytes(int_from_bytes(masked_db) ^ int_from_bytes(db_mask))
if len(db) < len(masked_db):
db = (b'\x00' * (len(masked_db) - len(db))) + db
zero_length = em_len - hash_length - salt_length - 2
zero_string = b'\x00' * zero_length
if not constant_compare(db[0:zero_length], zero_string):
return False
if db[zero_length:zero_length + 1] != b'\x01':
return False
salt = db[0 - salt_length:]
m_prime = (b'\x00' * 8) + message_digest + salt
h_prime = hash_func(m_prime).digest()
return constant_compare(m_prime_digest, h_prime)
def _mgf1(hash_algorithm, seed, mask_length):
"""
The PKCS#1 MGF1 mask generation algorithm
:param hash_algorithm:
The string name of the hash algorithm to use: "sha1", "sha224",
"sha256", "sha384", "sha512"
:param seed:
A byte string to use as the seed for the mask
:param mask_length:
The desired mask length, as an integer
:return:
A byte string of the mask
"""
if not isinstance(seed, byte_cls):
raise TypeError(pretty_message(
'''
seed must be a byte string, not %s
''',
type_name(seed)
))
if not isinstance(mask_length, int_types):
raise TypeError(pretty_message(
'''
mask_length must be an integer, not %s
''',
type_name(mask_length)
))
if mask_length < 1:
raise ValueError(pretty_message(
'''
mask_length must be greater than 0 - is %s
''',
repr(mask_length)
))
if hash_algorithm not in set(['sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "sha1", "sha224", "sha256", "sha384",
"sha512", not %s
''',
repr(hash_algorithm)
))
output = b''
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
iterations = int(math.ceil(mask_length / hash_length))
pack = struct.Struct(b'>I').pack
hash_func = getattr(hashlib, hash_algorithm)
for counter in range(0, iterations):
b = pack(counter)
output += hash_func(seed + b).digest()
return output[0:mask_length]
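# For example, for a 2048-bit key and SHA-256 the PSS routines above expand a
# 32-byte seed into the 223-byte DB mask (256 - 32 - 1):
#
#   db_mask = _mgf1('sha256', m_prime_digest, 223)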
def add_pkcs1v15_signature_padding(key_length, data):
"""
Adds PKCS#1 v1.5 padding to a message to be signed
:param key_length:
An integer of the number of bytes in the key
:param data:
A byte string to pad
:return:
The padded data as a byte string
"""
if _backend != 'winlegacy':
raise SystemError(pretty_message(
'''
Pure-python RSA PKCSv1.5 signature padding addition code is only
for Windows XP/2003
'''
))
return _add_pkcs1v15_padding(key_length, data, 'signing')
def remove_pkcs1v15_signature_padding(key_length, data):
"""
Removes PKCS#1 v1.5 padding from a signed message using constant time
operations
:param key_length:
An integer of the number of bytes in the key
:param data:
A byte string to unpad
:return:
The unpadded data as a byte string
"""
if _backend != 'winlegacy':
raise SystemError(pretty_message(
'''
Pure-python RSA PKCSv1.5 signature padding removal code is only for
Windows XP/2003
'''
))
return _remove_pkcs1v15_padding(key_length, data, 'verifying')
def remove_pkcs1v15_encryption_padding(key_length, data):
"""
Removes PKCS#1 v1.5 padding from a decrypted message using constant time
operations
:param key_length:
An integer of the number of bytes in the key
:param data:
A byte string to unpad
:return:
The unpadded data as a byte string
"""
if not _is_osx_107():
raise SystemError(pretty_message(
'''
Pure-python RSA PKCSv1.5 encryption padding removal code is only
for OS X 10.7
'''
))
return _remove_pkcs1v15_padding(key_length, data, 'decrypting')
def _add_pkcs1v15_padding(key_length, data, operation):
"""
Adds PKCS#1 v1.5 padding to a message
:param key_length:
An integer of the number of bytes in the key
:param data:
A byte string to unpad
:param operation:
A unicode string of "encrypting" or "signing"
:return:
The padded data as a byte string
"""
if operation == 'encrypting':
second_byte = b'\x02'
else:
second_byte = b'\x01'
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 64:
raise ValueError(pretty_message(
'''
key_length must be 64 or more - is %s
''',
repr(key_length)
))
if len(data) > key_length - 11:
raise ValueError(pretty_message(
'''
data must be between 1 and %s bytes long - is %s
''',
key_length - 11,
len(data)
))
required_bytes = key_length - 3 - len(data)
padding = b''
while required_bytes > 0:
temp_padding = rand_bytes(required_bytes)
# Remove null bytes since they are markers in PKCS#1 v1.5
temp_padding = b''.join(temp_padding.split(b'\x00'))
padding += temp_padding
required_bytes -= len(temp_padding)
return b'\x00' + second_byte + padding + b'\x00' + data
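# The returned block therefore has the PKCS#1 v1.5 layout:
#
#   b'\x00' + block_type + non-zero random padding + b'\x00' + data
#
# where block_type is b'\x02' when encrypting and b'\x01' when signing.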
def _remove_pkcs1v15_padding(key_length, data, operation):
"""
Removes PKCS#1 v1.5 padding from a message using constant time operations
:param key_length:
An integer of the number of bytes in the key
:param data:
A byte string to unpad
:param operation:
A unicode string of "decrypting" or "verifying"
:return:
The unpadded data as a byte string
"""
if operation == 'decrypting':
second_byte = 2
else:
second_byte = 1
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 64:
raise ValueError(pretty_message(
'''
key_length must be 64 or more - is %s
''',
repr(key_length)
))
if len(data) != key_length:
raise ValueError('Error %s' % operation)
error = 0
trash = 0
padding_end = 0
# Uses bitwise operations on an error variable and another trash variable
# to perform constant time error checking/token scanning on the data
for i in range(0, len(data)):
byte = data[i:i + 1]
byte_num = ord(byte)
# First byte should be \x00
if i == 0:
error |= byte_num
# Second byte should be \x02 for decryption, \x01 for verification
elif i == 1:
error |= int((byte_num | second_byte) != second_byte)
# Bytes 3-10 should not be \x00
elif i < 10:
error |= int((byte_num ^ 0) == 0)
        # The first zero byte at position 11 or later marks the end of the
        # padding
        else:
            non_zero = byte_num | 0
            if padding_end == 0:
                if non_zero:
                    trash |= i
                else:
                    padding_end |= i
            else:
                # Both branches do identical work so that the scan takes the
                # same time regardless of the bytes after the terminator
                if non_zero:
                    trash |= i
                else:
                    trash |= i
if error != 0:
raise ValueError('Error %s' % operation)
return data[padding_end + 1:]
def raw_rsa_private_crypt(private_key, data):
"""
    Performs the raw RSA operation on a byte string using a private key.
This is a low-level primitive and is prone to disastrous results if used
incorrectly.
:param private_key:
An oscrypto.asymmetric.PrivateKey object
:param data:
A byte string of the plaintext to be signed or ciphertext to be
decrypted. Must be less than or equal to the length of the private key.
In the case of signing, padding must already be applied. In the case of
decryption, padding must be removed afterward.
:return:
A byte string of the transformed data
"""
if _backend != 'winlegacy':
raise SystemError('Pure-python RSA crypt is only for Windows XP/2003')
if not hasattr(private_key, 'asn1') or not isinstance(private_key.asn1, PrivateKeyInfo):
raise TypeError(pretty_message(
'''
private_key must be an instance of the
oscrypto.asymmetric.PrivateKey class, not %s
''',
type_name(private_key)
))
algo = private_key.asn1['private_key_algorithm']['algorithm'].native
if algo != 'rsa':
raise ValueError(pretty_message(
'''
private_key must be an RSA key, not %s
''',
algo.upper()
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
rsa_private_key = private_key.asn1['private_key'].parsed
transformed_int = pow(
int_from_bytes(data),
rsa_private_key['private_exponent'].native,
rsa_private_key['modulus'].native
)
return int_to_bytes(transformed_int, width=private_key.asn1.byte_size)
def raw_rsa_public_crypt(certificate_or_public_key, data):
"""
    Performs the raw RSA operation on a byte string using a certificate or
public key. This is a low-level primitive and is prone to disastrous results
if used incorrectly.
:param certificate_or_public_key:
An oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate
object
:param data:
A byte string of the signature when verifying, or padded plaintext when
encrypting. Must be less than or equal to the length of the public key.
When verifying, padding will need to be removed afterwards. When
encrypting, padding must be applied before.
:return:
A byte string of the transformed data
"""
if _backend != 'winlegacy':
raise SystemError('Pure-python RSA crypt is only for Windows XP/2003')
has_asn1 = hasattr(certificate_or_public_key, 'asn1')
valid_types = (PublicKeyInfo, Certificate)
if not has_asn1 or not isinstance(certificate_or_public_key.asn1, valid_types):
raise TypeError(pretty_message(
'''
certificate_or_public_key must be an instance of the
oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate
classes, not %s
''',
type_name(certificate_or_public_key)
))
algo = certificate_or_public_key.asn1['algorithm']['algorithm'].native
if algo != 'rsa':
raise ValueError(pretty_message(
'''
certificate_or_public_key must be an RSA key, not %s
''',
algo.upper()
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
rsa_public_key = certificate_or_public_key.asn1['public_key'].parsed
transformed_int = pow(
int_from_bytes(data),
rsa_public_key['public_exponent'].native,
rsa_public_key['modulus'].native
)
return int_to_bytes(
transformed_int,
width=certificate_or_public_key.asn1.byte_size
)
| gpl-2.0 |
alphatwirl/alphatwirl | alphatwirl/concurrently/SubprocessRunner.py | 1 | 3240 | # Tai Sakuma <tai.sakuma@gmail.com>
import os
import logging
import subprocess
import collections
##__________________________________________________________________||
class SubprocessRunner:
"""An example dispatcher which runs tasks in subprocesses
This class is an example of a dispatcher.
Note: This class is not for practical use as it doesn't limit the
number of subprocesses running concurrently.
"""
def __init__(self, pipe=False):
self.running_procs = collections.deque()
self.pipe = pipe
self.finished_pids = [ ]
def __repr__(self):
name_value_pairs = (
('pipe', self.pipe),
)
return '{}({})'.format(
self.__class__.__name__,
', '.join(['{}={!r}'.format(n, v) for n, v in name_value_pairs]),
)
def run(self, workingArea, package_index):
taskdir = workingArea.path
package_path = workingArea.package_relpath(package_index)
        # run_script = os.path.join(taskdir, 'run.py') # This doesn't work.
        # That contradicts the documentation at
        # https://docs.python.org/2/library/subprocess.html: the program's
        # path needs to be given relative to cwd.
run_script = os.path.join('.', 'run.py') # This works
args = [run_script, package_path]
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE if self.pipe else None,
stderr=subprocess.PIPE if self.pipe else None,
cwd=taskdir
)
self.running_procs.append(proc)
return proc.pid # as runid
def run_multiple(self, workingArea, package_indices):
pids = [ ]
for pkgidx in package_indices:
pids.append(self.run(workingArea, pkgidx))
return pids
def poll(self):
"""check if the jobs are running and return a list of pids for
finished jobs
"""
finished_procs = [p for p in self.running_procs if p.poll() is not None]
self.running_procs = collections.deque([p for p in self.running_procs if p not in finished_procs])
for proc in finished_procs:
stdout, stderr = proc.communicate()
## proc.communicate() returns (stdout, stderr) when
## self.pipe = True. Otherwise they are (None, None)
finished_pids = [p.pid for p in finished_procs]
self.finished_pids.extend(finished_pids)
logger = logging.getLogger(__name__)
messages = 'Running: {}, Finished: {}'.format(len(self.running_procs), len(self.finished_pids))
logger.info(messages)
return finished_pids # as runids
def wait(self):
"""wait until all jobs finish and return a list of pids
"""
finished_pids = [ ]
while self.running_procs:
finished_pids.extend(self.poll())
return finished_pids # as runids
def failed_runids(self, runids):
pass
def terminate(self):
while self.running_procs:
proc = self.running_procs.popleft()
proc.terminate()
##__________________________________________________________________||
| bsd-3-clause |
chiragjogi/odoo | addons/payment_authorize/tests/test_authorize.py | 195 | 7565 | # -*- coding: utf-8 -*-
import hashlib
import hmac
import time
import urlparse
from lxml import objectify
import openerp
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_authorize.controllers.main import AuthorizeController
from openerp.tools import mute_logger
@openerp.tests.common.at_install(True)
@openerp.tests.common.post_install(True)
class AuthorizeCommon(PaymentAcquirerCommon):
def setUp(self):
super(AuthorizeCommon, self).setUp()
self.base_url = self.env['ir.config_parameter'].get_param('web.base.url')
# authorize only support USD in test environment
self.currency_usd = self.env['res.currency'].search([('name', '=', 'USD')], limit=1)[0]
# get the authorize account
model, self.authorize_id = self.env['ir.model.data'].get_object_reference('payment_authorize', 'payment_acquirer_authorize')
@openerp.tests.common.at_install(True)
@openerp.tests.common.post_install(True)
class AuthorizeForm(AuthorizeCommon):
def _authorize_generate_hashing(self, values):
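        # x_fp_hash is an HMAC-MD5 digest over 'login^sequence^timestamp^amount^'
        # keyed with the transaction key, mirroring Authorize.net's SIM signing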
data = '^'.join([
values['x_login'],
values['x_fp_sequence'],
values['x_fp_timestamp'],
values['x_amount'],
]) + '^'
return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()
def test_10_Authorize_form_render(self):
authorize = self.env['payment.acquirer'].browse(self.authorize_id)
self.assertEqual(authorize.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering
# ----------------------------------------
form_values = {
'x_login': authorize.authorize_login,
'x_trans_key': authorize.authorize_transaction_key,
'x_amount': '320.0',
'x_show_form': 'PAYMENT_FORM',
'x_type': 'AUTH_CAPTURE',
'x_method': 'CC',
'x_fp_sequence': '%s%s' % (authorize.id, int(time.time())),
'x_version': '3.1',
'x_relay_response': 'TRUE',
'x_fp_timestamp': str(int(time.time())),
'x_relay_url': '%s' % urlparse.urljoin(self.base_url, AuthorizeController._return_url),
'x_cancel_url': '%s' % urlparse.urljoin(self.base_url, AuthorizeController._cancel_url),
'return_url': None,
'x_currency_code': 'USD',
'x_invoice_num': 'SO004',
'x_first_name': 'Norbert',
'x_last_name': 'Buyer',
'x_address': 'Huge Street 2/543',
'x_city': 'Sin City',
'x_zip': '1000',
'x_country': 'Belgium',
'x_phone': '0032 12 34 56 78',
'x_email': 'norbert.buyer@example.com',
'x_state': None,
}
form_values['x_fp_hash'] = self._authorize_generate_hashing(form_values)
# render the button
cr, uid, context = self.env.cr, self.env.uid, {}
res = self.payment_acquirer.render(
cr, uid, self.authorize_id, 'SO004', 320.0, self.currency_usd.id,
partner_id=None, partner_values=self.buyer_values, context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://test.authorize.net/gateway/transact.dll', 'Authorize: wrong form POST url')
for form_input in tree.input:
# Generated and received 'x_fp_hash' are always different so skeep it.
if form_input.get('name') in ['submit', 'x_fp_hash']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'Authorize: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_authorize.models.authorize', 'ValidationError')
def test_20_authorize_form_management(self):
cr, uid, context = self.env.cr, self.env.uid, {}
# be sure not to do stupid thing
authorize = self.env['payment.acquirer'].browse(self.authorize_id)
self.assertEqual(authorize.environment, 'test', 'test without test environment')
# typical data posted by authorize after client has successfully paid
authorize_post_data = {
'return_url': u'/shop/payment/validate',
'x_MD5_Hash': u'7934485E1C105940BE854208D10FAB4F',
'x_account_number': u'XXXX0027',
'x_address': u'Huge Street 2/543',
'x_amount': u'320.00',
'x_auth_code': u'E4W7IU',
'x_avs_code': u'Y',
'x_card_type': u'Visa',
'x_cavv_response': u'2',
'x_city': u'Sun City',
'x_company': u'',
'x_country': u'Belgium',
'x_cust_id': u'',
'x_cvv2_resp_code': u'',
'x_description': u'',
'x_duty': u'0.00',
'x_email': u'norbert.buyer@exampl',
'x_fax': u'',
'x_first_name': u'Norbert',
'x_freight': u'0.00',
'x_invoice_num': u'SO004',
'x_last_name': u'Buyer',
'x_method': u'CC',
'x_phone': u'0032 12 34 56 78',
'x_po_num': u'',
'x_response_code': u'1',
'x_response_reason_code': u'1',
'x_response_reason_text': u'This transaction has been approved.',
'x_ship_to_address': u'Huge Street 2/543',
'x_ship_to_city': u'Sun City',
'x_ship_to_company': u'',
'x_ship_to_country': u'Belgium',
'x_ship_to_first_name': u'Norbert',
'x_ship_to_last_name': u'Buyer',
'x_ship_to_state': u'',
'x_ship_to_zip': u'1000',
'x_state': u'',
'x_tax': u'0.00',
'x_tax_exempt': u'FALSE',
'x_test_request': u'false',
'x_trans_id': u'2217460311',
'x_type': u'auth_capture',
'x_zip': u'1000'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)
tx = self.env['payment.transaction'].create({
'amount': 320.0,
'acquirer_id': self.authorize_id,
'currency_id': self.currency_usd.id,
'reference': 'SO004',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id})
# validate it
self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)
# check state
self.assertEqual(tx.state, 'done', 'Authorize: validation did not put tx into done state')
self.assertEqual(tx.authorize_txnid, authorize_post_data.get('x_trans_id'), 'Authorize: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'authorize_txnid': False})
# simulate an error
authorize_post_data['x_response_code'] = u'3'
self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)
# check state
self.assertEqual(tx.state, 'error', 'Authorize: erroneous validation did not put tx into error state')
| agpl-3.0 |
lab305itep/linux | scripts/gdb/linux/modules.py | 774 | 2718 | #
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
global module_type
module_ptr_type = module_type.get_type().pointer()
modules = gdb.parse_and_eval("modules")
entry = modules['next']
end_of_list = modules.address
while entry != end_of_list:
yield utils.container_of(entry, module_ptr_type, "list")
entry = entry['next']
def find_module_by_name(name):
for module in module_list():
if module['name'].string() == name:
return module
return None
class LxModule(gdb.Function):
"""Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
def __init__(self):
super(LxModule, self).__init__("lx_module")
def invoke(self, mod_name):
mod_name = mod_name.string()
module = find_module_by_name(mod_name)
if module:
return module.dereference()
else:
raise gdb.GdbError("Unable to find MODULE " + mod_name)
LxModule()
class LxLsmod(gdb.Command):
"""List currently loaded modules."""
_module_use_type = utils.CachedType("struct module_use")
def __init__(self):
super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
gdb.write(
"Address{0} Module Size Used by\n".format(
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
address=str(module['module_core']).split()[0],
name=module['name'].string(),
size=str(module['core_size']),
ref=str(module['refcnt']['counter'])))
source_list = module['source_list']
t = self._module_use_type.get_type().pointer()
entry = source_list['next']
first = True
while entry != source_list.address:
use = utils.container_of(entry, t, "source_list")
gdb.write("{separator}{name}".format(
separator=" " if first else ",",
name=use['source']['name'].string()))
first = False
entry = entry['next']
gdb.write("\n")
LxLsmod()
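# Both helpers are used from a gdb session attached to a kernel, e.g.:
#
#   (gdb) p $lx_module("ext4")
#   (gdb) lx-lsmod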
| gpl-2.0 |
myang321/django | tests/template_tests/syntax_tests/test_list_index.py | 521 | 2694 | from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
@setup({'list-index01': '{{ var.1 }}'})
def test_list_index01(self):
"""
List-index syntax allows a template to access a certain item of a
subscriptable object.
"""
output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']})
self.assertEqual(output, 'second item')
@setup({'list-index02': '{{ var.5 }}'})
def test_list_index02(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index03': '{{ var.1 }}'})
def test_list_index03(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index03', {'var': None})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index04': '{{ var.1 }}'})
def test_list_index04(self):
"""
Fail silently when variable is a dict without the specified key.
"""
output = self.engine.render_to_string('list-index04', {'var': {}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index05': '{{ var.1 }}'})
def test_list_index05(self):
"""
Dictionary lookup wins out when dict's key is a string.
"""
output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index06': '{{ var.1 }}'})
def test_list_index06(self):
"""
But list-index lookup wins out when dict's key is an int, which
behind the scenes is really a dictionary lookup (for a dict)
after converting the key to an int.
"""
output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index07': '{{ var.1 }}'})
def test_list_index07(self):
"""
Dictionary lookup wins out when there is a string and int version
of the key.
"""
output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
self.assertEqual(output, 'hello')
| bsd-3-clause |
gem/django-extras | docs/_ext/applyxrefs.py | 132 | 1842 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
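# e.g. target_name('./ref/models.txt') -> '_ref-models'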
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
with open(fn, 'w') as fp:
fp.writelines(lines)
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
def has_target(fn):
try:
with open(fn, 'r') as fp:
lines = fp.readlines()
except IOError:
print("Can't open or read %s. Not touching it." % fn)
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
argv.extend('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend([(dirpath, f) for f in filenames])
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
#print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print('%s: %s' % (fn, lines[0]))
else:
print("Adding xref to %s" % fn)
process_file(fn, lines)
else:
print("Skipping %s: already has a xref" % fn)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/operations/_customer_events_operations.py | 1 | 23311 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomerEventsOperations:
"""CustomerEventsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~test_base.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_test_base_account(
self,
resource_group_name: str,
test_base_account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CustomerEventListResult"]:
"""Lists all notification events subscribed under a Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomerEventListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~test_base.models.CustomerEventListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomerEventListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_test_base_account.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomerEventListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_test_base_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
test_base_account_name: str,
customer_event_name: str,
parameters: "_models.CustomerEventResource",
**kwargs: Any
) -> "_models.CustomerEventResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomerEventResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'customerEventName': self._serialize.url("customer_event_name", customer_event_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CustomerEventResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('CustomerEventResource', pipeline_response)
if response.status_code == 201:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('CustomerEventResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents/{customerEventName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
test_base_account_name: str,
customer_event_name: str,
parameters: "_models.CustomerEventResource",
**kwargs: Any
) -> AsyncLROPoller["_models.CustomerEventResource"]:
"""Create or replace a Test Base Customer Event.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param customer_event_name: The resource name of the Test Base Customer event.
:type customer_event_name: str
:param parameters: Parameters supplied to create a Test Base CustomerEvent.
:type parameters: ~test_base.models.CustomerEventResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomerEventResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~test_base.models.CustomerEventResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomerEventResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
test_base_account_name=test_base_account_name,
customer_event_name=customer_event_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomerEventResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'customerEventName': self._serialize.url("customer_event_name", customer_event_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents/{customerEventName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
test_base_account_name: str,
customer_event_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'customerEventName': self._serialize.url("customer_event_name", customer_event_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents/{customerEventName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
test_base_account_name: str,
customer_event_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a Test Base Customer Event.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param customer_event_name: The resource name of the Test Base Customer event.
:type customer_event_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
test_base_account_name=test_base_account_name,
customer_event_name=customer_event_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'customerEventName': self._serialize.url("customer_event_name", customer_event_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents/{customerEventName}'} # type: ignore
async def get(
self,
resource_group_name: str,
test_base_account_name: str,
customer_event_name: str,
**kwargs: Any
) -> "_models.CustomerEventResource":
"""Gets a Test Base CustomerEvent.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param customer_event_name: The resource name of the Test Base Customer event.
:type customer_event_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomerEventResource, or the result of cls(response)
:rtype: ~test_base.models.CustomerEventResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomerEventResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'customerEventName': self._serialize.url("customer_event_name", customer_event_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomerEventResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/customerEvents/{customerEventName}'} # type: ignore
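    # Illustrative usage sketch, with the same assumption as above about the
    # operations-group attribute name:
    #
    #     event = await client.customer_events.get(
    #         resource_group_name="my-rg",
    #         test_base_account_name="my-account",
    #         customer_event_name="my-event",
    #     )
    #     # event is a deserialized _models.CustomerEventResource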
| mit |
cbingos/hongmafund | xadmin/plugins/topnav.py | 2 | 2513 |
from django.template import loader
from django.utils.text import capfirst
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.filters import SEARCH_VAR
from xadmin.views import BaseAdminPlugin, CommAdminView
class TopNavPlugin(BaseAdminPlugin):
global_search_models = None
global_add_models = None
def get_context(self, context):
return context
# Block Views
def block_top_navbar(self, context, nodes):
search_models = []
site_name = self.admin_site.name
models = self.global_search_models or self.admin_site._registry.keys()
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "view"):
info = (app_label, model._meta.module_name)
if getattr(self.admin_site._registry[model], 'search_fields', None):
try:
search_models.append({
'title': _('Search %s') % capfirst(model._meta.verbose_name_plural),
'url': reverse('admin:%s_%s_changelist' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'search_models': search_models, 'search_name': SEARCH_VAR}))
def block_top_navmenu(self, context, nodes):
add_models = []
site_name = self.admin_site.name
        models = self.global_add_models or self.admin_site._registry.keys()
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "add"):
info = (app_label, model._meta.module_name)
try:
add_models.append({
'title': _('Add %s') % capfirst(model._meta.verbose_name),
'url': reverse('admin:%s_%s_add' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
nodes.append(
loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'add_models': add_models}))
site.register_plugin(TopNavPlugin, CommAdminView)
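# Illustrative configuration sketch, not part of this module: the global
# model lists might be narrowed by assigning them on the plugin class
# (instances read them via self); the Article/Tag models are placeholders.
#
#     from myapp.models import Article, Tag
#     TopNavPlugin.global_search_models = [Article]    # navbar search targets
#     TopNavPlugin.global_add_models = [Article, Tag]  # quick-add menu entries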
| mit |
Onager/dftimewolf | tests/lib/collectors/grr_base.py | 1 | 4708 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GRR base collector."""
import unittest
import mock
from grr_api_client import errors as grr_errors
from dftimewolf.lib import state
from dftimewolf.lib.collectors import grr_base
from dftimewolf import config
ACCESS_FORBIDDEN_MAX = 3
class MockGRRObject(object):
"""Fake GRR object that will be used in the access forbidden wrapper test"""
_access_forbidden_counter = 0
CreateApproval = mock.MagicMock()
hunt_id = "123"
client_id = "321"
# pylint: disable=unused-argument
def ForbiddenFunction(self, random1, random2, random3=None, random4=None):
"""Will raise a grr_errors.AccessForbiddenError three times, and return."""
while ACCESS_FORBIDDEN_MAX > self._access_forbidden_counter:
self._access_forbidden_counter += 1
raise grr_errors.AccessForbiddenError
return 4
class GRRBaseModuleTest(unittest.TestCase):
"""Tests for the GRR base collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
self.assertIsNotNone(grr_base_module)
@mock.patch('tempfile.mkdtemp')
@mock.patch('grr_api_client.api.InitHttp')
def testSetup(self, mock_grr_inithttp, mock_mkdtemp):
"""Tests that setup works"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
mock_mkdtemp.return_value = '/fake'
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='approver1@example.com,approver2@example.com',
verify=True
)
mock_grr_inithttp.assert_called_with(
api_endpoint='http://fake/endpoint',
auth=('admin1', 'admin2'),
verify=True)
self.assertEqual(grr_base_module.approvers,
['approver1@example.com', 'approver2@example.com'])
self.assertEqual(grr_base_module.output_path, '/fake')
def testApprovalWrapper(self):
"""Tests that the approval wrapper works correctly."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='approver1@example.com,approver2@example.com',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
result = grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
# Final result.
self.assertEqual(result, 4)
mock_forbidden_function.assert_called_with(
'random1', 'random2', random3=4, random4=4)
    # Our forbidden function should be called 4 times, the last one succeeding.
self.assertEqual(mock_forbidden_function.call_count, 4)
mock_grr_object.CreateApproval.assert_called_with(
reason='random reason',
notified_users=['approver1@example.com', 'approver2@example.com'])
def testNoApproversErrorsOut(self):
"""Tests that an error is generated if no approvers are specified.
This should only error on unauthorized objects, which is how our mock
behaves.
"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random',
grr_server_url='http://fake/url',
grr_username='admin1',
grr_password='admin2',
approvers='',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
result = grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
self.assertIsNone(result)
    # Only one error message is generated.
self.assertEqual(len(test_state.errors), 1)
# Correct error message is generated
self.assertIn('no approvers specified', test_state.errors[0][0])
self.assertTrue(test_state.errors[0][1]) # critical=True
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
cmbiwer/pycbc | examples/overlap.py | 6 | 1519 | from pycbc.waveform import get_td_waveform
from pycbc.filter import match, overlap
from pycbc.psd import aLIGOZeroDetHighPower
# Buffer size in seconds. This is presumed to be
# longer than the longest waveform.
time_buffer = 4
f_low = 30
sample_rate = 4096
# Length of corresponding time series and frequency series
tlen = sample_rate * time_buffer
flen = tlen // 2 + 1  # integer division so the series length stays an int
delta_t = 1.0 / sample_rate
delta_f = 1.0 / time_buffer
print("Generating waveform 1")
hp, hc = get_td_waveform(approximant="EOBNRv2",
mass1=10,
mass2=10,
f_lower=f_low,
delta_t=1.0/4096)
print("waveform is %s seconds long" % hp.duration)
print("Generating waveform 2")
sp, sc = get_td_waveform(approximant="TaylorT4",
mass1=10,
mass2=10,
f_lower=f_low,
delta_t=1.0/4096)
print("waveform is %s seconds long" % sp.duration)
# Ensure that the waveforms are resized to the same length
sp.resize(tlen)
hp.resize(tlen)
print("Calculating analytic PSD")
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)
print("Calculating match and overlap")
# Note: This takes a while the first time as an FFT plan is generated
# subsequent calls within the same program will be faster
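# Roughly: overlap is the noise-weighted inner product of the two waveforms
# as given (no shifting), while match maximizes that inner product over a
# relative time and phase shift, so the maximized value m should be >= o.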
m, i = match(hp, sp, psd=psd, low_frequency_cutoff=f_low)
o = overlap(hp, sp, psd=psd, low_frequency_cutoff=f_low)
print("Overlap %s" % o)
print("Maximized Overlap %s" % m)
| gpl-3.0 |
pferreir/indico-backup | indico/MaKaC/common/security.py | 2 | 4683 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.core.config import Config
from MaKaC.common.utils import encodeUnicode
from MaKaC.errors import MaKaCError, HtmlForbiddenTag
from MaKaC.webinterface.common.tools import escape_html, restrictedHTML
"""
base module for HTML security
"""
class Sanitization(object):
@staticmethod
def _sanitize(params, level, doNotSanitize=[]):
for i in params:
if i in doNotSanitize:
continue
if isinstance(params, dict):
param = params[i]
else:
param = i
if isinstance(param, str):
res = restrictedHTML(param, level)
if res is not None:
raise HtmlForbiddenTag(res)
elif isinstance(param, list) or isinstance(param, dict):
Sanitization._sanitize(param, level)
@staticmethod
def _escapeHTML(params, doNotSanitize=[]):
index = 0
for i in params:
if i in doNotSanitize:
continue
            # params can be a list or a dictionary;
            # we need to define k depending on whether it is a list or a dictionary
            # in order to be able to do such an operation: params[k] = something.
if isinstance(params, dict):
param = params[i]
k = i
else:
param = i
k = index # since we are looping a list, we need to increment the index to
index += 1 # get the correct 'k' in the next iteration.
if isinstance(param, str):
params[k] = escape_html(param)
elif isinstance(param, list) or isinstance(param, dict):
Sanitization._escapeHTML(param)
@staticmethod
def _encodeUnicode(params):
index = 0
for i in params:
            # params can be a list or a dictionary;
            # we need to define k depending on whether it is a list or a dictionary
            # in order to be able to do such an operation: params[k] = something.
if isinstance(params, dict):
param = params[i]
k = i
else:
param = i
k = index # since we are looping a list, we need to increment the index to
index += 1 # get the correct 'k' in the next iteration.
if isinstance(param, str) and param != "":
params[k] = encodeUnicode(param)
if params[k] == "":
raise MaKaCError(_("Your browser is using an encoding which is not recognized by Indico... Please make sure you set your browser encoding to utf-8"))
elif isinstance(param, list) or isinstance(param, dict):
Sanitization._encodeUnicode(param)
@staticmethod
def sanitizationCheck(target, params, accessWrapper, doNotSanitize=[]):
# first make sure all params are utf-8
Sanitization._encodeUnicode(params)
# then check the security level of data sent to the server
# if no user logged in, then no html allowed
if accessWrapper.getUser():
level = Config.getInstance().getSanitizationLevel()
elif target and hasattr(target, "canModify") and target.canModify(accessWrapper):
# not logged user, but use a modification key
level = Config.getInstance().getSanitizationLevel()
else:
level = 0
if level not in range(4):
level = 1
if level == 0:
#Escape all HTML tags
Sanitization._escapeHTML(params, doNotSanitize)
elif level in [1, 2]:
#level 1 or default: raise error if script or style detected
#level 2: raise error if script but style accepted
Sanitization._sanitize(params, level, doNotSanitize)
elif level == 3:
# Absolutely no checks
return
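# Illustrative usage sketch (all names are placeholders): a request handler
# would typically run its raw parameters through the check before use.
#
#     Sanitization.sanitizationCheck(target=conference,
#                                    params=request_params,
#                                    accessWrapper=aw,
#                                    doNotSanitize=["rawXML"])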
| gpl-3.0 |
sslavic/kafka | tests/kafkatest/services/trogdor/network_partition_fault_spec.py | 17 | 1816 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.trogdor.task_spec import TaskSpec
class NetworkPartitionFaultSpec(TaskSpec):
"""
The specification for a network partition fault.
Network partition faults fracture the network into different partitions
that cannot communicate with each other.
"""
def __init__(self, start_ms, duration_ms, partitions):
"""
Create a new NetworkPartitionFaultSpec.
:param start_ms: The start time, as described in task_spec.py
:param duration_ms: The duration in milliseconds.
:param partitions: An array of arrays describing the partitions.
The inner arrays may contain either node names,
or ClusterNode objects.
"""
super(NetworkPartitionFaultSpec, self).__init__(start_ms, duration_ms)
self.message["class"] = "org.apache.kafka.trogdor.fault.NetworkPartitionFaultSpec"
self.message["partitions"] = [TaskSpec.to_node_names(p) for p in partitions]
| apache-2.0 |
translationexchange/tml-python | tml/api/snapshot.py | 1 | 4664 | # encoding: UTF-8
"""
# Copyright (c) 2015, Translation Exchange, Inc.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import re
from codecs import open
from ..strings import to_string
from . import AbstractClient, APIError
from tarfile import TarFile
from json import loads
from os.path import isdir, exists
from ..exceptions import Error
__author__ = 'xepa4ep, a@toukmanov.ru'
REWRITE_RULES = (
('projects/current/definition', 'application'),
(r'^languages\/(\w+)\/definition$', '%(0)s/language')
)
class SnapshotDir(AbstractClient):
""" Client which works with a snapshot """
def __init__(self, path):
""" .ctor
Args:
path (string): path to dir with snapshot
"""
super(SnapshotDir, self).__init__()
self.path = path
def call(self, url, method, params = None, opts=None):
""" Make request to API
Args:
url (string): URL
method (string): HTTP method (get|post|put|delete)
params (dict): params
Raises:
APIError: API returns error
Returns:
dict: response
"""
if method != 'get':
            raise MethodIsNotSupported(method, url, self)
try:
return self.fetch(SnapshotDir.rewrite_path(url))
except Exception as invalid_path:
raise APIError(invalid_path, self, url)
def fetch(self, path):
""" Fetch data for path from file """
path = '%s/%s.json' % (self.path, path)
with open(path, encoding='utf-8') as fp:
return loads(to_string(fp.read()))
@classmethod
def rewrite_path(cls, url):
""" Build path from URL
Args:
url (string): API url
Returns:
string: path in snapshot matches API URL
"""
for pattern, replacer in REWRITE_RULES:
if pattern == url: # if equal
return replacer
else: # if match by regex
match_obj = re.compile(pattern).match(url)
if not match_obj:
continue
ctx = dict([(str(idx), v) for idx, v
in enumerate(match_obj.groups())])
return replacer % ctx
return url
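# For illustration, given REWRITE_RULES above:
#   SnapshotDir.rewrite_path('projects/current/definition') -> 'application'
#   SnapshotDir.rewrite_path('languages/en/definition') -> 'en/language'
# URLs that match no rule are returned unchanged.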
class MethodIsNotSupported(APIError):
""" Try to execute not GET request """
MESSAGE = 'Method %s is not supported'
def __init__(self, method, url, client):
super(MethodIsNotSupported, self).__init__(self.MESSAGE % method,
client,
url)
class SnapshotFile(SnapshotDir):
""" .tar.gz snapshot file """
@property
def file(self):
""" Open tar file on demand """
return TarFile.open(self.path, 'r')
def fetch(self, path):
fp = None
try:
fp = self.file.extractfile('%s.json' % path)
ret = loads(fp.read().decode('utf-8'))
return ret
finally:
if fp:
fp.close()
def open_snapshot(path):
""" Open snapshot file or directory
Args:
path (string): path to file or dir
Returns:
SnapshotDir|SnapshotFile
"""
if not exists(path):
raise Error('Snapshot %s does not exists' % path)
if isdir(path):
return SnapshotDir(path)
else:
return SnapshotFile(path)
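# Illustrative usage sketch (the path is a placeholder): both directory and
# .tar.gz snapshots answer read-only API calls from local data.
#
#     client = open_snapshot('/var/tml/snapshot.tar.gz')
#     app = client.call('projects/current/definition', 'get')  # -> dict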
| mit |