repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
pshen/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py | 7 | 25136 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vpc_route_table
short_description: Manage route tables for AWS virtual private clouds
description:
- Manage route tables for AWS virtual private clouds
version_added: "2.0"
author: Robert Estelle (@erydo), Rob White (@wimnat)
options:
lookup:
description:
- "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail.
If no tags are specified then no lookup for an existing route table is performed and a new
route table will be created. To change tags of a route table or delete a route table,
you must look up by id."
required: false
default: tag
choices: [ 'tag', 'id' ]
propagating_vgw_ids:
description:
- "Enable route propagation from virtual gateways specified by ID."
default: None
required: false
purge_routes:
version_added: "2.3"
description:
- "Purge existing routes that are not found in routes."
required: false
default: 'true'
aliases: []
purge_subnets:
version_added: "2.3"
description:
- "Purge existing subnets that are not found in subnets."
required: false
default: 'true'
aliases: []
route_table_id:
description:
- "The ID of the route table to update or delete."
required: false
default: null
routes:
description:
- "List of routes in the route table.
Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
'instance_id', 'interface_id', or 'vpc_peering_connection_id'.
If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required for present states."
required: false
default: None
state:
description:
- "Create or destroy the VPC route table"
required: false
default: present
choices: [ 'present', 'absent' ]
subnets:
description:
- "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'."
required: true
tags:
description:
- "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are
used to uniquely identify route tables within a VPC when the route_table_id is not supplied."
required: false
default: null
aliases: [ "resource_tags" ]
vpc_id:
description:
- "VPC ID of the VPC in which to create the route table."
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic creation example:
- name: Set up public subnet route table
ec2_vpc_route_table:
vpc_id: vpc-1245678
region: us-west-1
tags:
Name: Public
subnets:
- "{{ jumpbox_subnet.subnet.id }}"
- "{{ frontend_subnet.subnet.id }}"
- "{{ vpn_subnet.subnet_id }}"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ igw.gateway_id }}"
register: public_route_table
- name: Set up NAT-protected route table
ec2_vpc_route_table:
vpc_id: vpc-1245678
region: us-west-1
tags:
Name: Internal
subnets:
- "{{ application_subnet.subnet.id }}"
- 'Database Subnet'
- '10.0.0.0/8'
routes:
- dest: 0.0.0.0/0
instance_id: "{{ nat.instance_id }}"
register: nat_route_table
'''
import re
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info

# boto is an optional dependency: record its availability so main() can emit
# a friendly failure instead of a traceback when it is missing.
try:
    import boto.ec2
    import boto.vpc
    from boto.exception import EC2ResponseError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
    # When imported as a library (e.g. by tests) surface the ImportError
    # immediately; when executed as a module, fail via fail_json later.
    if __name__ != '__main__':
        raise
class AnsibleRouteTableException(Exception):
    """
    Base exception for route table operations.

    Carries the human-readable *message* and, optionally, the formatted
    traceback of the underlying error so main() can pass it to fail_json.
    """

    def __init__(self, message, error_traceback=None):
        # Pass the message to Exception so str(e) is meaningful; the
        # original implementation left it empty.
        super(AnsibleRouteTableException, self).__init__(message)
        self.message = message
        self.error_traceback = error_traceback
class AnsibleIgwSearchException(AnsibleRouteTableException):
    """Raised when an Internet gateway lookup fails or is ambiguous."""
    pass
class AnsibleTagCreationException(AnsibleRouteTableException):
    """Raised when creating or deleting resource tags fails."""
    pass
class AnsibleSubnetSearchException(AnsibleRouteTableException):
    """Raised when a subnet lookup fails or is ambiguous."""
    pass
# Raw strings avoid invalid escape sequences (\d, \/) that are deprecated in
# non-raw string literals; [A-Za-z] replaces the accidental [A-z] range, which
# also matched the punctuation characters between 'Z' and 'a' ([ \ ] ^ _ `).
# CIDR_RE is a format check only; octet ranges are not validated.
CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
def find_subnets(vpc_conn, vpc_id, identified_subnets):
    """
    Finds a list of subnets, each identified either by a raw ID, a unique
    'Name' tag, or a CIDR such as 10.0.0.0/8.

    Raises AnsibleSubnetSearchException when an identifier cannot be
    resolved, or when a Name tag matches more than one subnet.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    # Split the identifiers into the three supported lookup styles.
    subnet_ids = []
    subnet_names = []
    subnet_cidrs = []
    for subnet in (identified_subnets or []):
        if re.match(SUBNET_RE, subnet):
            subnet_ids.append(subnet)
        elif re.match(CIDR_RE, subnet):
            subnet_cidrs.append(subnet)
        else:
            # Anything that is neither an ID nor a CIDR is treated as a
            # 'Name' tag value.
            subnet_names.append(subnet)

    subnets_by_id = []
    if subnet_ids:
        subnets_by_id = vpc_conn.get_all_subnets(
            subnet_ids, filters={'vpc_id': vpc_id})
        # Verify every requested ID was actually returned.
        for subnet_id in subnet_ids:
            if not any(s.id == subnet_id for s in subnets_by_id):
                raise AnsibleSubnetSearchException(
                    'Subnet ID "{0}" does not exist'.format(subnet_id))

    subnets_by_cidr = []
    if subnet_cidrs:
        subnets_by_cidr = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs})
        for cidr in subnet_cidrs:
            if not any(s.cidr_block == cidr for s in subnets_by_cidr):
                raise AnsibleSubnetSearchException(
                    'Subnet CIDR "{0}" does not exist'.format(cidr))

    subnets_by_name = []
    if subnet_names:
        subnets_by_name = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'tag:Name': subnet_names})

        for name in subnet_names:
            matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name])
            if matching_count == 0:
                raise AnsibleSubnetSearchException(
                    'Subnet named "{0}" does not exist'.format(name))
            elif matching_count > 1:
                # Name tags are not unique in EC2; refuse ambiguous input.
                raise AnsibleSubnetSearchException(
                    'Multiple subnets named "{0}"'.format(name))

    return subnets_by_id + subnets_by_cidr + subnets_by_name
def find_igw(vpc_conn, vpc_id):
    """
    Return the ID of the single Internet gateway attached to *vpc_id*.

    Raises an AnsibleIgwSearchException if either no IGW can be found, or
    more than one is attached to the given VPC.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    # Exactly one attached gateway is the only acceptable outcome.
    if len(attached) == 1:
        return attached[0].id
    if not attached:
        raise AnsibleIgwSearchException('No IGW found for VPC {0}'.
                                        format(vpc_id))
    raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'.
                                    format(vpc_id))
def get_resource_tags(vpc_conn, resource_id):
    """Return the tags on *resource_id* as a plain {name: value} dict."""
    tag_filter = {'resource-id': resource_id}
    return dict((tag.name, tag.value)
                for tag in vpc_conn.get_all_tags(filters=tag_filter))
def tags_match(match_tags, candidate_tags):
    """Return True if every key/value pair in match_tags is present in candidate_tags."""
    for key, value in match_tags.items():
        if key not in candidate_tags or candidate_tags[key] != value:
            return False
    return True
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """
    Reconcile the tags on *resource_id* with the desired *tags* mapping.

    When add_only is true, existing tags not in *tags* are kept; otherwise
    they are deleted. Returns {'changed': bool, 'tags': current tags}.
    Raises AnsibleTagCreationException on AWS errors.
    """
    try:
        current = get_resource_tags(vpc_conn, resource_id)
        if tags == current:
            # Nothing to reconcile.
            return {'changed': False, 'tags': current}

        stale = dict((key, current[key]) for key in current if key not in tags)
        if stale and not add_only:
            vpc_conn.delete_tags(resource_id, stale, dry_run=check_mode)

        missing = dict((key, tags[key]) for key in tags if key not in current)
        if missing:
            vpc_conn.create_tags(resource_id, missing, dry_run=check_mode)

        # Re-read so the caller sees the post-update state.
        return {'changed': True, 'tags': get_resource_tags(vpc_conn, resource_id)}
    except EC2ResponseError as e:
        raise AnsibleTagCreationException(
            message='Unable to update tags for {0}, error: {1}'.format(resource_id, e),
            error_traceback=traceback.format_exc())
def get_route_table_by_id(vpc_conn, vpc_id, route_table_id):
    """Return the route table with *route_table_id* in *vpc_id*, or None."""
    matches = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id],
                                            filters={'vpc_id': vpc_id})
    return matches[0] if matches else None
def get_route_table_by_tags(vpc_conn, vpc_id, tags):
    """
    Return the single route table in *vpc_id* whose tags include *tags*.

    Returns None when nothing matches; raises RuntimeError when the tags
    match more than one route table.
    """
    candidates = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id})
    matches = [table for table in candidates
               if tags_match(tags, get_resource_tags(vpc_conn, table.id))]
    if len(matches) > 1:
        raise RuntimeError("Tags provided do not identify a unique route table")
    return matches[0] if matches else None
def route_spec_matches_route(route_spec, route):
    """Return True if the existing boto *route* satisfies *route_spec*."""
    # Spec keys map one-to-one onto boto route attributes of the same name.
    comparable_keys = ('destination_cidr_block', 'gateway_id', 'instance_id',
                       'interface_id', 'vpc_peering_connection_id')
    # This is a workaround to catch managed NAT gateways as they do not show
    # up in any of the returned values when describing route tables.
    # The caveat of doing it this way is that if there was an existing
    # route for another nat gateway in this route table there is not a way to
    # change to another nat gateway id. Long term solution would be to utilise
    # boto3 which is a very big task for this module or to update boto.
    gateway = route_spec.get('gateway_id')
    if gateway and 'nat-' in gateway:
        if route.destination_cidr_block == route_spec['destination_cidr_block']:
            if not any((route.gateway_id, route.instance_id,
                        route.interface_id, route.vpc_peering_connection_id)):
                return True
    return all(route_spec[key] == getattr(route, key)
               for key in comparable_keys if key in route_spec)
def rename_key(d, old_key, new_key):
    """Move the value stored under *old_key* to *new_key*, dropping the old entry."""
    value = d[old_key]
    d[new_key] = value
    del d[old_key]
def index_of_matching_route(route_spec, routes_to_match):
    """Return the index of the first route matching *route_spec*, or None."""
    return next((position for position, candidate in enumerate(routes_to_match)
                 if route_spec_matches_route(route_spec, candidate)), None)
def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids,
                  check_mode, purge_routes):
    """
    Reconcile the routes in *route_table* with *route_specs*.

    Missing routes are created; when purge_routes is true, unexpected
    non-local, non-endpoint, non-propagated routes are deleted.
    Returns {'changed': bool}.
    """
    routes_to_match = list(route_table.routes)
    route_specs_to_create = []
    for route_spec in route_specs:
        i = index_of_matching_route(route_spec, routes_to_match)
        if i is None:
            route_specs_to_create.append(route_spec)
        else:
            # Matched an existing route; remove it so it is not purged below.
            del routes_to_match[i]

    # NOTE: As of boto==2.38.0, the origin of a route is not available
    # (for example, whether it came from a gateway with route propagation
    # enabled). Testing for origin == 'EnableVgwRoutePropagation' is more
    # correct than checking whether the route uses a propagating VGW.
    # The current logic will leave non-propagated routes using propagating
    # VGWs in place.
    routes_to_delete = []
    if purge_routes:
        for r in routes_to_match:
            if r.gateway_id:
                # Never purge the implicit 'local' route or VPC endpoint
                # routes, nor routes via a gateway we are propagating from.
                if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'):
                    if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids:
                        routes_to_delete.append(r)
            else:
                routes_to_delete.append(r)

    changed = bool(routes_to_delete or route_specs_to_create)
    if changed:
        for route in routes_to_delete:
            try:
                vpc_conn.delete_route(route_table.id,
                                      route.destination_cidr_block,
                                      dry_run=check_mode)
            except EC2ResponseError as e:
                # In check mode AWS signals success via DryRunOperation;
                # any other error must propagate to the caller's handler
                # (previously it was silently swallowed).
                if e.error_code != 'DryRunOperation':
                    raise

        for route_spec in route_specs_to_create:
            try:
                vpc_conn.create_route(route_table.id,
                                      dry_run=check_mode,
                                      **route_spec)
            except EC2ResponseError as e:
                if e.error_code != 'DryRunOperation':
                    raise

    return {'changed': changed}
def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id,
                              check_mode):
    """
    Associate *subnet_id* with *route_table_id*, replacing any association
    with another route table.

    Returns {'changed': bool} plus 'association_id' when an association
    exists or was created (check mode reports only the pending change).
    """
    route_tables = vpc_conn.get_all_route_tables(
        filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id}
    )
    for route_table in route_tables:
        if route_table.id is None:
            continue
        for a in route_table.associations:
            if a.subnet_id == subnet_id:
                if route_table.id == route_table_id:
                    # Already associated with the desired table.
                    return {'changed': False, 'association_id': a.id}
                else:
                    if check_mode:
                        return {'changed': True}
                    # Drop the stale association before re-associating below.
                    vpc_conn.disassociate_route_table(a.id)

    if check_mode:
        # BUGFIX: previously the association was performed even in check
        # mode; report the pending change without mutating anything.
        return {'changed': True}
    association_id = vpc_conn.associate_route_table(route_table_id, subnet_id)
    return {'changed': True, 'association_id': association_id}
def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets,
                               check_mode, purge_subnets):
    """
    Associate each subnet in *subnets* with *route_table* and, when
    purge_subnets is true, remove associations not covered by *subnets*.

    Returns {'changed': bool}.
    """
    current_association_ids = [a.id for a in route_table.associations]
    new_association_ids = []
    changed = False
    for subnet in subnets:
        result = ensure_subnet_association(
            vpc_conn, vpc_id, route_table.id, subnet.id, check_mode)
        changed = changed or result['changed']
        if changed and check_mode:
            # In check mode the association was not actually made, so there
            # is no association_id to collect; report the pending change.
            return {'changed': True}
        new_association_ids.append(result['association_id'])

    if purge_subnets:
        # Any pre-existing association not re-confirmed above is stale.
        to_delete = [a_id for a_id in current_association_ids
                     if a_id not in new_association_ids]

        for a_id in to_delete:
            changed = True
            vpc_conn.disassociate_route_table(a_id, dry_run=check_mode)

    return {'changed': changed}
def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids,
                       check_mode):
    """
    Enable route propagation on *route_table* for every VGW in
    propagating_vgw_ids that does not already appear to propagate into it.

    Returns {'changed': bool}.
    """
    # NOTE: As of boto==2.38.0, it is not yet possible to query the existing
    # propagating gateways. However, EC2 does support this as shown in its API
    # documentation. For now, a reasonable proxy for this is the presence of
    # propagated routes using the gateway in the route table. If such a route
    # is found, propagation is almost certainly enabled.
    changed = False
    for vgw_id in propagating_vgw_ids:
        if any(r.gateway_id == vgw_id for r in route_table.routes):
            # This VGW already has a route in the table; assume propagation
            # is enabled. (BUGFIX: the previous code returned
            # {'changed': False} here, skipping any remaining VGWs and
            # discarding the 'changed' flag for VGWs already enabled.)
            continue
        changed = True
        vpc_conn.enable_vgw_route_propagation(route_table.id,
                                              vgw_id,
                                              dry_run=check_mode)

    return {'changed': changed}
def ensure_route_table_absent(connection, module):
    """
    Delete the route table identified by tags or by route_table_id.

    Returns {'changed': bool}; calls module.fail_json on AWS errors.
    """

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    purge_subnets = module.params.get('purge_subnets')

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg="Error finding route table with lookup 'tag': {0}".format(e.message),
                                 exception=traceback.format_exc())
            except RuntimeError as e:
                # Raised when the tags match more than one route table.
                module.fail_json(msg=e.args[0], exception=traceback.format_exc())
        else:
            # Without tags there is nothing to look up (and nothing to delete).
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg="Error finding route table with lookup 'id': {0}".format(e.message),
                             exception=traceback.format_exc())

    if route_table is None:
        return {'changed': False}

    # disassociate subnets before deleting route table
    ensure_subnet_associations(connection, vpc_id, route_table, [], module.check_mode, purge_subnets)
    try:
        connection.delete_route_table(route_table.id, dry_run=module.check_mode)
    except EC2ResponseError as e:
        # DryRunOperation means the delete would have succeeded in check mode.
        if e.error_code == 'DryRunOperation':
            pass
        else:
            module.fail_json(msg="Error deleting route table: {0}".format(e.message),
                             exception=traceback.format_exc())

    return {'changed': True}
def get_route_table_info(route_table):
    """Return a plain-dict summary of *route_table* suitable for module output."""
    return {
        'id': route_table.id,
        # Expose each boto route object as its raw attribute dict.
        'routes': [route.__dict__ for route in route_table.routes],
        'tags': route_table.tags,
        'vpc_id': route_table.vpc_id,
    }
def create_route_spec(connection, module, vpc_id):
    """
    Normalise the user-supplied route dicts into boto keyword arguments,
    resolving the special 'igw' gateway alias to the VPC's actual IGW ID.
    """
    routes = module.params.get('routes')

    for spec in routes:
        rename_key(spec, 'dest', 'destination_cidr_block')

        gateway = spec.get('gateway_id')
        if gateway and gateway.lower() == 'igw':
            spec['gateway_id'] = find_igw(connection, vpc_id)

    return routes
def ensure_route_table_present(connection, module):
    """
    Create or update a route table so it matches the module parameters.

    Exits the module via module.exit_json (which raises SystemExit), so
    control never returns to the caller on success.
    """
    lookup = module.params.get('lookup')
    propagating_vgw_ids = module.params.get('propagating_vgw_ids')
    purge_routes = module.params.get('purge_routes')
    purge_subnets = module.params.get('purge_subnets')
    route_table_id = module.params.get('route_table_id')
    subnets = module.params.get('subnets')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    try:
        # Normalise user route dicts (resolves the 'igw' gateway alias).
        routes = create_route_spec(connection, module, vpc_id)
    except AnsibleIgwSearchException as e:
        module.fail_json(msg="Failed to find the Internet gateway for the given VPC ID {0}: {1}".format(vpc_id, e[0]),
                         exception=traceback.format_exc())

    changed = False
    tags_valid = False

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg="Error finding route table with lookup 'tag': {0}".format(e.message),
                                 exception=traceback.format_exc())
            except RuntimeError as e:
                # Raised when the tags match more than one route table.
                module.fail_json(msg=e.args[0], exception=traceback.format_exc())
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg="Error finding route table with lookup 'id': {0}".format(e.message),
                             exception=traceback.format_exc())

    # If no route table returned then create new route table
    if route_table is None:
        try:
            route_table = connection.create_route_table(vpc_id, module.check_mode)
            changed = True
        except EC2ResponseError as e:
            if e.error_code == 'DryRunOperation':
                # Check mode: the create would have succeeded.
                module.exit_json(changed=True)
            module.fail_json(msg="Failed to create route table: {0}".format(e.message),
                             exception=traceback.format_exc())

    if routes is not None:
        try:
            result = ensure_routes(connection, route_table, routes,
                                   propagating_vgw_ids, module.check_mode,
                                   purge_routes)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            module.fail_json(msg="Error while updating routes: {0}".format(e.message),
                             exception=traceback.format_exc())

    if propagating_vgw_ids is not None:
        result = ensure_propagation(connection, route_table,
                                    propagating_vgw_ids,
                                    check_mode=module.check_mode)
        changed = changed or result['changed']

    # tags_valid is never set above, so tags are always reconciled when given.
    if not tags_valid and tags is not None:
        result = ensure_tags(connection, route_table.id, tags,
                             add_only=True, check_mode=module.check_mode)
        route_table.tags = result['tags']
        changed = changed or result['changed']

    if subnets:
        associated_subnets = []
        try:
            # Resolve subnet identifiers (IDs, Name tags or CIDRs) to objects.
            associated_subnets = find_subnets(connection, vpc_id, subnets)
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                message='Unable to find subnets for route table {0}, error: {1}'
                        .format(route_table, e),
                error_traceback=traceback.format_exc()
            )

        try:
            result = ensure_subnet_associations(connection, vpc_id, route_table,
                                                associated_subnets,
                                                module.check_mode,
                                                purge_subnets)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                message='Unable to associate subnets for route table {0}, error: {1}'
                        .format(route_table, e),
                error_traceback=traceback.format_exc()
            )

    module.exit_json(changed=changed, route_table=get_route_table_info(route_table))
def main():
    """Module entry point: parse arguments, connect to AWS and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            lookup=dict(default='tag', required=False, choices=['tag', 'id']),
            propagating_vgw_ids=dict(default=None, required=False, type='list'),
            purge_routes=dict(default=True, type='bool'),
            purge_subnets=dict(default=True, type='bool'),
            route_table_id=dict(default=None, required=False),
            routes=dict(default=[], required=False, type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            subnets=dict(default=None, required=False, type='list'),
            tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            vpc_id=dict(default=None, required=True)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    state = module.params.get('state', 'present')

    # Lookup by id requires the id to be supplied.
    if lookup == 'id' and route_table_id is None:
        module.fail_json(msg="You must specify route_table_id if lookup is set to id")

    try:
        if state == 'present':
            result = ensure_route_table_present(connection, module)
        elif state == 'absent':
            result = ensure_route_table_absent(connection, module)
    except AnsibleRouteTableException as e:
        # Prefer the captured traceback of the underlying failure if present.
        if e.error_traceback:
            module.fail_json(msg=e.message, exception=e.error_traceback)
        module.fail_json(msg=e.message)

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
KhalidGit/flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/distlib/wheel.py | 185 | 38259 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)

cache = None    # created when needed

# Implementation prefix for compatibility tags: PyPy, Jython, IronPython
# or CPython.
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    # Fall back to e.g. '27' / '34' built from the running interpreter.
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

# Platform tag, e.g. 'linux_x86_64' or 'macosx_10_6_intel'.
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')

ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    # No SOABI available (e.g. older CPython): derive the ABI tag from the
    # build-time configuration flags.
    def _derive_abi():
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if sysconfig.get_config_var('WITH_PYMALLOC'):
            parts.append('m')
        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
            parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi
# Wheel filename per PEP 427: name-version[-build]-pyver-abi-arch.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Bare 'name-version[-build]' form (no compatibility tags).
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Leading '#!...' line of a script (bytes pattern).
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')

if os.sep == '/':
    to_posix = lambda o: o
else:
    # On Windows, convert native separators to the '/' used inside archives.
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """
    PEP 302 import hook exposing C extensions contained in mounted wheels.

    Acts as both finder and loader: extension modules registered via
    add() become importable, and load_module() loads them dynamically.
    """

    def __init__(self):
        # wheel pathname -> iterable of (dotted module name, extension path)
        self.impure_wheels = {}
        # flattened view: dotted module name -> extension file path
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the (name, path) extension pairs for the wheel at *pathname*."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget the wheel at *pathname* and drop all of its extensions."""
        extensions = self.impure_wheels.pop(pathname)
        for name, _path in extensions:
            if name in self.libs:
                del self.libs[name]

    def find_module(self, fullname, path=None):
        """Return self as the loader when *fullname* is a registered extension."""
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        """Load (or return the cached) extension module *fullname*."""
        if fullname in sys.modules:
            return sys.modules[fullname]
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        result = imp.load_dynamic(fullname, self.libs[fullname])
        result.__loader__ = self
        # For submodules, record the parent package.
        parts = fullname.rsplit('.', 1)
        if len(parts) > 1:
            result.__package__ = parts[0]
        return result

_hook = Mounter()
class Wheel(object):
    """
    Class to build and install from Wheel files (PEP 427).
    """
    # Highest wheel spec version this implementation understands.
    wheel_version = (1, 1)
    # Default digest algorithm used for RECORD entries.
    hash_kind = 'sha256'
    def __init__(self, filename=None, sign=False, verify=False):
        """
        Initialise an instance using a (valid) filename.

        *filename* may be None (placeholder wheel), a 'name-version[-build]'
        string, or a complete wheel filename (optionally with a directory).
        """
        self.sign = sign
        self.should_verify = verify
        self.buildver = ''
        self.pyver = [PYVER]
        self.abi = ['none']
        self.arch = ['any']
        self.dirname = os.getcwd()
        if filename is None:
            # No name given: fall back to placeholder values.
            self.name = 'dummy'
            self.version = '0.1'
            self._filename = self.filename
        else:
            m = NAME_VERSION_RE.match(filename)
            if m:
                # 'name-version[-build]' form: derive the filename from it.
                info = m.groupdict('')
                self.name = info['nm']
                # Reinstate the local version separator
                self.version = info['vn'].replace('_', '-')
                self.buildver = info['bn']
                self._filename = self.filename
            else:
                # Otherwise it must be a full wheel filename, possibly with
                # a directory component.
                dirname, filename = os.path.split(filename)
                m = FILENAME_RE.match(filename)
                if not m:
                    raise DistlibException('Invalid name or '
                                           'filename: %r' % filename)
                if dirname:
                    self.dirname = os.path.abspath(dirname)
                self._filename = filename
                info = m.groupdict('')
                self.name = info['nm']
                self.version = info['vn']
                self.buildver = info['bn']
                self.pyver = info['py'].split('.')
                self.abi = info['bi'].split('.')
                self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
    @cached_property
    def metadata(self):
        """The distribution metadata read from the wheel, as a Metadata instance."""
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            wheel_metadata = self.get_wheel_metadata(zf)
            wv = wheel_metadata['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # Pre-1.1 wheels store metadata in 'METADATA'; later wheels use
            # the distlib metadata filename.
            if file_version < (1, 1):
                fn = 'METADATA'
            else:
                fn = METADATA_FILENAME
            try:
                metadata_filename = posixpath.join(info_dir, fn)
                with zf.open(metadata_filename) as bf:
                    wf = wrapper(bf)
                    result = Metadata(fileobj=wf)
            except KeyError:
                # zf.open raises KeyError for a missing archive member.
                raise ValueError('Invalid wheel, because %s is '
                                 'missing' % fn)
        return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
    def write_record(self, records, record_path, base):
        """
        Write *records* (rows of (archive path, digest, size)) as a CSV file
        at *record_path*, appending an entry for RECORD itself.
        """
        with CSVWriter(record_path) as writer:
            for row in records:
                writer.writerow(row)
            # RECORD lists itself, but with no hash or size.
            p = to_posix(os.path.relpath(record_path, base))
            writer.writerow((p, '', ''))
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
    def build(self, paths, tags=None, wheel_version=None):
        """
        Build a wheel from files in specified paths, and use any specified tags
        when determining the name of the wheel.
        """
        if tags is None:
            tags = {}

        # The presence of 'platlib' (vs. 'purelib') in *paths* decides purity
        # and the default compatibility tags.
        libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
        if libkey == 'platlib':
            is_pure = 'false'
            default_pyver = [IMPVER]
            default_abi = [ABI]
            default_arch = [ARCH]
        else:
            is_pure = 'true'
            default_pyver = [PYVER]
            default_abi = ['none']
            default_arch = ['any']
        self.pyver = tags.get('pyver', default_pyver)
        self.abi = tags.get('abi', default_abi)
        self.arch = tags.get('arch', default_arch)

        libdir = paths[libkey]

        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        archive_paths = []

        # First, stuff which is not in site-packages
        for key in ('data', 'headers', 'scripts'):
            if key not in paths:
                continue
            path = paths[key]
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for fn in files:
                        p = fsdecode(os.path.join(root, fn))
                        rp = os.path.relpath(p, path)
                        ap = to_posix(os.path.join(data_dir, key, rp))
                        archive_paths.append((ap, p))
                        if key == 'scripts' and not p.endswith('.exe'):
                            # Rewrite script shebangs to the generic
                            # '#!python' form expected in wheels.
                            with open(p, 'rb') as f:
                                data = f.read()
                            data = self.process_shebang(data)
                            with open(p, 'wb') as f:
                                f.write(data)

        # Now, stuff which is in site-packages, other than the
        # distinfo stuff.
        path = libdir
        distinfo = None
        for root, dirs, files in os.walk(path):
            if root == path:
                # At the top level only, save distinfo for later
                # and skip it for now
                for i, dn in enumerate(dirs):
                    dn = fsdecode(dn)
                    if dn.endswith('.dist-info'):
                        distinfo = os.path.join(root, dn)
                        del dirs[i]
                        break
                assert distinfo, '.dist-info directory expected, not found'

            for fn in files:
                # comment out next suite to leave .pyc files in
                if fsdecode(fn).endswith(('.pyc', '.pyo')):
                    continue
                p = os.path.join(root, fn)
                rp = to_posix(os.path.relpath(p, path))
                archive_paths.append((rp, p))

        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
        files = os.listdir(distinfo)
        for fn in files:
            # RECORD is regenerated below; INSTALLER/SHARED are
            # installation-time artifacts and do not belong in the wheel.
            if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
                p = fsdecode(os.path.join(distinfo, fn))
                ap = to_posix(os.path.join(info_dir, fn))
                archive_paths.append((ap, p))

        # Write the WHEEL metadata file, with one Tag line per tag triple.
        wheel_metadata = [
            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
            'Generator: distlib %s' % __version__,
            'Root-Is-Purelib: %s' % is_pure,
        ]
        for pyver, abi, arch in self.tags:
            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
        p = os.path.join(distinfo, 'WHEEL')
        with open(p, 'w') as f:
            f.write('\n'.join(wheel_metadata))
        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
        archive_paths.append((ap, p))

        # Now, at last, RECORD.
        # Paths in here are archive paths - nothing else makes sense.
        self.write_records((distinfo, info_dir), libdir, archive_paths)
        # Now, ready to build the zip file
        pathname = os.path.join(self.dirname, self.filename)
        self.build_zip(pathname, archive_paths)
        return pathname
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.
        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """
        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver
        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            # Read the WHEEL metadata; warn the caller (if requested) when
            # the wheel's version differs from the one this code supports.
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)
            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']
            # Map archive path -> RECORD row (path, digest, size) so every
            # extracted entry can be integrity-checked.
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row
            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')
            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed
            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
            outfiles = []   # for RECORD writing
            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    # Verify size and digest against RECORD before writing
                    # anything to disk.
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)
                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))
                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        # Scripts go through the maker so that shebangs and
                        # launchers are handled: stage in workdir first.
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)
                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)
                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts
                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)
                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)
                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)
                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)
                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                # Always clean up the script staging directory.
                shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
    def _get_extensions(self):
        """
        Return a list of ``(name, absolute_path)`` pairs for extension
        modules listed in the wheel's EXTENSIONS metadata, extracting each
        one into the shared dylib cache when it is missing or older than
        the archived copy. Returns an empty list if the wheel has no
        EXTENSIONS entry.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        arcname = posixpath.join(info_dir, 'EXTENSIONS')
        wrapper = codecs.getreader('utf-8')
        result = []
        with ZipFile(pathname, 'r') as zf:
            try:
                with zf.open(arcname) as bf:
                    wf = wrapper(bf)
                    # EXTENSIONS maps module name -> relative path in wheel.
                    extensions = json.load(wf)
                    cache = self._get_dylib_cache()
                    prefix = cache.prefix_to_dir(pathname)
                    cache_base = os.path.join(cache.base, prefix)
                    if not os.path.isdir(cache_base):
                        os.makedirs(cache_base)
                    for name, relpath in extensions.items():
                        dest = os.path.join(cache_base, convert_path(relpath))
                        if not os.path.exists(dest):
                            extract = True
                        else:
                            # Re-extract only when the archived entry is
                            # newer than the cached copy.
                            file_time = os.stat(dest).st_mtime
                            file_time = datetime.datetime.fromtimestamp(file_time)
                            info = zf.getinfo(relpath)
                            wheel_time = datetime.datetime(*info.date_time)
                            extract = wheel_time > file_time
                        if extract:
                            zf.extract(relpath, cache_base)
                        result.append((name, dest))
            except KeyError:
                # No EXTENSIONS entry in this wheel: nothing to do.
                pass
        return result
    def is_compatible(self):
        """
        Determine if a wheel is compatible with the running system.
        """
        # Delegates to the module-level is_compatible() function, which this
        # method name shadows inside the class body.
        return is_compatible(self)
    def is_mountable(self):
        """
        Determine if a wheel is asserted as mountable by its metadata.
        """
        # Placeholder: always mountable until the metadata flag is defined.
        return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
    def verify(self):
        """
        Check every entry in the wheel archive against its RECORD: the
        recorded size and digest must match, and no entry may contain a
        path-traversal ('..') component. Raises :class:`DistlibException`
        on the first mismatch or invalid entry.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver
        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # TODO version verification
            # Map archive path -> RECORD row (path, digest, size).
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # Refuse path-traversal entries outright.
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)
                # The signature file won't be in RECORD,
                # and we don't currently do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
    def update(self, modifier, dest_dir=None, **kwargs):
        """
        Update the contents of a wheel in a generic way. The modifier should
        be a callable which expects a dictionary argument: its keys are
        archive-entry paths, and its values are absolute filesystem paths
        where the contents the corresponding archive entries can be found. The
        modifier is free to change the contents of the files pointed to, add
        new entries and remove entries, before returning. This method will
        extract the entire contents of the wheel to a temporary location, call
        the modifier, and then use the passed (and possibly updated)
        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
        wheel is written there -- otherwise, the original wheel is overwritten.
        The modifier should return True if it updated the wheel, else False.
        This method returns the same value the modifier returns.
        """
        def get_version(path_map, info_dir):
            # Return (version, metadata_path) from the extracted metadata,
            # preferring METADATA_FILENAME over legacy PKG-INFO; both may be
            # absent, in which case (None, None) is returned.
            version = path = None
            key = '%s/%s' % (info_dir, METADATA_FILENAME)
            if key not in path_map:
                key = '%s/PKG-INFO' % info_dir
            if key in path_map:
                path = path_map[key]
                version = Metadata(path=path).version
            return version, path
        def update_version(version, path):
            # Bump (or add) the local '-N' version segment so the rebuilt
            # wheel is distinguishable from the original; non-PEP-440
            # versions are left unchanged.
            updated = None
            try:
                v = NormalizedVersion(version)  # validation only; may raise
                i = version.find('-')
                if i < 0:
                    updated = '%s-1' % version
                else:
                    parts = [int(s) for s in version[i + 1:].split('.')]
                    parts[-1] += 1
                    updated = '%s-%s' % (version[:i],
                                         '.'.join(str(i) for i in parts))
            except UnsupportedVersionError:
                logger.debug('Cannot update non-compliant (PEP-440) '
                             'version %r', version)
            if updated:
                md = Metadata(path=path)
                md.version = updated
                legacy = not path.endswith(METADATA_FILENAME)
                md.write(path=path, legacy=legacy)
                logger.debug('Version updated from %r to %r', version,
                             updated)
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        record_name = posixpath.join(info_dir, 'RECORD')
        with tempdir() as workdir:
            with ZipFile(pathname, 'r') as zf:
                # Extract everything except RECORD (it will be regenerated),
                # refusing path-traversal entries.
                path_map = {}
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    if u_arcname == record_name:
                        continue
                    if '..' in u_arcname:
                        raise DistlibException('invalid entry in '
                                               'wheel: %r' % u_arcname)
                    zf.extract(zinfo, workdir)
                    path = os.path.join(workdir, convert_path(u_arcname))
                    path_map[u_arcname] = path
            # Remember the version.
            original_version, _ = get_version(path_map, info_dir)
            # Files extracted. Call the modifier.
            modified = modifier(path_map, **kwargs)
            if modified:
                # Something changed - need to build a new wheel.
                current_version, path = get_version(path_map, info_dir)
                if current_version and (current_version == original_version):
                    # Add or update local version to signify changes.
                    update_version(current_version, path)
                # Decide where the new wheel goes.
                if dest_dir is None:
                    fd, newpath = tempfile.mkstemp(suffix='.whl',
                                                   prefix='wheel-update-',
                                                   dir=workdir)
                    os.close(fd)
                else:
                    if not os.path.isdir(dest_dir):
                        raise DistlibException('Not a directory: %r' % dest_dir)
                    newpath = os.path.join(dest_dir, self.filename)
                archive_paths = list(path_map.items())
                distinfo = os.path.join(workdir, info_dir)
                info = distinfo, info_dir
                self.write_records(info, workdir, archive_paths)
                self.build_zip(newpath, archive_paths)
                if dest_dir is None:
                    # Overwrite the original wheel in place.
                    shutil.copyfile(newpath, pathname)
        return modified
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    # Earlier minor versions of the same major release are also acceptable.
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))
    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []
    arches = [ARCH]
    if sys.platform == 'darwin':
        # macOS platform tags look like 'macosx_10_9_x86_64'. Use a raw
        # string: the original non-raw pattern relied on '\w'/'\d' not being
        # recognized string escapes, which raises DeprecationWarning (and
        # becomes an error on modern Pythons).
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            # Also accept tags for all earlier macOS minor versions.
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1
    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)
# Compute this interpreter's tag set once at import time, then delete the
# helper so only the constant remains in the module namespace.
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
    """
    Return True if *wheel* (a :class:`Wheel` or a wheel filename) matches
    any of the given (pyver, abi, arch) *tags*, which default to this
    interpreter's COMPATIBLE_TAGS.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
| apache-2.0 |
sentient-energy/googletest | googlemock/test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Thin wrapper: the actual logic lives in the shared gtest helper module.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  # Delegates to the shared gtest helper so gmock and gtest resolve test
  # binaries identically.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() already returns the argument to exit()
    # directly, so no decoding is needed.
    return exit_code
  # On Unix, os.system() returns a wait()-style status word; decode it.
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  # Process did not terminate via exit() (e.g. killed by a signal).
  return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # Hand off to the shared gtest test runner.
  gtest_test_utils.Main()
| bsd-3-clause |
2ndQuadrant/ansible | test/units/playbook/test_task.py | 43 | 3567 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch
from ansible.playbook.task import Task
from ansible.parsing.yaml import objects
from ansible import errors
basic_command_task = dict(
name='Test Task',
command='echo hi'
)
kv_command_task = dict(
action='command echo hi'
)
# See #36848
kv_bad_args_str = '- apk: sdfs sf sdf 37'
kv_bad_args_ds = {'apk': 'sdfs sf sdf 37'}
class TestTask(unittest.TestCase):
    """Tests for Task construction and Task.load() parsing behaviour."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_construct_empty_task(self):
        # A bare Task() must be constructible without arguments.
        Task()

    def test_construct_task_with_role(self):
        pass

    def test_construct_task_with_block(self):
        pass

    def test_construct_task_with_role_and_block(self):
        pass

    def test_load_task_simple(self):
        t = Task.load(basic_command_task)
        assert t is not None
        self.assertEqual(t.name, basic_command_task['name'])
        self.assertEqual(t.action, 'command')
        self.assertEqual(t.args, dict(_raw_params='echo hi'))

    def test_load_task_kv_form(self):
        t = Task.load(kv_command_task)
        self.assertEqual(t.action, 'command')
        self.assertEqual(t.args, dict(_raw_params='echo hi'))

    @patch.object(errors.AnsibleError, '_get_error_lines_from_file')
    def test_load_task_kv_form_error_36848(self, mock_get_err_lines):
        # Regression test for #36848: a k=v task with malformed arguments
        # must raise AnsibleParserError pointing at the offending line.
        ds = objects.AnsibleMapping(kv_bad_args_ds)
        ds.ansible_pos = ('test_task_faux_playbook.yml', 1, 1)
        mock_get_err_lines.return_value = (kv_bad_args_str, '')
        with self.assertRaises(errors.AnsibleParserError) as cm:
            Task.load(ds)
        self.assertIsInstance(cm.exception, errors.AnsibleParserError)
        self.assertEqual(cm.exception._obj, ds)
        self.assertEqual(cm.exception._obj, kv_bad_args_ds)
        self.assertIn("The error appears to be in 'test_task_faux_playbook.yml", cm.exception.message)
        self.assertIn(kv_bad_args_str, cm.exception.message)
        self.assertIn('apk', cm.exception.message)
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name so these checks don't emit DeprecationWarning.
        self.assertEqual(cm.exception.message.count('The offending line'), 1)
        self.assertEqual(cm.exception.message.count('The error appears to be in'), 1)

    def test_task_auto_name(self):
        assert 'name' not in kv_command_task
        Task.load(kv_command_task)
        # self.assertEqual(t.name, 'shell echo hi')

    def test_task_auto_name_with_role(self):
        pass

    def test_load_task_complex_form(self):
        pass

    def test_can_load_module_complex_form(self):
        pass

    def test_local_action_implies_delegate(self):
        pass

    def test_local_action_conflicts_with_delegate(self):
        pass

    def test_delegate_to_parses(self):
        pass
| gpl-3.0 |
soylentdeen/cuddly-weasel | gfVerification/plotSolar.py | 1 | 1472 | import matplotlib.pyplot as pyplot
import Moog960
import MoogTools
import numpy
import glob
# Result files from per-line solar gf-correction runs; one file per batch.
correctionFiles = glob.glob('/home/deen/MoogPyData/AbsorptionLines/corrections/*Solar_results.dat')
# Load observed solar and Arcturus spectra from their FITS melody files.
solarSpectrum = Moog960.ObservedMelody.fromFile(filename='SolarSpectrum.fits')
solarSpectrum.loadData()
arcturusSpectrum = Moog960.ObservedMelody.fromFile(filename='ArcturusSpectrum.fits')
arcturusSpectrum.loadData()
# Restrict both spectra to the 20000-24000 Angstrom region of interest.
solarSpectrum.selectPhrases(wlRange=[20000, 24000])
arcturusSpectrum.selectPhrases(wlRange=[20000, 24000])
observed, labels = solarSpectrum.perform()
solar = observed[0][0]
observed, labels = arcturusSpectrum.perform()
arcturus = observed[0][0]
# Accumulators: individual line wavelengths (wls/ycoord) and, per file,
# a bar spanning that file's first-to-last wavelength (left/width/height).
wls = []
ycoord = []
left = []
width = []
height = []
counter = 0
for corrections in correctionFiles:
    with open(corrections, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # First whitespace-separated column is the line wavelength.
            wls.append(float(line.split()[0]))
            ycoord.append(1.0)
        #print counter
        #print wls[counter], wls[-1], wls[-2]
        # `counter` still holds the index of this file's first wavelength.
        left.append(wls[counter])
        width.append(wls[-1] - wls[counter])
        height.append(1.0)
        counter = len(wls)
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.plot(solar.wl, solar.flux_I, color = 'k')
# NOTE(review): the -5.5 Angstrom shift presumably aligns Arcturus with the
# solar wavelength scale -- confirm against the data source.
ax.plot(arcturus.wl-5.5, arcturus.flux_I, color ='g')
#ax.scatter(wls, ycoord, marker='o', s=30, color = 'r')
# Shade the wavelength span covered by each correction file.
ax.bar(left, height, width=width, alpha=0.5, color = 'r')
ax.set_xbound(20000, 23000)
ax.set_ybound(0.0, 1.1)
fig.show()
| mit |
pbryzek/Freedom | objects/obj_home.py | 1 | 3607 | from common.globals import handle_err_msg
from common.globals import format_citystatezip
import consts.switches as switches
class HomeObj(object):
    """A single real-estate listing, normalized for CSV export."""
    def __init__(self, type, address, city, state, zip, dom, listing_id, beds, baths, yearbuilt, sqfootage, lotsize, latitude, longitude, homelink, graphlink, maplink, compslink, zpid, zestimate, lastupdated, rentestimate, lastupdated_rent, num_hot_words):
        # Strip commas from the street address, then split the leading house
        # number from the remaining street name.
        self.address = address.replace(",", " ").strip()
        address_pieces = self.address.split()
        self.address_num = ""
        self.address_st = ""
        if len(address_pieces) > 0:
            self.address_num = address_pieces[0]
            address_num_len = len(self.address_num)
            self.address_st = self.address[address_num_len:].strip()
        # A dash in the house number (a unit range) is not handled; report it.
        if "-" in self.address_num:
            handle_err_msg("ERROR! found a dash in the property numbers: " + self.address)
        self.citystatezip = format_citystatezip(city, state, zip)
        self.city = city
        self.state = state
        self.zip = zip
        self.num_hot_words = num_hot_words
        self.yearbuilt = yearbuilt
        self.sqfootage = sqfootage
        self.lotsize = lotsize
        self.baths = baths
        self.beds = beds
        self.latitude = latitude
        self.longitude = longitude
        self.homelink = homelink
        self.graphlink = graphlink
        self.maplink = maplink
        self.compslink = compslink
        self.chartlink = ""
        self.redfin_link = ""
        self.zpid = zpid
        # Sentinel "-1" marks a missing Zillow estimate.
        if not zestimate:
            zestimate = "-1"
        self.zestimate = zestimate
        self.lastupdated = lastupdated
        self.rentestimate = rentestimate
        self.lastupdated_rent = lastupdated_rent
        #Distance from home, used to keep the csv consistent
        self.distance = 0
        #If the DOM is less than the standard, then it is a tier 2 property
        if dom and not (dom is None):
            self.dom = dom
        else:
            self.dom = 0
        # NOTE(review): this compares the raw `dom` argument (which may be
        # None or falsy), not self.dom -- confirm callers never pass None.
        if dom < switches.MIN_DOM:
            self.tier = 2
        else:
            self.tier = 1
        self.listing_id = listing_id
        self.type = type
    def create_csv(self):
        """Return this listing as a single comma-separated CSV row string."""
        # Diagnostic prints for fields that would break the string
        # concatenation below if left as None.
        if self.homelink is None:
            print "self.homelink is null"
        if self.graphlink is None:
            print "self.graphlink is None"
        if self.maplink is None:
            print "self.maplink is null"
        if self.compslink is None:
            print "self.compslink is None"
        if self.zpid is None:
            print "self.zpid is null"
        if self.zestimate is None:
            print "self.zestimate is None"
        # Build the row in five segments to keep the lines manageable.
        comma_separated_1 = self.type + "," + self.address_num + "," + self.address_st + "," + self.city + "," + self.state + "," + self.zip + ","
        comma_separated_2 = str(self.dom) + "," + str(self.listing_id) + "," + self.beds + "," + self.baths + "," + self.sqfootage + "," + str(self.lotsize) + ","
        comma_separated_3 = self.yearbuilt + "," + self.latitude + "," + self.longitude + "," + self.redfin_link + "," + self.chartlink + ","
        comma_separated_4 = self.homelink + "," + self.graphlink + "," + self.maplink + "," + self.compslink + "," + self.zpid + "," + self.zestimate + ","
        comma_separated_5 = self.lastupdated + "," + self.rentestimate + "," + self.lastupdated_rent + "," + str(self.distance) + "," + str(self.num_hot_words)
        comma_separated = comma_separated_1 + comma_separated_2 + comma_separated_3 + comma_separated_4 + comma_separated_5
        return comma_separated
| mit |
ashishnitinpatil/django_appengine_project_template | django/contrib/admin/helpers.py | 102 | 14143 | from __future__ import unicode_literals
from django import forms
from django.contrib.admin.util import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
    # Form rendered above admin changelists for selecting a bulk action;
    # select_across is set by JS when "select all across pages" is chosen.
    action = forms.ChoiceField(label=_('Action:'))
    select_across = forms.BooleanField(label='', required=False, initial=0,
        widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
    """Wraps a form plus fieldset/prepopulation metadata for rendering in
    the admin change-form templates."""
    def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
        self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
        # Resolve prepopulated field names to BoundFields up front.
        self.prepopulated_fields = [{
            'field': form[field_name],
            'dependencies': [form[f] for f in dependencies]
        } for field_name, dependencies in prepopulated_fields.items()]
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
    def __iter__(self):
        # Yield one Fieldset per (name, options) pair for template iteration.
        for name, options in self.fieldsets:
            yield Fieldset(self.form, name,
                readonly_fields=self.readonly_fields,
                model_admin=self.model_admin,
                **options
            )
    def first_field(self):
        # Return the first field of the first fieldset, falling back to the
        # form's first field, or None when the form is empty.
        try:
            fieldset_name, fieldset_options = self.fieldsets[0]
            field_name = fieldset_options['fields'][0]
            if not isinstance(field_name, six.string_types):
                field_name = field_name[0]
            return self.form[field_name]
        except (KeyError, IndexError):
            pass
        try:
            return next(iter(self.form))
        except StopIteration:
            return None
    def _media(self):
        # Combine the form's media with that of every fieldset.
        media = self.form.media
        for fs in self:
            media = media + fs.media
        return media
    media = property(_media)
class Fieldset(object):
    """One admin fieldset: a named group of form rows with CSS classes and
    an optional description."""
    def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
            description=None, model_admin=None):
        self.form = form
        self.name, self.fields = name, fields
        self.classes = ' '.join(classes)
        self.description = description
        self.model_admin = model_admin
        self.readonly_fields = readonly_fields
    def _media(self):
        # Collapsible fieldsets need jQuery plus the collapse script
        # (minified outside DEBUG).
        if 'collapse' in self.classes:
            extra = '' if settings.DEBUG else '.min'
            js = ['jquery%s.js' % extra,
                  'jquery.init.js',
                  'collapse%s.js' % extra]
            return forms.Media(js=[static('admin/js/%s' % url) for url in js])
        return forms.Media()
    media = property(_media)
    def __iter__(self):
        # Yield one Fieldline per entry in `fields`.
        for field in self.fields:
            yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
    """A single row in a fieldset; may carry several fields on one line."""
    def __init__(self, form, field, readonly_fields=None, model_admin=None):
        self.form = form # A django.forms.Form instance
        # Accept either a single field name or an iterable of names
        # (several fields rendered side by side on one line).
        if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
            self.fields = [field]
        else:
            self.fields = field
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
    def __iter__(self):
        # Read-only fields render via AdminReadonlyField; editable ones via
        # AdminField.
        for i, field in enumerate(self.fields):
            if field in self.readonly_fields:
                yield AdminReadonlyField(self.form, field, is_first=(i == 0),
                    model_admin=self.model_admin)
            else:
                yield AdminField(self.form, field, is_first=(i == 0))
    def errors(self):
        # Concatenate the error lists of all editable fields on this line.
        return mark_safe('\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
    """Wraps a BoundField with admin-specific label and error rendering."""
    def __init__(self, form, field, is_first):
        self.field = form[field] # A django.forms.BoundField instance
        self.is_first = is_first # Whether this field is first on the line
        self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
    def label_tag(self):
        # Build the CSS class list for the <label> based on widget type,
        # requiredness, and position on the line.
        classes = []
        contents = conditional_escape(force_text(self.field.label))
        if self.is_checkbox:
            classes.append('vCheckboxLabel')
        if self.field.field.required:
            classes.append('required')
        if not self.is_first:
            classes.append('inline')
        attrs = {'class': ' '.join(classes)} if classes else {}
        # checkboxes should not have a label suffix as the checkbox appears
        # to the left of the label.
        return self.field.label_tag(contents=mark_safe(contents), attrs=attrs,
            label_suffix='' if self.is_checkbox else None)
    def errors(self):
        return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
    """Pseudo-field used to render a read-only value on a change form."""
    def __init__(self, form, field, is_first, model_admin=None):
        label = label_for_field(field, form._meta.model, model_admin)
        # Make self.field look a little bit like a field. This means that
        # {{ field.name }} must be a useful class name to identify the field.
        # For convenience, store other field-related data here too.
        if callable(field):
            class_name = field.__name__ if field.__name__ != '<lambda>' else ''
        else:
            class_name = field
        self.field = {
            'name': class_name,
            'label': label,
            'field': field,
            'help_text': help_text_for_field(class_name, form._meta.model)
        }
        self.form = form
        self.model_admin = model_admin
        self.is_first = is_first
        self.is_checkbox = False
        self.is_readonly = True
    def label_tag(self):
        attrs = {}
        if not self.is_first:
            attrs["class"] = "inline"
        label = self.field['label']
        return format_html('<label{0}>{1}:</label>',
                           flatatt(attrs),
                           capfirst(force_text(label)))
    def contents(self):
        # Resolve and render the read-only value, mirroring changelist
        # column rendering: booleans become icons, M2M values are joined,
        # unresolvable values fall back to EMPTY_CHANGELIST_VALUE.
        from django.contrib.admin.templatetags.admin_list import _boolean_icon
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
        try:
            f, attr, value = lookup_field(field, obj, model_admin)
        except (AttributeError, ValueError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # Non-model attribute/callable: respect boolean/allow_tags.
                boolean = getattr(attr, "boolean", False)
                if boolean:
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_text(value)
                    if getattr(attr, "allow_tags", False):
                        result_repr = mark_safe(result_repr)
            else:
                if isinstance(f.rel, ManyToManyRel) and value is not None:
                    result_repr = ", ".join(map(six.text_type, value.all()))
                else:
                    result_repr = display_for_field(value, f)
        return conditional_escape(result_repr)
class InlineAdminFormSet(object):
    """
    A wrapper around an inline formset for use in the admin system.
    """

    def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
                 readonly_fields=None, model_admin=None):
        self.opts = inline
        self.formset = formset
        self.fieldsets = fieldsets
        self.model_admin = model_admin
        self.readonly_fields = () if readonly_fields is None else readonly_fields
        self.prepopulated_fields = {} if prepopulated_fields is None else prepopulated_fields

    def __iter__(self):
        # Saved forms first (paired with their database objects), then the
        # blank "extra" forms, and finally the JavaScript template form.
        queryset = self.formset.get_queryset()
        for form, original in zip(self.formset.initial_forms, queryset):
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                                  self.prepopulated_fields, original,
                                  self.readonly_fields, model_admin=self.opts)
        for form in self.formset.extra_forms:
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                                  self.prepopulated_fields, None,
                                  self.readonly_fields, model_admin=self.opts)
        yield InlineAdminForm(self.formset, self.formset.empty_form,
                              self.fieldsets, self.prepopulated_fields, None,
                              self.readonly_fields, model_admin=self.opts)

    def fields(self):
        fk = getattr(self.formset, "fk", None)
        for field_name in flatten_fieldsets(self.fieldsets):
            # The foreign key back to the parent is handled by the formset
            # machinery and never rendered as a column.
            if fk and fk.name == field_name:
                continue
            if field_name not in self.readonly_fields:
                yield self.formset.form.base_fields[field_name]
            else:
                # Read-only columns are described by a small field-like dict.
                yield {
                    'label': label_for_field(field_name, self.opts.model, self.opts),
                    'widget': {
                        'is_hidden': False
                    },
                    'required': False,
                    'help_text': help_text_for_field(field_name, self.opts.model),
                }

    def _media(self):
        # Merge the inline's media, the formset's media and each form's media.
        media = self.opts.media + self.formset.media
        for fs in self:
            media = media + fs.media
        return media
    media = property(_media)
class InlineAdminForm(AdminForm):
    """
    A wrapper around an inline form for use in the admin system.
    """

    def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
                 readonly_fields=None, model_admin=None):
        self.formset = formset
        self.model_admin = model_admin
        self.original = original  # the saved object, or None for extra forms
        if original is not None:
            # Used by admin templates to build links for the original object.
            self.original_content_type_id = ContentType.objects.get_for_model(original).pk
        self.show_url = original and hasattr(original, 'get_absolute_url')
        super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
                                              readonly_fields, model_admin)

    def __iter__(self):
        for name, options in self.fieldsets:
            yield InlineFieldset(self.formset, self.form, name,
                                 self.readonly_fields, model_admin=self.model_admin, **options)

    def needs_explicit_pk_field(self):
        """Whether a hidden pk input must be rendered for this inline form."""
        # Auto fields are editable (oddly), so need to check for auto or non-editable pk
        if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
            return True
        # Also search any parents for an auto field. (The pk info is propagated to child
        # models so that does not need to be checked in parents.)
        for parent in self.form._meta.model._meta.get_parent_list():
            if parent._meta.has_auto_field:
                return True
        return False

    def field_count(self):
        # tabular.html uses this function for colspan value.
        num_of_fields = 0
        if self.has_auto_field():
            num_of_fields += 1
        num_of_fields += len(self.fieldsets[0][1]["fields"])
        if self.formset.can_order:
            num_of_fields += 1
        if self.formset.can_delete:
            num_of_fields += 1
        return num_of_fields

    def pk_field(self):
        # Wrapper for the primary-key field of this inline form.
        return AdminField(self.form, self.formset._pk_field.name, False)

    def fk_field(self):
        # Wrapper for the foreign key back to the parent object, if any.
        fk = getattr(self.formset, "fk", None)
        if fk:
            return AdminField(self.form, fk.name, False)
        else:
            return ""

    def deletion_field(self):
        # The formset's built-in "delete this row" checkbox.
        from django.forms.formsets import DELETION_FIELD_NAME
        return AdminField(self.form, DELETION_FIELD_NAME, False)

    def ordering_field(self):
        # The formset's built-in ordering input.
        from django.forms.formsets import ORDERING_FIELD_NAME
        return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
    """A Fieldset that hides the foreign key pointing back to the parent."""

    def __init__(self, formset, *args, **kwargs):
        self.formset = formset
        super(InlineFieldset, self).__init__(*args, **kwargs)

    def __iter__(self):
        fk = getattr(self.formset, "fk", None)
        for field in self.fields:
            # The parent link is managed by the formset itself; skip it.
            if fk and field == fk.name:
                continue
            yield Fieldline(self.form, field, self.readonly_fields,
                            model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
    """
    Stores all errors for the form/formsets in an add/change stage view.
    """

    def __init__(self, form, inline_formsets):
        # Unbound forms have not been validated, so there is nothing to collect.
        if not form.is_bound:
            return
        self.extend(list(six.itervalues(form.errors)))
        for inline_formset in inline_formsets:
            self.extend(inline_formset.non_form_errors())
            for form_errors in inline_formset.errors:
                self.extend(list(six.itervalues(form_errors)))
def normalize_fieldsets(fieldsets):
    """
    Make sure the keys in fieldset dictionaries are strings. Returns the
    normalized data.
    """
    return [(name, normalize_dictionary(options)) for name, options in fieldsets]
def normalize_dictionary(data_dict):
    """
    Converts all the keys in "data_dict" to strings, in place. The keys must
    be convertible using str(). Returns the same (mutated) dictionary.
    """
    # Iterate over a snapshot of the items: deleting and inserting keys while
    # iterating the live view raises RuntimeError on Python 3.
    for key, value in list(data_dict.items()):
        if not isinstance(key, str):
            del data_dict[key]
            data_dict[str(key)] = value
    return data_dict
| bsd-2-clause |
puittenbroek/slimmermeten | fabfile.py | 1 | 5525 | """
Starter fabfile for deploying the bigfattemplate project.
Change all the things marked CHANGEME. Other things can be left at their
defaults if you are happy with the default layout.
"""
import posixpath
from fabric.api import run, local, env, settings, cd, task
from fabric.contrib.files import exists
from fabric.operations import _prefix_commands, _prefix_env_vars
#from fabric.decorators import runs_once
#from fabric.context_managers import cd, lcd, settings, hide
# CHANGEME
env.hosts = ['user@bigfattemplate.example.com']
env.code_dir = '/srv/www/bigfattemplate'
env.project_dir = '/srv/www/bigfattemplate/bigfattemplate'
env.static_root = '/srv/www/bigfattemplate/static/'
env.virtualenv = '/srv/www/bigfattemplate/.virtualenv'
env.code_repo = 'git@github.com:user/bigfattemplate.git'
env.django_settings_module = 'bigfattemplate.settings'
# Python version
PYTHON_BIN = "python2.7"
PYTHON_PREFIX = "" # e.g. /usr/local Use "" for automatic
PYTHON_FULL_PATH = "%s/bin/%s" % (PYTHON_PREFIX, PYTHON_BIN) if PYTHON_PREFIX else PYTHON_BIN
# Set to true if you can restart your webserver (via wsgi.py), false to stop/start your webserver
# CHANGEME
DJANGO_SERVER_RESTART = False
def virtualenv(venv_dir):
    """
    Context manager that establishes a virtualenv to use.
    """
    return settings(venv=venv_dir)


def run_venv(command, **kwargs):
    """
    Runs a command in a virtualenv (which has been specified using
    the virtualenv context manager
    """
    # "source activate" must happen in the same shell invocation as command.
    run("source %s/bin/activate" % env.virtualenv + " && " + command, **kwargs)


def install_dependencies():
    # Install the pinned production requirements inside the virtualenv.
    ensure_virtualenv()
    with virtualenv(env.virtualenv):
        with cd(env.code_dir):
            run_venv("pip install -r requirements/production.txt")


def ensure_virtualenv():
    # Create the virtualenv on first deploy; no-op when it already exists.
    if exists(env.virtualenv):
        return

    with cd(env.code_dir):
        run("virtualenv --no-site-packages --python=%s %s" %
            (PYTHON_BIN, env.virtualenv))
        # A .pth file makes the project sources importable from the venv.
        run("echo %s > %s/lib/%s/site-packages/projectsource.pth" %
            (env.project_dir, env.virtualenv, PYTHON_BIN))


def ensure_src_dir():
    # Create the code directory and clone the repository into it on first use.
    if not exists(env.code_dir):
        run("mkdir -p %s" % env.code_dir)
    with cd(env.code_dir):
        if not exists(posixpath.join(env.code_dir, '.git')):
            run('git clone %s .' % (env.code_repo))
def push_sources():
    """
    Push source code to server
    """
    ensure_src_dir()
    # Push from the local checkout, then pull on the remote host.
    local('git push origin master')
    with cd(env.code_dir):
        run('git pull origin master')


@task
def run_tests():
    """ Runs the Django test suite as is. """
    local("./manage.py test")


@task
def version():
    """ Show last commit to the deployed repo. """
    with cd(env.code_dir):
        run('git log -1')


@task
def uname():
    """ Prints information about the host. """
    run("uname -a")


@task
def webserver_stop():
    """
    Stop the webserver that is running the Django instance
    """
    run("service apache2 stop")


@task
def webserver_start():
    """
    Starts the webserver that is running the Django instance
    """
    run("service apache2 start")


@task
def webserver_restart():
    """
    Restarts the webserver that is running the Django instance
    """
    if DJANGO_SERVER_RESTART:
        # Graceful reload: touching wsgi.py makes mod_wsgi restart the daemon.
        with cd(env.code_dir):
            run("touch %s/wsgi.py" % env.project_dir)
    else:
        # Full stop/start; warn_only so a stop failure doesn't abort the start.
        with settings(warn_only=True):
            webserver_stop()
        webserver_start()


def restart():
    """ Restart the wsgi process """
    with cd(env.code_dir):
        run("touch %s/bigfattemplate/wsgi.py" % env.code_dir)


def build_static():
    # Guard: never let collectstatic --clear operate on '' or '/'.
    assert env.static_root.strip() != '' and env.static_root.strip() != '/'
    with virtualenv(env.virtualenv):
        with cd(env.code_dir):
            run_venv("./manage.py collectstatic -v 0 --clear --noinput")
    run("chmod -R ugo+r %s" % env.static_root)
@task
def first_deployment_mode():
    """
    Use before first deployment to switch on fake south migrations.
    """
    env.initial_deploy = True


@task
def update_database(app=None):
    """
    Update the database (run the migrations)
    Usage: fab update_database:app_name
    """
    with virtualenv(env.virtualenv):
        with cd(env.code_dir):
            if getattr(env, 'initial_deploy', False):
                # First deploy: create every table, then mark all South
                # migrations as applied without running them.
                run_venv("./manage.py syncdb --all")
                run_venv("./manage.py migrate --fake --noinput")
            else:
                run_venv("./manage.py syncdb --noinput")
                if app:
                    run_venv("./manage.py migrate %s --noinput" % app)
                else:
                    run_venv("./manage.py migrate --noinput")


@task
def sshagent_run(cmd):
    """
    Helper function.
    Runs a command with SSH agent forwarding enabled.
    Note:: Fabric (and paramiko) can't forward your SSH agent.
    This helper uses your system's ssh to do so.
    """
    # Handle context manager modifications
    wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
    try:
        # host_string may carry an explicit port as "host:port".
        host, port = env.host_string.split(':')
        return local(
            "ssh -p %s -A %s@%s '%s'" % (port, env.user, host, wrapped_cmd)
        )
    except ValueError:
        # No explicit port; let ssh use its default.
        return local(
            "ssh -A %s@%s '%s'" % (env.user, env.host_string, wrapped_cmd)
        )


@task
def deploy():
    """
    Deploy the project.
    """
    with settings(warn_only=True):
        webserver_stop()
    push_sources()
    install_dependencies()
    update_database()
    build_static()
    webserver_start()
| bsd-3-clause |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/frames/entropy_test.py | 14 | 2228 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test Shannon entropy calculations """
import unittest
import math
from sparktkregtests.lib import sparktk_test
class EntropyTest(sparktk_test.SparkTKTestCase):
    """Exercises sparktk's frame.entropy() against known distributions."""

    def test_entropy_coin_flip(self):
        """ Get entropy on balanced coin flip. """
        # initialize data and expected result
        frame_load = 10 * [['H'], ['T']]
        # A fair two-outcome distribution has entropy ln(2) in nats.
        expected = math.log(2)
        # create the frame
        frame = self.context.frame.create(frame_load,
                                          schema=[("data", str)])
        # call the entropy function
        computed_entropy = frame.entropy("data")
        # test that we get the expected result
        self.assertAlmostEqual(computed_entropy,
                               expected, delta=.001)

    def test_entropy_exponential(self):
        """ Get entropy on exponential distribution. """
        frame_load = [[0, 1], [1, 2], [2, 4], [4, 8]]
        # Expected result from an on-line entropy calculator in base 2
        # (multiplying by ln(2) converts bits to nats).
        expected = 1.640223928941852 * math.log(2)
        # create frame
        frame = self.context.frame.create(frame_load,
                                          schema=[("data", int),
                                                  ("weight", int)])
        # call the entropy function to calculate
        computed_entropy = frame.entropy("data", "weight")
        # compare our sparktk result with the expected result
        self.assertAlmostEqual(computed_entropy, expected)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
yangzilong1986/python | Matafight/0012/ResenWord.py | 38 | 1202 | # -*-coding:utf-8-*-
import string
class senseWord():
    """Loads a list of filtered words and finds them in input text (Python 2)."""

    def __init__(self):
        self.list = []  # the filtered (sensitive) words
        self.word = []  # words actually matched so far
        inputfile = file('filtered_word.txt', 'r')
        for lines in inputfile.readlines():
            # Re-encode each UTF-8 line to GBK for console matching.
            self.list.append(lines.decode('utf-8').encode('gbk'))  # I've set the file coding type as utf-8
        inputfile.close()
        # Strip trailing newlines/whitespace from every loaded word.
        self.list = map(string.strip, self.list)

    def checkWord(self, word):
        # Return True when any filtered word occurs in `word`; every match
        # is appended to self.word for later retrieval via getWord().
        flag = False
        for words in self.list:
            if words in word:
                self.word.append(words)
                flag = True
        return flag

    def getWord(self):
        # Words matched by previous checkWord() calls.
        return self.word
if __name__ == '__main__':
    myCheck = senseWord()
    while True:
        ipstr = str(raw_input())
        if ipstr:
            if(myCheck.checkWord(ipstr)):
                senseList = myCheck.getWord()
                for items in senseList:
                    # Build one '*' per character of the GBK-decoded match.
                    length = len(items.decode('gbk'))
                    torep = '*'
                    for i in range(1, length):
                        torep += '*'
                    ipstr = ipstr.replace(items, torep)
                print ipstr
            else:
                print ipstr
        else:
            # An empty input line terminates the program.
            break
cycotech/WAR-app | env/lib/python3.5/site-packages/django/utils/timesince.py | 64 | 2857 | from __future__ import unicode_literals
import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import ugettext, ungettext_lazy
TIMESINCE_CHUNKS = (
(60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
(60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
(60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
(60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
(60 * 60, ungettext_lazy('%d hour', '%d hours')),
(60, ungettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from
    http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        # Match d's "awareness": use UTC when d is timezone-aware.
        now = datetime.datetime.now(utc if is_aware(d) else None)

    if reversed:
        d, now = now, d
    delta = now - d

    # Deal with leapyears by subtracting the number of leapdays
    leapdays = calendar.leapdays(d.year, now.year)
    if leapdays != 0:
        if calendar.isleap(d.year):
            leapdays -= 1
        elif calendar.isleap(now.year):
            leapdays += 1
        delta -= datetime.timedelta(leapdays)

    # ignore microseconds
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(ugettext('0 minutes'))
    # Find the largest unit with a non-zero count.
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    result = avoid_wrapping(name % count)
    if i + 1 < len(TIMESINCE_CHUNKS):
        # Now get the second item
        seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            result += ugettext(', ') + avoid_wrapping(name2 % count2)
    return result


def timeuntil(d, now=None):
    """
    Like timesince, but returns a string measuring the time until
    the given time.
    """
    return timesince(d, now, reversed=True)
| mit |
tarzan0820/odoo | addons/account_cancel/__openerp__.py | 261 | 1621 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the account_cancel module.
{
    'name': 'Cancel Journal Entries',
    'version': '1.1',
    'author': 'OpenERP SA',
    'category': 'Accounting & Finance',
    'description': """
Allows canceling accounting entries.
====================================
This module adds 'Allow Canceling Entries' field on form view of account journal.
If set to true it allows user to cancel entries & invoices.
""",
    'website': 'https://www.odoo.com/page/accounting',
    # Requires the core accounting module.
    'depends' : ['account'],
    'data': ['account_cancel_view.xml' ],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alexschiller/osf.io | api/users/permissions.py | 14 | 1403 | from website.models import User
from rest_framework import permissions
class ReadOnlyOrCurrentUser(permissions.BasePermission):
    """ Check to see if the request is coming from the currently logged in user,
    and allow non-safe actions if so.
    """

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, User), 'obj must be a User, got {}'.format(obj)
        # Reads are always allowed; writes only against the user's own record.
        if request.method not in permissions.SAFE_METHODS:
            return obj == request.user
        return True
class CurrentUser(permissions.BasePermission):
    """ Check to see if the request is coming from the currently logged user
    """

    def has_permission(self, request, view):
        target_user = view.get_user()
        assert isinstance(target_user, User), 'obj must be a User, got {}'.format(target_user)
        return target_user == request.user
class ReadOnlyOrCurrentUserRelationship(permissions.BasePermission):
    """ Check to see if the request is coming from the currently logged in user,
    and allow non-safe actions if so.
    """

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        # Reads are always allowed; writes only when the relationship's
        # "self" endpoint belongs to the requesting user.
        if request.method not in permissions.SAFE_METHODS:
            return obj['self']._id == request.user._id
        return True
fluxw42/youtube-dl | test/test_download.py | 2 | 10158 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
expect_warnings,
get_params,
gettestcases,
expect_info_dict,
try_rm,
report_warning,
)
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
compat_HTTPError,
)
from youtube_dl.utils import (
DownloadError,
ExtractorError,
format_bytes,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
class YoutubeDL(youtube_dl.YoutubeDL):
    """youtube_dl.YoutubeDL subclass instrumented for the download tests."""

    def __init__(self, *args, **kwargs):
        # Route stderr output through to_screen so all output is unified.
        self.to_stderr = self.to_screen
        # Records every info dict passed through process_info.
        self.processed_info_dicts = []
        super(YoutubeDL, self).__init__(*args, **kwargs)

    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
defs = gettestcases()
class TestDownload(unittest.TestCase):
    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True
    maxDiff = None

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""

        def strclass(cls):
            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
            return '%s.%s' % (cls.__module__, cls.__name__)

        add_ie = getattr(self, self._testMethodName).add_ie
        return '%s (%s)%s:' % (self._testMethodName,
                               strclass(self.__class__),
                               ' [%s]' % add_ie if add_ie else '')

    def setUp(self):
        # Expose the shared extractor test definitions on the instance.
        self.defs = defs
# Dynamically generate tests
def generator(test_case, tname):
    """Build one TestDownload method that runs a single extractor test case."""

    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
        other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
        # A test case with any playlist* key describes a playlist of videos.
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
        if not ie.working():
            print_skipping('IE marked as not _WORKING')
            return

        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            if not (info_dict.get('id') and info_dict.get('ext')):
                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])
            return
        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                return

        params = get_params(test_case.get('params', {}))
        # Prefix output files with the test name so parallel tests don't clash.
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(tc.get('info_dict', {}))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            # Remove output, partial and info-JSON files for the given cases.
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        raise

                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
                assertGreaterEqual(
                    self,
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_count' in test_case:
                self.assertEqual(
                    len(res_dict['entries']),
                    test_case['playlist_count'],
                    'Expected %d entries in playlist %s, but got %d.' % (
                        test_case['playlist_count'],
                        test_case['url'],
                        len(res_dict['entries']),
                    ))
            if 'playlist_duration_sum' in test_case:
                got_duration = sum(e['duration'] for e in res_dict['entries'])
                self.assertEqual(
                    test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                # Now, check downloaded file consistency
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        assertGreaterEqual(
                            self, got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(md5_for_file, tc['md5'])
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with io.open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)

    return test_template
# And add them to TestDownload
# And add them to TestDownload
for n, test_case in enumerate(defs):
    tname = 'test_' + str(test_case['name'])
    i = 1
    # Disambiguate repeated extractor names with a numeric suffix.
    while hasattr(TestDownload, tname):
        tname = 'test_%s_%d' % (test_case['name'], i)
        i += 1
    test_method = generator(test_case, tname)
    test_method.__name__ = str(tname)
    ie_list = test_case.get('add_ie')
    # Comma-joined dependency list (or None) consumed by TestDownload.__str__.
    test_method.add_ie = ie_list and ','.join(ie_list)
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()
| unlicense |
mtagle/airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_extraction.py | 5 | 3458 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TEXT_BUCKET = os.environ.get(
"GCP_AUTOML_TEXT_BUCKET", "gs://cloud-ml-data/NL-entity/dataset.csv"
)
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_extraction_model_metadata": {},
}
# Example dataset
DATASET = {"display_name": "test_text_dataset", "text_extraction_dataset_metadata": {}}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TEXT_BUCKET]}}
default_args = {"start_date": days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Entities Extraction
with models.DAG(
    "example_automl_text",
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
    user_defined_macros={"extract_object_id": extract_object_id},
    tags=['example'],
) as example_dag:
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
    )

    # Jinja-templated XCom pull; resolved at task run time, not at parse time.
    dataset_id = (
        '{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
    )

    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )

    MODEL["dataset_id"] = dataset_id

    create_model = AutoMLTrainModelOperator(
        task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
    )

    model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"

    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )

    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )

    # Linear pipeline: create dataset -> import -> train -> clean up.
    create_dataset_task >> import_dataset_task >> create_model >> \
        delete_model_task >> delete_datasets_task
| apache-2.0 |
bitemyapp/flightcrew | src/googlemock/gtest/test/gtest_output_test.py | 5 | 11611 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
# Env var the gtest runtime reads to control SEH/exception trapping.
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# The golden file differs per platform because message formatting
# (e.g. death tests, file locations) differs between Windows and POSIX.
if IS_WINDOWS:
  GOLDEN_NAME = 'gtest_output_test_golden_win.txt'
else:
  GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# Each COMMAND_* below is a 2-tuple of (extra environment dict, argv list),
# the shape expected by GetShellCommandOutput()/GetCommandOutput().
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
# Sharding is configured purely through environment variables.
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Normalizes every line ending in s to a single UNIX '\n'."""
  # CRLF pairs must be collapsed before lone CRs; doing it the other way
  # round would turn each '\r\n' into two newlines.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveLocations(test_output):
  """Strips file-location prefixes from a Google Test program's output.

  Both the POSIX form 'DIRECTORY/FILE_NAME:LINE_NUMBER: ' and the Windows
  form 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ' are rewritten to the
  canonical, platform-neutral 'FILE_NAME:#: '.
  """
  location_pattern = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_pattern, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Collapses every stack trace in output to a one-line placeholder."""
  # Non-greedy so each trace stops at its own terminating blank line.
  return re.sub(r'Stack trace:.*?\n\n', 'Stack trace: (omitted)\n\n',
                output, flags=re.DOTALL)
def RemoveStackTraces(output):
  """Deletes every stack trace (including its trailing blank line)."""
  # Non-greedy so each trace stops at its own terminating blank line.
  return re.sub(r'Stack trace:.*?\n\n', '', output, flags=re.DOTALL)
def RemoveTime(output):
  """Masks every elapsed-time figure such as '(123 ms' as '(? ms'."""
  elapsed_time = re.compile(r'\(\d+ ms')
  return elapsed_time.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Normalizes compiler-specific type names in Google Test output.

  Some compilers print the type 'unsigned int' simply as 'unsigned';
  rewrite the long spelling so output from both kinds compares equal.
  """
  # The pattern is a plain literal, so str.replace is equivalent to re.sub.
  return test_output.replace('unsigned int', 'unsigned')
def RemoveTestCounts(output):
  """Replaces concrete test/test-case counts with '?' placeholders.

  Substitution order matters: the combined 'N tests from M test cases'
  form must be rewritten before the looser 'N tests from <name>' form.
  """
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern:     A regex string that matches names of test cases or
                 tests to remove.  NOTE(review): it is interpolated into a
                 larger regex unescaped, so regex metacharacters in it are
                 live — callers pass plain identifiers.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop whole [ RUN ] ... [ FAILED / OK ] blocks naming the pattern;
  # (.|\n)*? is non-greedy so each block is removed individually.
  test_output = re.sub(
      r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
  # Then drop any remaining single lines that still mention the pattern
  # (e.g. summary lines).
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Canonicalizes gtest_output_test_ output for golden-file comparison.

  Applies, in order: line-ending normalization, file-location stripping,
  stack-trace collapsing and elapsed-time masking.
  """
  for scrub in (ToUnixLineEnding,
                RemoveLocations,
                RemoveStackTraceDetails,
                RemoveTime):
    output = scrub(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  # gtest_test_utils.Subprocess runs the child to completion and exposes
  # the captured output on .output.
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
  return p.output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    The child's output, canonicalized by NormalizeOutput().
  """
  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  # COMMAND_LIST_TESTS is deliberately excluded: its output feeds the
  # feature-detection globals below rather than the golden file.
  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))
# Run the binary once in list mode to discover which optional features this
# build supports; the presence of the corresponding test names is the probe.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Hard-coded off: traces are stripped from both sides before comparison.
SUPPORTS_STACK_TRACES = False
# The golden file may only be (re)generated by a binary that supports
# everything the golden file records.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            (SUPPORTS_THREADS or IS_WINDOWS))
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares normalized gtest_output_test_ output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Strip golden-file sections for features this build cannot run, so the
    # comparison only covers what both sides actually executed.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()
    # 'rb' keeps the golden bytes exactly as stored so stray \r's can be
    # stripped explicitly below.
    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      # This build supports everything the golden file records, so an
      # exact comparison is valid.
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      # Otherwise compare only the common subset: drop unsupported tests
      # from the golden side and concrete test counts from both sides.
      normalized_actual = RemoveTestCounts(normalized_actual)
      normalized_golden = RemoveTestCounts(self.RemoveUnsupportedTests(
          normalized_golden))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # Golden-file (re)generation mode: only permitted when the binary
    # supports every feature the golden file records.
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests""")
      if IS_WINDOWS:
        message += (
            """\nand typed tests). Please check that you are using VC++ 8.0 SP1
or higher as your compiler.""")
      else:
        message += """\ntyped tests, and threads). Please generate the
golden file using a binary built with those features enabled."""
      sys.stderr.write(message)
      sys.exit(1)
  else:
    # Normal mode: run the comparison test via the shared test runner.
    gtest_test_utils.Main()
| gpl-3.0 |
cloudify-cosmo/softlayer-python | SoftLayer/CLI/loadbal/service_edit.py | 1 | 1738 | """Edit the properties of a service group."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import loadbal
import click
@click.command()
@click.argument('identifier')
@click.option('--enabled / --disabled',
              default=None,
              help="Enable or disable the service")
@click.option('--port',
              help="Change the port number for the service", type=click.INT)
@click.option('--weight',
              type=click.INT,
              help="Change the weight of the service")
@click.option('--healthcheck-type', help="Change the health check type")
@click.option('--ip-address', '--ip', help="Change the IP of the service")
@environment.pass_env
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
    """Edit the properties of a service group."""
    # NOTE: the docstring above doubles as the click command help text.
    mgr = SoftLayer.LoadBalancerManager(env.client)
    # identifier encodes both the load balancer and the service id;
    # loadbal.parse_id splits it into the two parts.
    loadbal_id, service_id = loadbal.parse_id(identifier)

    # check if any input is provided
    if not any([ip_address, enabled, weight, port, healthcheck_type]):
        return 'At least one property is required to be changed!'

    # check if the IP is valid
    # Resolve the dotted-quad address to the SoftLayer IP-address record id
    # that the edit API expects.
    ip_address_id = None
    if ip_address:
        ip_service = env.client['Network_Subnet_IpAddress']
        ip_record = ip_service.getByIpAddress(ip_address)
        ip_address_id = ip_record['id']

    # None-valued options are passed through; presumably the manager skips
    # unchanged properties — TODO confirm against LoadBalancerManager.
    mgr.edit_service(loadbal_id,
                     service_id,
                     ip_address_id=ip_address_id,
                     enabled=enabled,
                     port=port,
                     weight=weight,
                     hc_type=healthcheck_type)
    return 'Load balancer service %s is being modified!' % identifier
| mit |
sda2b/youtube-dl | youtube_dl/extractor/quickvid.py | 113 | 1719 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
determine_ext,
int_or_none,
)
class QuickVidIE(InfoExtractor):
    """Extractor for quickvid.org watch pages."""
    # FIX: regex-bearing literals are now raw strings; the previous plain
    # strings contained invalid escape sequences (\., \s) that raise
    # SyntaxWarning/DeprecationWarning on modern Python.  Runtime values
    # are unchanged (unknown escapes were preserved verbatim).
    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
    _TEST = {
        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
        'info_dict': {
            'id': 'sUQT3RCG8dx',
            'ext': 'mp4',
            'title': 'Nick Offerman\'s Summer Reading Recap',
            'thumbnail': r're:^https?://.*\.(?:png|jpg|gif)$',
            'view_count': int,
        },
        'skip': 'Not accessible from Travis CI server',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
        # View count is optional: fatal=False plus int_or_none yields None
        # when the element is missing or not numeric.
        view_count = int_or_none(self._html_search_regex(
            r'(?s)<div id="views">(.*?)</div>',
            webpage, 'view count', fatal=False))
        video_code = self._search_regex(
            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
        # Each <source> inside the video tag is one downloadable format;
        # relative src URLs are resolved against the page URL.
        formats = [
            {
                'url': compat_urlparse.urljoin(url, src),
                'format_id': determine_ext(src, None),
            } for src in re.findall(r'<source\s+src="([^"]+)"', video_code)
        ]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'view_count': view_count,
        }
| unlicense |
spark0001/spark2.1.1 | examples/src/main/python/ml/stopwords_remover_example.py | 123 | 1434 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StopWordsRemover
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("StopWordsRemoverExample")\
        .getOrCreate()

    # The $example$ markers below delimit the snippet extracted into the
    # Spark ML documentation; keep them intact and unmodified.
    # $example on$
    sentenceData = spark.createDataFrame([
        (0, ["I", "saw", "the", "red", "balloon"]),
        (1, ["Mary", "had", "a", "little", "lamb"])
    ], ["id", "raw"])

    remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
    remover.transform(sentenceData).show(truncate=False)
    # $example off$

    spark.stop()
| apache-2.0 |
liu1xin/aria2app | aria2cmd/serverwrapper.py | 1 | 2728 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import aria2app.a2jsonrpc as a2jsonrpc
import aria2app.a2config as a2config
######################## Convenience Methods for Server#############################
class Aria2ServerWrapper(object):
    """Convenience wrapper that builds an aria2 command line from an
    ini-style config file and manages the JSON-RPC server lifecycle."""

    def __init__(self, cfgfile, token=None):
        # token may be None; in that case the RPC secret is read from the
        # [RPC] section of the config file when the server is started.
        self.token = token
        self.server = a2jsonrpc.Aria2JsonRpcServer(token)
        self.config = a2config.Aria2Config(cfgfile)

    def startA2Server(self, restart=False):
        """Assemble aria2 options from the config file and start the
        daemonized server.

        restart: when True, restart an already-running server.
        Returns the underlying server's start() result.  (FIX: the result
        was previously assigned to an unused local and discarded.)
        """
        cmd = []

        # --- [DEFAULT] section: storage, logging and bandwidth limits ---
        defaults = self.config.getcfgbysection('DEFAULT')
        store_dir = defaults.get('storedir', '')  # renamed: 'dir' shadowed a builtin
        cmd.append('--dir=%s' % store_dir)
        logfile = defaults.get('logfile', None)
        loglevel = defaults.get('loglevel', 'info')
        if logfile:
            cmd.append('--log=%s' % logfile)
            cmd.append('--log-level=%s' % loglevel)
        maxdownloads = defaults.get('maxconcurrentdownloads', 10)
        maxdownlimit = defaults.get('maxoveralldownloadlimit', '10M')
        maxuploadlimit = defaults.get('maxoveralluploadlimit', '10M')
        cmd.append('--max-concurrent-downloads=%d' % int(maxdownloads))
        cmd.append('--max-overall-download-limit=%s' % maxdownlimit)
        cmd.append('--max-overall-upload-limit=%s' % maxuploadlimit)
        cache = defaults.get('cache', '32M')
        fileallocation = defaults.get('fileallocation', 'falloc')
        cmd.append('--disk-cache=%s' % cache)
        cmd.append('--file-allocation=%s' % fileallocation)

        # --- [RPC] section: enable the JSON-RPC interface, daemonize ---
        rpcs = self.config.getcfgbysection('RPC')
        cmd.append('--enable-rpc=true')
        cmd.append('--rpc-listen-all=true')
        cmd.append('-D')
        if self.token:
            cmd.append('--rpc-secret=%s' % self.token)
        else:
            # Fall back to the configured secret and remember it so later
            # RPC clients built from this wrapper can authenticate.
            token = rpcs.get('token', 'abcdef098')
            cmd.append('--rpc-secret=%s' % token)
            self.token = token
        port = rpcs.get('port', '6800')
        cmd.append('--rpc-listen-port=%s' % port)

        self.server.setextendcmd(cmd)
        return self.server.start(restart)

    def stopA2(self):
        """Stop the aria2 server.  Not yet implemented.

        FIX: this method was declared without 'self', so calling it on an
        instance raised TypeError before it could even run.
        """
        pass
################################### Main for Test###################################
def main(args=None):
    """Smoke test: start a raw JSON-RPC server directly (restarting any
    running instance), print the start result, then stop it."""
    token = '11111111'
    ajserver = a2jsonrpc.Aria2JsonRpcServer(token)
    ret = ajserver.start(restart=True)
    print('start ajserver %d' % ret)
    ajserver.stop()
def main1(args=None):
    """Smoke test: start a server through the config-driven wrapper.

    NOTE(review): the config path is hard-coded to a developer machine;
    adjust it before running elsewhere.
    """
    a2server = Aria2ServerWrapper("/home/liuyx/aapp/app/a2config.cnf")
    a2server.startA2Server()
if __name__ == '__main__':
try:
main1()
except KeyboardInterrupt:
pass | bsd-2-clause |
Tesora-Release/tesora-horizon | openstack_dashboard/dashboards/project/volumes/test.py | 27 | 3473 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# URLs resolved once at import time: the volumes index page and the two
# tab endpoints exercised explicitly by _test_index below.
INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_SNAPSHOTS_TAB_URL = reverse('horizon:project:volumes:snapshots_tab')
VOLUME_BACKUPS_TAB_URL = reverse('horizon:project:volumes:backups_tab')
class VolumeAndSnapshotsTests(test.TestCase):
    """Exercises the project volumes index view and its tabs, with and
    without cinder backup support."""

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_list',
                                     'volume_snapshot_list',
                                     'volume_backup_supported',
                                     'volume_backup_list',
                                     ),
                        api.nova: ('server_list',)})
    def _test_index(self, backup_supported=True):
        # Mox expectations below are order-sensitive: they mirror the exact
        # sequence of API calls the index view and each tab issue.
        vol_backups = self.cinder_volume_backups.list()
        vol_snaps = self.cinder_volume_snapshots.list()
        volumes = self.cinder_volumes.list()

        # The view consults backup support more than once while rendering.
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(volumes)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        api.cinder.volume_snapshot_list(
            IsA(http.HttpRequest), search_opts=None).AndReturn(vol_snaps)
        api.cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(vol_snaps)
        api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(volumes)
        if backup_supported:
            # The backups tab is only rendered (and only calls the API)
            # when the backend supports backups.
            api.cinder.volume_backup_list(IsA(http.HttpRequest)).\
                AndReturn(vol_backups)
            api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(volumes)
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')

        # Explicitly load the other tabs. If this doesn't work the test
        # will fail due to "Expected methods never called."
        res = self.client.get(VOLUME_SNAPSHOTS_TAB_URL)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        if backup_supported:
            res = self.client.get(VOLUME_BACKUPS_TAB_URL)
            self.assertTemplateUsed(res, 'project/volumes/index.html')

    def test_index_backup_supported(self):
        self._test_index(backup_supported=True)

    def test_index_backup_not_supported(self):
        self._test_index(backup_supported=False)
| apache-2.0 |
markfinal/BuildAMation | tests/builderactions.py | 1 | 12496 | #!/usr/bin/python
import copy
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
class Builder(object):
    """Class that represents the actions for a builder"""

    def __init__(self, repeat_no_clean):
        # Whether this build mode can meaningfully be re-run without a
        # clean first (used by the harness to exercise incremental builds).
        self.repeat_no_clean = repeat_no_clean
        # Degree of parallelism handed to the underlying build tool.
        self.num_threads = multiprocessing.cpu_count()

    def init(self, options):
        """Hook: one-time setup from command-line options.  Default: no-op."""
        pass

    def pre_action(self):
        """Hook: runs before the build step.  Default: no-op."""
        pass

    def post_action(self, instance, options, output_messages, error_messages):
        """Hook: runs after the build step (e.g. compiles generated
        projects).  Returns a process-style exit code; 0 means success."""
        return 0

    def dump_generated_files(self, instance, options):
        """Hook: logs generated project files for debugging.  Default: no-op."""
        pass

    def _execute_and_capture(self, arg_list, working_dir, output_messages, error_messages):
        """Runs arg_list in working_dir, printing '+' once a second as a
        keep-alive, and appends the child's stdout/stderr to the given
        message buffers.  Returns the child's exit code; re-raises if the
        process could not be launched."""
        locallog("Running '%s' in %s" % (' '.join(arg_list), working_dir))
        return_code = 0
        # Child output is redirected to temp files; they are read back and
        # deleted in the finally block so output is captured even on error.
        out_fd, out_path = tempfile.mkstemp()
        err_fd, err_path = tempfile.mkstemp()
        try:
            # The fdopen wrappers take ownership of the raw fds so both are
            # closed when the with-blocks exit.
            with os.fdopen(out_fd, 'w') as out:
                with os.fdopen(err_fd, 'w') as err:
                    p = subprocess.Popen(arg_list, stdout=out_fd, stderr=err_fd, cwd=working_dir)
                    while p.poll() is None:
                        sys.stdout.write('+') # keep something alive on the console
                        sys.stdout.flush()
                        time.sleep(1)
                    p.wait()
                    locallog('')
                    return_code = p.returncode
        except Exception:
            error_messages.write("Failed to run '%s' in %s" % (' '.join(arg_list), working_dir))
            raise
        finally:
            with open(out_path) as out:
                output_messages.write(out.read())
            with open(err_path) as err:
                error_messages.write(err.read())
            os.remove(out_path)
            os.remove(err_path)
        return return_code
# the version of MSBuild.exe to use, depends on which version of VisualStudio
# was used to build the solution and projects
# by default, VS2013 is assumed
defaultVCVersion = "12.0"

# Maps a VisualC version to the path component used to locate MSBuild.exe:
# a .NET framework folder for VS2008-VS2013, the MSBuild toolset version
# from VS2015 onwards ("Current" for VS2019).
msBuildVersionToNetMapping = {
    "9.0": "v3.5",
    "10.0": "v4.0.30319",
    "11.0": "v4.0.30319",
    "12.0": "v4.0.30319",
    "14.0": "14.0",
    "15.0": "15.0",
    "16" : "Current"
}

# Maps a VisualC version to the VisualStudio product year, used to build
# the VS2017+ installation path.
visualStudioVersionMapping = {
    "15.0": "2017",
    "16" : "2019"
}
def locallog(message):
    """Writes message plus a newline to stdout, flushing immediately so it
    interleaves correctly with child-process output."""
    sys.stdout.write('%s\n' % message)
    sys.stdout.flush()
class NativeBuilder(Builder):
    """Builder for the Native build mode.

    Overrides none of the hooks: there is no generated project to compile
    afterwards.  repeat_no_clean=True marks this mode as safe to re-run
    without cleaning (presumably to exercise incremental builds — see the
    harness consuming repeat_no_clean).
    """

    def __init__(self):
        super(NativeBuilder, self).__init__(True)
class VSSolutionBuilder(Builder):
    """Builder that compiles the generated VisualStudio solution with
    MSBuild for every configuration/platform combination."""

    def __init__(self):
        # Solution generation does not support repeat-without-clean runs.
        super(VSSolutionBuilder, self).__init__(False)
        self._ms_build_path = None

    def _get_visualc_version(self, options):
        """Returns (version string, version parts) for the requested
        VisualC flavour, defaulting to defaultVCVersion when no
        --VisualC.version flavour was passed (or Flavours is None)."""
        visualc_version = defaultVCVersion
        try:
            for f in options.Flavours:
                if f.startswith("--VisualC.version"):
                    visualc_version = f.split("=")[1]
                    break
        except TypeError:  # Flavours can be None
            pass
        return visualc_version, visualc_version.split('.')

    def _get_visualc_ispreview(self, options):
        """Returns True when a preview/pre-release VisualStudio was
        requested via --VisualC.discoverprereleases."""
        # FIX: previously a bare 'except:' swallowed every error and the
        # no-match path implicitly returned None; narrow the handler and
        # return False explicitly.
        try:
            for f in options.Flavours:
                if f.startswith("--VisualC.discoverprereleases"):
                    return True
        except TypeError:  # Flavours can be None
            pass
        return False

    def init(self, options):
        """Locates MSBuild.exe for the requested VisualC version; the
        install location changed in VS2013 and again in VS2017."""
        visualc_version, visualc_version_split = self._get_visualc_version(options)
        visualc_major_version = int(visualc_version_split[0])
        if visualc_major_version >= 15:
            # VS2017+: MSBuild lives inside the VS installation itself.
            visualStudioVersion = visualStudioVersionMapping[visualc_version]
            msbuild_version = msBuildVersionToNetMapping[visualc_version]
            edition = "Preview" if self._get_visualc_ispreview(options) else "Community"
            # 'ProgramFiles(x86)' only exists on 64-bit Windows.
            # (Membership test replaces the Python-2-only dict.has_key.)
            if "ProgramFiles(x86)" in os.environ:
                self._ms_build_path = r"C:\Program Files (x86)\Microsoft Visual Studio\%s\%s\MSBuild\%s\Bin\MSBuild.exe" % (visualStudioVersion, edition, msbuild_version)
            else:
                self._ms_build_path = r"C:\Program Files (x86)\Microsoft Visual Studio\%s\%s\MSBuild\%s\Bin\amd64\MSBuild.exe" % (visualStudioVersion, edition, msbuild_version)
        elif visualc_major_version >= 12:
            # VS2013 onwards path for MSBuild
            if "ProgramFiles(x86)" in os.environ:
                self._ms_build_path = r"C:\Program Files (x86)\MSBuild\%s\bin\MSBuild.exe" % visualc_version
            else:
                self._ms_build_path = r"C:\Program Files\MSBuild\%s\bin\MSBuild.exe" % visualc_version
        else:
            # Pre-VS2013: MSBuild shipped with the .NET framework.
            self._ms_build_path = r"C:\Windows\Microsoft.NET\Framework\%s\MSBuild.exe" % msBuildVersionToNetMapping[visualc_version]

    def post_action(self, instance, options, output_messages, error_messages):
        """Builds the generated solution for every configuration/platform.
        Returns the OR of all MSBuild exit codes (0 iff all succeeded)."""
        exit_code = 0
        build_root = os.path.join(instance.package_path(), options.buildRoot)
        solution_path = os.path.join(build_root, instance.package_name() + ".sln")
        if not os.path.exists(solution_path):
            # TODO: really need something different here - an invalid test result, rather than a failure
            output_messages.write("VisualStudio solution expected at %s did not exist" % solution_path)
            return 0
        for config in options.configurations:
            arg_list = [
                self._ms_build_path,
                "/m:%d" % self.num_threads,
                "/verbosity:quiet",
                solution_path
            ]
            # capitalize the first letter of the configuration
            config = config[0].upper() + config[1:]
            arg_list.append("/p:Configuration=%s" % config)
            for platform in instance.platforms():
                this_arg_list = copy.deepcopy(arg_list)
                this_arg_list.append("/p:Platform=%s" % platform)
                exit_code |= self._execute_and_capture(this_arg_list, build_root, output_messages, error_messages)
        return exit_code

    def dump_generated_files(self, instance, options):
        """Logs the contents of every generated .vcxproj in the build root."""
        build_root = os.path.join(instance.package_path(), options.buildRoot)
        vcxproj_project_path = os.path.join(build_root, "*.vcxproj")
        import glob
        # FIX: glob previously used the undefined name 'xcode_project_path'
        # (copy/paste from XcodeBuilder), raising NameError.
        projects = glob.glob(vcxproj_project_path)
        for project in projects:
            with open(project, 'r') as f:
                file_contents = f.read()
            locallog("------------------------------")
            # FIX: the log line previously referenced the undefined 'path'.
            locallog("VisualStudio project '%s': " % project)
            locallog(file_contents)
class MakeFileBuilder(Builder):
    """Builder that runs the generated Makefile with make (GNU make on
    POSIX, nmake on Windows)."""

    def __init__(self):
        # Makefile generation does not support repeat-without-clean runs.
        super(MakeFileBuilder, self).__init__(False)
        self._make_executable = 'make'
        self._make_args = []

    def init(self, options):
        """Chooses the make tool: locates nmake.exe on Windows, otherwise
        configures GNU make for parallel builds."""
        if sys.platform.startswith("win"):
            # Search Program Files (x86) recursively; first hit wins.
            arg_list = [
                'where',
                '/R',
                os.path.expandvars('%ProgramFiles(x86)%'),
                'nmake.exe'
            ]
            p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (output_stream, error_stream) = p.communicate() # this should WAIT
            if output_stream:
                split = output_stream.splitlines()
                self._make_executable = split[0].strip()
                self._make_args.append('-NOLOGO')
        else:
            # GNU make: parallelize across all available cores.
            self._make_args.append("-j")
            self._make_args.append(str(self.num_threads))

    def post_action(self, instance, options, output_messages, error_messages):
        """Runs make in the generated build folder; returns its exit code."""
        makefile_dir = os.path.join(instance.package_path(), options.buildRoot)
        if not os.path.exists(makefile_dir):
            # TODO: really need something different here - an invalid test result, rather than a failure
            output_messages.write("Expected folder containing MakeFile %s did not exist" % makefile_dir)
            return 0
        # currently do not support building configurations separately
        arg_list = [
            self._make_executable
        ]
        arg_list.extend(self._make_args)
        return self._execute_and_capture(arg_list, makefile_dir, output_messages, error_messages)

    def dump_generated_files(self, instance, options):
        """Logs the contents of the generated Makefile."""
        makefile_dir = os.path.join(instance.package_path(), options.buildRoot)
        path = os.path.join(makefile_dir, 'Makefile')
        f = open(path, 'r')
        file_contents = f.read()
        f.close()
        locallog("------------------------------")
        locallog("Makefile '%s': " % path)
        locallog(file_contents)
class XcodeBuilder(Builder):
    """Builder that compiles the generated Xcode workspace with xcodebuild."""

    def __init__(self):
        # Xcode generation does not support repeat-without-clean runs.
        super(XcodeBuilder, self).__init__(False)

    def post_action(self, instance, options, output_messages, error_messages):
        """Builds every scheme/configuration combination of the generated
        workspace.  Returns the OR of all xcodebuild exit codes."""
        exit_code = 0
        build_root = os.path.join(instance.package_path(), options.buildRoot)
        xcode_workspace_path = os.path.join(build_root, "*.xcworkspace")
        import glob
        workspaces = glob.glob(xcode_workspace_path)
        if not workspaces:
            # TODO: really need something different here - an invalid test result, rather than a failure
            output_messages.write("Xcode workspace expected at %s did not exist" % xcode_workspace_path)
            return 0
        if len(workspaces) > 1:
            output_messages.write("More than one Xcode workspace was found")
            return -1
        # first, list all the schemes available
        arg_list = [
            "xcodebuild",
            "-workspace",
            workspaces[0],
            "-list"
        ]
        locallog("Running '%s'\n" % ' '.join(arg_list))
        p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (output_stream, error_stream) = p.communicate() # this should WAIT
        output_messages.write(output_stream)
        error_messages.write(error_stream)
        # parse the output to get the schemes
        # Scheme names appear one per line after a line starting 'Schemes:'.
        lines = output_stream.split('\n')
        if len(lines) < 3:
            raise RuntimeError("Unable to parse workspace for schemes. \
                Was --Xcode.generateSchemes passed to the Bam build?")
        schemes = []
        has_schemes = False
        for line in lines:
            trimmed = line.strip()
            if has_schemes:
                if trimmed:
                    schemes.append(trimmed)
            elif trimmed.startswith('Schemes:'):
                has_schemes = True
        if not has_schemes or len(schemes) == 0:
            raise RuntimeError("No schemes were extracted from the workspace. \
                Has the project scheme cache been warmed?")
        # iterate over all the schemes and configurations
        for scheme in schemes:
            for config in options.configurations:
                arg_list = [
                    "xcodebuild",
                    "-jobs",
                    str(self.num_threads),
                    "-workspace",
                    workspaces[0],
                    "-scheme",
                    scheme,
                    "-configuration"
                ]
                # capitalize the first letter of the configuration
                config = config[0].upper() + config[1:]
                arg_list.append(config)
                exit_code |= self._execute_and_capture(arg_list, build_root, output_messages, error_messages)
        return exit_code

    def dump_generated_files(self, instance, options):
        """Logs the project.pbxproj of every generated Xcode project."""
        build_root = os.path.join(instance.package_path(), options.buildRoot)
        xcode_project_path = os.path.join(build_root, "*.xcodeproj")
        import glob
        projects = glob.glob(xcode_project_path)
        for project in projects:
            path = os.path.join(project, 'project.pbxproj')
            f = open(path, 'r')
            file_contents = f.read()
            f.close()
            locallog("------------------------------")
            locallog("Xcode project '%s': " % path)
            locallog(file_contents)
# Registry of available build modes, keyed by the builder name given on
# the command line.
builder = {
    "Native": NativeBuilder(),
    "VSSolution": VSSolutionBuilder(),
    "MakeFile": MakeFileBuilder(),
    "Xcode": XcodeBuilder()
}

def get_builder_details(builder_name):
    """Return the Builder associated with the name passed in

    Args:
        builder_name: key into the builder registry, e.g. "Native".

    Raises:
        KeyError: if builder_name is not a registered build mode.
    """
    return builder[builder_name]
| bsd-3-clause |
z00223295/kubernetes | cluster/juju/layers/kubernetes/reactive/k8s.py | 53 | 14370 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shlex import split
from shutil import copy2
from subprocess import check_call
from charms.docker.compose import Compose
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import is_leader
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.host import chdir
from contextlib import contextmanager
@hook('config-changed')
def config_changed():
    '''If the configuration values change, remove the available states.'''
    config = hookenv.config()
    if any(config.changed(key) for key in config.keys()):
        hookenv.log('Configuration options have changed.')
        # Use the Compose class that encapsulates the docker-compose commands.
        compose = Compose('files/kubernetes')
        hookenv.log('Removing kubelet container and kubelet.available state.')
        # Stop and remove the Kubernetes kubelet container..
        compose.kill('kubelet')
        compose.rm('kubelet')
        # Remove the state so the code can react to restarting kubelet.
        remove_state('kubelet.available')
        hookenv.log('Removing proxy container and proxy.available state.')
        # Stop and remove the Kubernetes proxy container.
        compose.kill('proxy')
        compose.rm('proxy')
        # Remove the state so the code can react to restarting proxy.
        remove_state('proxy.available')

    if config.changed('version'):
        # A version change invalidates the cached kubectl binary; clearing
        # the state triggers a fresh download elsewhere in this layer.
        hookenv.log('Removing kubectl.downloaded state so the new version'
                    ' of kubectl will be downloaded.')
        remove_state('kubectl.downloaded')
@when('tls.server.certificate available')
@when_not('k8s.server.certificate available')
def server_cert():
    '''When the server certificate is available, get the server certificate
    from the charm unit data and write it to the proper directory. '''
    destination_directory = '/srv/kubernetes'
    # Save the server certificate from unitdata to /srv/kubernetes/server.crt
    save_certificate(destination_directory, 'server')
    # Copy the unitname.key to /srv/kubernetes/server.key
    copy_key(destination_directory, 'server')
    # Guard state so this handler runs only once per certificate.
    set_state('k8s.server.certificate available')
@when('tls.client.certificate available')
@when_not('k8s.client.certficate available')
def client_cert():
    '''Copy the client certificate (and, on the leader, the client key)
    from the easy-rsa working directory into /srv/kubernetes.'''
    destination_directory = '/srv/kubernetes'
    if not os.path.isdir(destination_directory):
        os.makedirs(destination_directory)
        os.chmod(destination_directory, 0o770)
    # The client certificate is also available on charm unitdata.
    client_cert_path = 'easy-rsa/easyrsa3/pki/issued/client.crt'
    kube_cert_path = os.path.join(destination_directory, 'client.crt')
    if os.path.isfile(client_cert_path):
        # Copy the client.crt to /srv/kubernetes/client.crt
        copy2(client_cert_path, kube_cert_path)
    # The client key is only available on the leader.
    client_key_path = 'easy-rsa/easyrsa3/pki/private/client.key'
    kube_key_path = os.path.join(destination_directory, 'client.key')
    if os.path.isfile(client_key_path):
        # Copy the client.key to /srv/kubernetes/client.key
        copy2(client_key_path, kube_key_path)
    # FIX: the guard state checked by @when_not above was never set, so
    # this handler re-ran on every hook invocation.  The state string
    # deliberately matches the decorator's existing (misspelled)
    # 'certficate' spelling so the guard actually engages.
    set_state('k8s.client.certficate available')
@when('tls.certificate.authority available')
@when_not('k8s.certificate.authority available')
def ca():
    '''Install the Certificate Authority file for this service into the
    Kubernetes certificate directory as ca.crt. '''
    cert_dir = '/srv/kubernetes'
    # Make sure the destination directory exists with group permissions.
    if not os.path.isdir(cert_dir):
        os.makedirs(cert_dir)
        os.chmod(cert_dir, 0o770)
    # The tls layer installs the CA on every unit (not only the leader)
    # under /usr/local/share/ca-certificates, named after the Juju service.
    source_ca = '/usr/local/share/ca-certificates/{0}.crt'.format(
        hookenv.service_name())
    if os.path.isfile(source_ca):
        # Kubernetes expects the CA to be named 'ca.crt' in its directory.
        copy2(source_ca, os.path.join(cert_dir, 'ca.crt'))
    set_state('k8s.certificate.authority available')
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
def final_messaging():
    '''Set a consistent, final status message once all Kubernetes containers
    are running, overriding whatever the lower layers last reported. '''
    role = 'leader' if is_leader() else 'follower'
    status_set('active', 'Kubernetes {0} running'.format(role))
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
@when_not('skydns.available')
def launch_skydns():
    '''Create the SkyDNS replication controller and service on the cluster.
    Only the leader launches them; duplicate SkyDNS rc creation raises an
    error. '''
    if not is_leader():
        return
    # Create the replication controller first, then the service definition.
    for manifest in ('files/manifests/skydns-rc.yml',
                     'files/manifests/skydns-svc.yml'):
        check_call(split('kubectl create -f {0}'.format(manifest)))
    set_state('skydns.available')
@when('docker.available')
@when_not('etcd.available')
def relation_message():
    '''Report that the charm is waiting on an etcd relation; nothing else
    can proceed until that relation is made. '''
    status_set('waiting', 'Waiting for relation to ETCD')
@when('etcd.available', 'tls.server.certificate available')
@when_not('kubelet.available', 'proxy.available')
def master(etcd):
    '''Render the configuration and start the kubernetes-master containers.
    The kubelet container in turn runs a pod that contains the remaining
    master components. '''
    render_files(etcd)
    # Wrapper around docker-compose for the rendered files/kubernetes dir.
    compose = Compose('files/kubernetes')
    status_set('maintenance', 'Starting the Kubernetes kubelet container.')
    compose.up('kubelet')
    set_state('kubelet.available')
    # The api-server listens on the secure port; expose it.
    hookenv.open_port(6443)
    status_set('maintenance', 'Starting the Kubernetes proxy container')
    compose.up('proxy')
    set_state('proxy.available')
    status_set('active', 'Kubernetes started')
@when('proxy.available')
@when_not('kubectl.downloaded')
def download_kubectl():
    '''Download the kubectl binary (matching the configured Kubernetes
    version) to /usr/local/bin so operators can interact with the cluster.'''
    status_set('maintenance', 'Downloading the kubectl binary')
    version = hookenv.config()['version']
    cmd = 'wget -nv -O /usr/local/bin/kubectl https://storage.googleapis.com/' \
          'kubernetes-release/release/{0}/bin/linux/amd64/kubectl'
    cmd = cmd.format(version)
    # Bug fix: the log message previously said 'kubelet', but this command
    # downloads kubectl.
    hookenv.log('Downloading kubectl: {0}'.format(cmd))
    check_call(split(cmd))
    # Make the downloaded binary executable.
    cmd = 'chmod +x /usr/local/bin/kubectl'
    check_call(split(cmd))
    set_state('kubectl.downloaded')
    status_set('active', 'Kubernetes installed')
@when('kubectl.downloaded')
@when_not('kubectl.package.created')
def package_kubectl():
    '''Package the kubectl binary and configuration to a tar file for users
    to consume and interact directly with Kubernetes.'''
    # Only the leader holds the client key, so only it can build the package.
    if not is_leader():
        return
    context = 'default-context'
    cluster_name = 'kubernetes'
    public_address = hookenv.unit_public_ip()
    directory = '/srv/kubernetes'
    key = 'client.key'
    ca = 'ca.crt'
    cert = 'client.crt'
    user = 'ubuntu'
    port = '6443'
    # All kubectl config commands below write to {directory}/config; the
    # relative file names (ca.crt, client.*) resolve inside this chdir.
    with chdir(directory):
        # Create the config file with the external address for this server.
        cmd = 'kubectl config set-cluster --kubeconfig={0}/config {1} ' \
              '--server=https://{2}:{3} --certificate-authority={4}'
        check_call(split(cmd.format(directory, cluster_name, public_address,
                                    port, ca)))
        # Create the credentials.
        cmd = 'kubectl config set-credentials --kubeconfig={0}/config {1} ' \
              '--client-key={2} --client-certificate={3}'
        check_call(split(cmd.format(directory, user, key, cert)))
        # Create a default context with the cluster.
        cmd = 'kubectl config set-context --kubeconfig={0}/config {1}' \
              ' --cluster={2} --user={3}'
        check_call(split(cmd.format(directory, context, cluster_name, user)))
        # Now make the config use this new context.
        cmd = 'kubectl config use-context --kubeconfig={0}/config {1}'
        check_call(split(cmd.format(directory, context)))
        # Copy the kubectl binary to this directory
        cmd = 'cp -v /usr/local/bin/kubectl {0}'.format(directory)
        check_call(split(cmd))
        # Create an archive with all the necessary files.
        cmd = 'tar -cvzf /home/ubuntu/kubectl_package.tar.gz ca.crt client.crt client.key config kubectl'  # noqa
        check_call(split(cmd))
        set_state('kubectl.package.created')
@when('proxy.available')
@when_not('cadvisor.available')
def start_cadvisor():
    '''Bring up the cAdvisor container, which reports metrics about the
    other application containers running on this host. '''
    Compose('files/kubernetes').up('cadvisor')
    set_state('cadvisor.available')
    status_set('active', 'cadvisor running on port 8088')
    hookenv.open_port(8088)
@when('sdn.available')
def gather_sdn_data():
    '''Return a dict of template context derived from the Software Defined
    Network (SDN) provider, or an empty dict when no subnet is published.'''
    # SDN providers publish their data through the unitdata.kv store.
    subnet = unitdata.kv().get('sdn_subnet')
    if not subnet:
        return {}
    # Derive a DNS server address from the subnet: keep the network octets
    # (e.g. '10.1.0.0/16' -> '10.1.0') and use .10 as the host octet.
    network_octets = subnet.split('/')[0].split('.')[0:-1]
    return {'dns_server': '.'.join(network_octets) + '.10'}
def copy_key(directory, prefix):
    '''Copy this unit's private key from the local easy-rsa tree into
    `directory`, renaming it to '<prefix>.key'. '''
    if not os.path.isdir(directory):
        os.makedirs(directory)
        os.chmod(directory, 0o770)
    # Juju unit names contain a '/', which is not valid in a file name.
    unit_name = hookenv.local_unit().replace('/', '_')
    # The key is not in unitdata; easy-rsa keeps it on the local disk.
    source = 'easy-rsa/easyrsa3/pki/private/{0}.key'.format(unit_name)
    destination = os.path.join(directory, '{0}.key'.format(prefix))
    copy2(source, destination)
def render_files(reldata=None):
    '''Render the docker-compose.yml and the Kubernetes manifests with the
    dynamic configuration data (charm config, SDN data and, optionally, the
    etcd relation's connection string).'''
    context = {}
    # Assemble the template context from the SDN layer and the charm config.
    context.update(gather_sdn_data())
    context.update(hookenv.config())
    if reldata:
        context.update({'connection_string': reldata.connection_string()})
    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)
    # The docker-compose template needs to know where the manifests live.
    context.update({'manifest_directory': rendered_manifest_dir,
                    'private_address': hookenv.unit_get('private-address')})
    # Template name -> directory it renders into. docker-compose.yml defines
    # the kubelet and proxy containers; master.json configures the apiserver,
    # controller and controller-manager; the skydns files define the SkyDNS
    # service and its replication controller.
    targets = (
        ('docker-compose.yml', rendered_kube_dir),
        ('master.json', rendered_manifest_dir),
        ('skydns-svc.yml', rendered_manifest_dir),
        ('skydns-rc.yml', rendered_manifest_dir),
    )
    for template, destination_dir in targets:
        render(template, os.path.join(destination_dir, template), context)
def save_certificate(directory, prefix):
    '''Fetch the '<prefix>' certificate from the charm unitdata key/value
    store and write it to `directory` as '<prefix>.crt'. '''
    if not os.path.isdir(directory):
        os.makedirs(directory)
        os.chmod(directory, 0o770)
    # Certificates live in unitdata under the key 'tls.<prefix>.certificate'.
    certificate_data = unitdata.kv().get('tls.{0}.certificate'.format(prefix))
    certificate_path = os.path.join(directory, '{0}.crt'.format(prefix))
    with open(certificate_path, 'w') as fp:
        fp.write(certificate_data)
| apache-2.0 |
yewang15215/django | tests/gis_tests/layermap/tests.py | 7 | 14506 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from copy import copy
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.db import connection
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.utils._os import upath
if HAS_GEOS and HAS_GDAL:
from django.contrib.gis.utils.layermapping import (
LayerMapping, LayerMapError, InvalidDecimal, InvalidString,
MissingForeignKey,
)
from django.contrib.gis.gdal import DataSource
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping,
)
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
@skipUnlessDBFeature("gis_enabled")
class LayerMapTest(TestCase):
    """Exercise LayerMapping, the utility that imports vector data from
    OGR-readable sources (shapefiles here) into GeoDjango models."""

    def test_init(self):
        "Testing LayerMapping initialization."
        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'
        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'
        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'
        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            with self.assertRaises(LayerMapError):
                LayerMapping(City, city_shp, bad_map)
        # A LookupError should be thrown for bogus encodings.
        with self.assertRaises(LookupError):
            LayerMapping(City, city_shp, city_mapping, encoding='foobar')

    def test_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()
        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())
        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)
            # Comparing the geometries.
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 5)

    def test_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        with self.assertRaises(InvalidDecimal):
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        Interstate.objects.all().delete()
        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)
        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())
        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)
        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)
            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)

    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name)  # Checking ForeignKey mapping.
            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())

    def test_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.
        # Telling LayerMapping that we want no transformations performed on the data.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False)
        # Specifying the source spatial reference system via the `source_srs` keyword.
        lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
        lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
        # Unique may take tuple or string parameters.
        for arg in ('name', ('name', 'mpoly')):
            lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
        # Now test for failures
        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
            with self.assertRaises(e):
                LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
        # No source reference system defined in the shapefile, should raise an error.
        if connection.features.supports_transform:
            with self.assertRaises(LayerMapError):
                LayerMapping(County, co_shp, co_mapping)
        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping)
        bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping)
        bad_fk_map2['state'] = {'nombre': 'State'}
        with self.assertRaises(TypeError):
            LayerMapping(County, co_shp, bad_fk_map1, transform=False)
        with self.assertRaises(LayerMapError):
            LayerMapping(County, co_shp, bad_fk_map2, transform=False)
        # There exist no State models for the ForeignKey mapping to work -- should raise
        # a MissingForeignKey exception (this error would be ignored if the `strict`
        # keyword is not set).
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        with self.assertRaises(MissingForeignKey):
            lm.save(silent=True, strict=True)
        # Now creating the state models so the ForeignKey mapping may work.
        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])
        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them. For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        # `transform=False`: Specifies that no transform is to be done; this
        # has the effect of ignoring the spatial reference check (because the
        # county shapefile does not have implicit spatial reference info).
        #
        # `unique='name'`: Creates models on the condition that they have
        # unique county names; geometries from each feature however will be
        # appended to the geometry collection of the unique model. Thus,
        # all of the various islands in Honolulu county will be in one
        # database record with a MULTIPOLYGON type.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        lm.save(silent=True, strict=True)
        # A reference that doesn't use the unique keyword; a new database record
        # will be created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)
        # The county helper is called to ensure integrity of County models.
        self.county_helper()

    # NOTE(review): the doubled 'test_test_' prefix below looks accidental,
    # but renaming the method would change its test ID, so it is kept as-is.
    def test_test_fid_range_step(self):
        "Tests the `fid_range` keyword and the `step` keyword of .save()."
        # Function for clearing out all the counties before testing.
        def clear_counties():
            County.objects.all().delete()

        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])
        # Initializing the LayerMapping object to use in these tests.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        # Bad feature id ranges should raise a type error.
        bad_ranges = (5.0, 'foo', co_shp)
        for bad in bad_ranges:
            with self.assertRaises(TypeError):
                lm.save(fid_range=bad)
        # Step keyword should not be allowed w/`fid_range`.
        fr = (3, 5)  # layer[3:5]
        with self.assertRaises(LayerMapError):
            lm.save(fid_range=fr, step=10)
        lm.save(fid_range=fr)
        # Features IDs 3 & 4 are for Galveston County, Texas -- only
        # one model is returned because the `unique` keyword was set.
        qs = County.objects.all()
        self.assertEqual(1, qs.count())
        self.assertEqual('Galveston', qs[0].name)
        # Features IDs 5 and beyond for Honolulu County, Hawaii, and
        # FID 0 is for Pueblo County, Colorado.
        clear_counties()
        lm.save(fid_range=slice(5, None), silent=True, strict=True)  # layer[5:]
        lm.save(fid_range=slice(None, 1), silent=True, strict=True)  # layer[:1]
        # Only Pueblo & Honolulu counties should be present because of
        # the `unique` keyword. Have to set `order_by` on this QuerySet
        # or else MySQL will return a different ordering than the other dbs.
        qs = County.objects.order_by('name')
        self.assertEqual(2, qs.count())
        hi, co = tuple(qs)
        hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
        self.assertEqual('Pueblo', co.name)
        self.assertEqual(NUMS[co_idx], len(co.mpoly))
        self.assertEqual('Honolulu', hi.name)
        self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
        # Testing the `step` keyword -- should get the same counties
        # regardless of we use a step that divides equally, that is odd,
        # or that is larger than the dataset.
        for st in (4, 7, 1000):
            clear_counties()
            lm.save(step=st, strict=True)
            self.county_helper(county_feat=False)

    def test_model_inheritance(self):
        "Tests LayerMapping on inherited models. See #12093."
        icity_mapping = {'name': 'Name',
                         'population': 'Population',
                         'density': 'Density',
                         'point': 'POINT',
                         'dt': 'Created',
                         }
        # Parent model has geometry field.
        lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
        lm1.save()
        # Grandparent has geometry field.
        lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
        lm2.save()
        self.assertEqual(6, ICity1.objects.count())
        self.assertEqual(3, ICity2.objects.count())

    def test_invalid_layer(self):
        "Tests LayerMapping on invalid geometries. See #15378."
        invalid_mapping = {'point': 'POINT'}
        lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
                          source_srs=4326)
        lm.save(silent=True)

    def test_charfield_too_short(self):
        # A value longer than the target CharField should raise InvalidString.
        mapping = copy(city_mapping)
        mapping['name_short'] = 'Name'
        lm = LayerMapping(City, city_shp, mapping)
        with self.assertRaises(InvalidString):
            lm.save(silent=True, strict=True)

    def test_textfield(self):
        "String content fits also in a TextField"
        mapping = copy(city_mapping)
        mapping['name_txt'] = 'Name'
        lm = LayerMapping(City, city_shp, mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 3)
        self.assertEqual(City.objects.get(name='Houston').name_txt, "Houston")

    def test_encoded_name(self):
        """ Test a layer containing utf-8-encoded name """
        city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 1)
        self.assertEqual(City.objects.all()[0].name, "Zürich")
class OtherRouter(object):
    """Database router that pins every read and write to the 'other' alias.

    Used to verify that LayerMapping honours Django's database routing.
    """

    def db_for_read(self, model, **hints):
        # All reads go to the 'other' database.
        return 'other'

    def db_for_write(self, model, **hints):
        # Writes follow the same routing as reads.
        return self.db_for_read(model, **hints)

    def allow_relation(self, obj1, obj2, **hints):
        # No opinion; defer to other routers / the default behaviour.
        return None

    def allow_migrate(self, db, app_label, **hints):
        # Allow migrations on every database.
        return True
@skipUnlessDBFeature("gis_enabled")
@override_settings(DATABASE_ROUTERS=[OtherRouter()])
class LayerMapRouterTest(TestCase):
    # Needs a second configured database alias for the router to point at.
    @unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
    def test_layermapping_default_db(self):
        # LayerMapping should pick up the 'other' alias from the router.
        lm = LayerMapping(City, city_shp, city_mapping)
        self.assertEqual(lm.using, 'other')
| bsd-3-clause |
rdhyee/PyTables | tables/parameters.py | 3 | 14633 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: February 25, 2005
# Author: Ivan Vilata - reverse:net.selidor@ivan
#
# $Id$
#
########################################################################
"""Parameters for PyTables."""
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
_KB = 1024
"""The size of a Kilobyte in bytes"""
_MB = 1024 * _KB
"""The size of a Megabyte in bytes"""
# Tunable parameters
# ==================
# Be careful when touching these!
# Parameters for different internal caches
# ----------------------------------------
BOUNDS_MAX_SIZE = 1 * _MB
"""The maximum size for bounds values cached during index lookups."""
BOUNDS_MAX_SLOTS = 4 * _KB
"""The maximum number of slots for the BOUNDS cache."""
ITERSEQ_MAX_ELEMENTS = 1 * _KB
"""The maximum number of iterator elements cached in data lookups."""
ITERSEQ_MAX_SIZE = 1 * _MB
"""The maximum space that will take ITERSEQ cache (in bytes)."""
ITERSEQ_MAX_SLOTS = 128
"""The maximum number of slots in ITERSEQ cache."""
LIMBOUNDS_MAX_SIZE = 256 * _KB
"""The maximum size for the query limits (for example, ``(lim1, lim2)``
in conditions like ``lim1 <= col < lim2``) cached during index lookups
(in bytes)."""
LIMBOUNDS_MAX_SLOTS = 128
"""The maximum number of slots for LIMBOUNDS cache."""
TABLE_MAX_SIZE = 1 * _MB
"""The maximum size for table chunks cached during index queries."""
SORTED_MAX_SIZE = 1 * _MB
"""The maximum size for sorted values cached during index lookups."""
SORTEDLR_MAX_SIZE = 8 * _MB
"""The maximum size for chunks in last row cached in index lookups (in
bytes)."""
SORTEDLR_MAX_SLOTS = 1 * _KB
"""The maximum number of chunks for SORTEDLR cache."""
# Parameters for general cache behaviour
# --------------------------------------
#
# The next parameters will not be effective if passed to the
# `open_file()` function (so, they can only be changed in a *global*
# way). You can change them in the file, but this is strongly
# discouraged unless you know well what you are doing.
DISABLE_EVERY_CYCLES = 10
"""The number of cycles in which a cache will be forced to be disabled
if the hit ratio is lower than the LOWEST_HIT_RATIO (see below). This
value should provide time enough to check whether the cache is being
efficient or not."""
ENABLE_EVERY_CYCLES = 50
"""The number of cycles in which a cache will be forced to be
(re-)enabled, regardless of the hit ratio. This will provide a chance
for checking if we are in a better scenario for doing caching again."""
LOWEST_HIT_RATIO = 0.6
"""The minimum acceptable hit ratio for a cache to avoid disabling (and
freeing) it."""
# Tunable parameters
# ==================
# Be careful when touching these!
# Recommended maximum values
# --------------------------
# Following are the recommended values for several limits. However,
# these limits are somewhat arbitrary and can be increased if you have
# enough resources.
MAX_COLUMNS = 512
"""Maximum number of columns in :class:`tables.Table` objects before a
:exc:`tables.PerformanceWarning` is issued. This limit is somewhat
arbitrary and can be increased.
"""
MAX_NODE_ATTRS = 4 * _KB
"""Maximum allowed number of attributes in a node."""
MAX_GROUP_WIDTH = 16 * _KB
"""Maximum allowed number of children hanging from a group."""
MAX_TREE_DEPTH = 2 * _KB
"""Maximum depth in object tree allowed."""
MAX_UNDO_PATH_LENGTH = 10 * _KB
"""Maximum length of paths allowed in undo/redo operations."""
# Cache limits
# ------------
COND_CACHE_SLOTS = 128
"""Maximum number of conditions for table queries to be kept in memory."""
CHUNK_CACHE_NELMTS = 521
"""Number of elements for HDF5 chunk cache."""
CHUNK_CACHE_PREEMPT = 0.0
"""Chunk preemption policy. This value should be between 0 and 1
inclusive and indicates how much chunks that have been fully read are
favored for preemption. A value of zero means fully read chunks are
treated no differently than other chunks (the preemption is strictly
LRU) while a value of one means fully read chunks are always preempted
before other chunks."""
CHUNK_CACHE_SIZE = 2 * _MB
"""Size (in bytes) for HDF5 chunk cache."""
# Size for new metadata cache system
METADATA_CACHE_SIZE = 1 * _MB # 1 MB is the default for HDF5
"""Size (in bytes) of the HDF5 metadata cache."""
# NODE_CACHE_SLOTS tells the number of nodes that fits in the cache.
#
# There are several forces driving the election of this number:
# 1.- As more nodes, better chances to re-use nodes
# --> better performance
# 2.- As more nodes, the re-ordering of the LRU cache takes more time
# --> less performance
# 3.- As more nodes, the memory needs for PyTables grows, specially for table
# writings (that could take double of memory than table reads!).
#
# The default value here is quite conservative. If you have a system
# with tons of memory, and if you are touching regularly a very large
# number of leaves, try increasing this value and see if it fits better
# for you. Please report back your feedback.
NODE_CACHE_SLOTS = 64
"""Maximum number of nodes to be kept in the metadata cache.
It is the number of nodes to be kept in the metadata cache. Least recently
used nodes are unloaded from memory when this number of loaded nodes is
reached. To load a node again, simply access it as usual.
Nodes referenced by user variables and, in general, all nodes that are still
open are registered in the node manager and can be quickly accessed even
if they are not in the cache.
Negative value means that all the touched nodes will be kept in an
internal dictionary. This is the faster way to load/retrieve nodes.
However, in order to avoid large memory consumption, the user will
be warned when the number of loaded nodes will reach the
``-NODE_CACHE_SLOTS`` value.
Finally, a value of zero means that any cache mechanism is disabled.
"""
# Parameters for the I/O buffer in `Leaf` objects
# -----------------------------------------------
IO_BUFFER_SIZE = 1 * _MB
"""The PyTables internal buffer size for I/O purposes. Should not
exceed the amount of highest level cache size in your CPU."""
BUFFER_TIMES = 100
"""The maximum buffersize/rowsize ratio before issuing a
:exc:`tables.PerformanceWarning`."""
# Miscellaneous
# -------------
EXPECTED_ROWS_EARRAY = 1000
"""Default expected number of rows for :class:`EArray` objects."""
EXPECTED_ROWS_VLARRAY = 1000
"""Default expected number of rows for :class:`VLArray` objects.
.. versionadded:: 3.0
"""
EXPECTED_ROWS_TABLE = 10000
"""Default expected number of rows for :class:`Table` objects."""
PYTABLES_SYS_ATTRS = True
"""Set this to ``False`` if you don't want to create PyTables system
attributes in datasets. Also, if set to ``False`` the possible existing
system attributes are not considered for guessing the class of the node
during its loading from disk (this work is delegated to the PyTables'
class discoverer function for general HDF5 files)."""
MAX_NUMEXPR_THREADS = 2
"""The maximum number of threads that PyTables should use internally in
Numexpr. If `None`, it is automatically set to the number of cores in
your machine. In general, it is a good idea to set this to the number of
cores in your machine or, when your machine has many of them (e.g. > 8),
perhaps stay at 8 at maximum. In general, 2 threads is a good tradeoff."""
MAX_BLOSC_THREADS = 2
"""The maximum number of threads that PyTables should use internally in
Blosc. If `None`, it is automatically set to the number of cores in
your machine. In general, it is a good idea to set this to the number of
cores in your machine or, when your machine has many of them (e.g. > 8),
perhaps stay at 8 at maximum. In general, 2 threads is a good tradeoff."""
USER_BLOCK_SIZE = 0
"""Sets the user block size of a file.
The default user block size is 0; it may be set to any power of 2 equal
to 512 or greater (512, 1024, 2048, etc.).
.. versionadded:: 3.0
"""
# HDF5 driver management
# ----------------------
DRIVER = None
"""The HDF5 driver that should be used for reading/writing to the file.
Following drivers are supported:
* H5FD_SEC2: this driver uses POSIX file-system functions like read
and write to perform I/O to a single, permanent file on local
disk with no system buffering.
This driver is POSIX-compliant and is the default file driver for
all systems.
* H5FD_DIRECT: this is the H5FD_SEC2 driver except data is written
to or read from the file synchronously without being cached by
the system.
* H5FD_WINDOWS: this driver was modified in HDF5-1.8.8 to be a
wrapper of the POSIX driver, H5FD_SEC2. This change should not
affect user applications.
* H5FD_STDIO: this driver uses functions from the standard C
stdio.h to perform I/O to a single, permanent file on local disk
with additional system buffering.
* H5FD_CORE: with this driver, an application can work with a file
in memory for faster reads and writes. File contents are kept in
memory until the file is closed. At closing, the memory version
of the file can be written back to disk or abandoned.
* H5FD_SPLIT: this file driver splits a file into two parts.
One part stores metadata, and the other part stores raw data.
This splitting a file into two parts is a limited case of the
Multi driver.
The following drivers are not currently supported:
* H5FD_LOG: this is the H5FD_SEC2 driver with logging capabilities.
* H5FD_FAMILY: with this driver, the HDF5 file’s address space is
partitioned into pieces and sent to separate storage files using
an underlying driver of the user’s choice.
This driver is for systems that do not support files larger than
2 gigabytes.
* H5FD_MULTI: with this driver, data can be stored in multiple
files according to the type of the data. I/O might work better if
data is stored in separate files based on the type of data.
The Split driver is a special case of this driver.
* H5FD_MPIO: this is the standard HDF5 file driver for parallel
file systems. This driver uses the MPI standard for both
communication and file I/O.
* H5FD_MPIPOSIX: this parallel file system driver uses MPI for
communication and POSIX file-system calls for file I/O.
* H5FD_STREAM: this driver is no longer available.
.. seealso:: the `Drivers section`_ of the `HDF5 User's Guide`_ for
more information.
.. note::
not all supported drivers are always available. For example the
H5FD_WINDOWS driver is not available on non Windows platforms.
If the user try to use a driver that is not available on the target
platform a :exc:`RuntimeError` is raised.
.. versionadded:: 3.0
.. _`Drivers section`:
http://www.hdfgroup.org/HDF5/doc/UG/08_TheFile.html#Drivers
.. _`HDF5 User's Guide`: http://www.hdfgroup.org/HDF5/doc/UG/index.html
"""
DRIVER_DIRECT_ALIGNMENT = 0
"""Specifies the required alignment boundary in memory.
A value of 0 (zero) means to use HDF5 Library’s default value.
.. versionadded:: 3.0
"""
DRIVER_DIRECT_BLOCK_SIZE = 0
"""Specifies the file system block size.
A value of 0 (zero) means to use HDF5 Library’s default value of 4KB.
.. versionadded:: 3.0
"""
DRIVER_DIRECT_CBUF_SIZE = 0
"""Specifies the copy buffer size.
A value of 0 (zero) means to use HDF5 Library’s default value.
.. versionadded:: 3.0
"""
# DRIVER_LOG_FLAGS = 0x0001ffff
#"""Flags specifying the types of logging activity.
#
#.. versionadded:: 3.0
#
#.. seeealso::
# http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFaplLog
#
#"""
#
# DRIVER_LOG_BUF_SIZE = 4 * _KB
#"""The size of the logging buffers, in bytes.
#
# One buffer of size DRIVER_LOG_BUF_SIZE will be created for each of
# H5FD_LOG_FILE_READ, H5FD_LOG_FILE_WRITE and H5FD_LOG_FLAVOR when those
# flags are set; these buffers will not grow as the file increases in
# size.
#
#.. versionadded:: 3.0
#
#"""
DRIVER_CORE_INCREMENT = 64 * _KB
"""Core driver memory increment.
Specifies the increment by which allocated memory is to be increased
each time more memory is required.
.. versionadded:: 3.0
"""
DRIVER_CORE_BACKING_STORE = 1
"""Enables backing store for the core driver.
With the H5FD_CORE driver, if the DRIVER_CORE_BACKING_STORE is set
to 1 (True), the file contents are flushed to a file with the same name
as this core file when the file is closed or access to the file is
terminated in memory.
The application is allowed to open an existing file with H5FD_CORE
driver. In that case, if the DRIVER_CORE_BACKING_STORE is set to 1 and
the flags for :func:`tables.open_file` is set to H5F_ACC_RDWR, any change
to the file contents are saved to the file when the file is closed.
If backing_store is set to 0 and the flags for :func:`tables.open_file`
is set to H5F_ACC_RDWR, any change to the file contents will be lost
when the file is closed. If the flags for :func:`tables.open_file` is
set to H5F_ACC_RDONLY, no change to the file is allowed either in
memory or on file.
.. versionadded:: 3.0
"""
DRIVER_CORE_IMAGE = None
"""String containing an HDF5 file image.
If this option is passed to the :func:`tables.open_file` function then the
returned file object is set up using the specified image.
A file image can be retrieved from an existing (and opened) file object
using the :meth:`tables.File.get_file_image` method.
.. note:: requires HDF5 >= 1.8.9.
.. versionadded:: 3.0
"""
DRIVER_SPLIT_META_EXT = '-m.h5'
"""The extension for the metadata file used by the H5FD_SPLIT driver.
If this option is passed to the :func:`tables.openFile` function along
with driver='H5FD_SPLIT', the extension is appended to the name passed
as the first parameter to form the name of the metadata file. If the
string '%s' is used in the extension, the metadata file name is formed
by replacing '%s' with the name passed as the first parameter instead.
.. versionadded:: 3.1
"""
DRIVER_SPLIT_RAW_EXT = '-r.h5'
"""The extension for the raw data file used by the H5FD_SPLIT driver.
If this option is passed to the :func:`tables.openFile` function along
with driver='H5FD_SPLIT', the extension is appended to the name passed
as the first parameter to form the name of the raw data file. If the
string '%s' is used in the extension, the raw data file name is formed
by replacing '%s' with the name passed as the first parameter instead.
.. versionadded:: 3.1
"""
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| bsd-3-clause |
Belxjander/Kirito | Python-3.5.0-Amiga/Lib/typing.py | 3 | 53427 | # TODO nits:
# Get rid of asserts that are the caller's fault.
# Docstrings (e.g. ABCs).
import abc
from abc import abstractmethod, abstractproperty
import collections
import functools
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
# Please keep __all__ alphabetized within each category.
# This is the module's public API; names not listed here are internal.
__all__ = [
    # Super-special typing primitives.
    'Any',
    'Callable',
    'Generic',
    'Optional',
    'TypeVar',
    'Union',
    'Tuple',
    # ABCs (from collections.abc).
    'AbstractSet',  # collections.abc.Set.
    'ByteString',
    'Container',
    'Hashable',
    'ItemsView',
    'Iterable',
    'Iterator',
    'KeysView',
    'Mapping',
    'MappingView',
    'MutableMapping',
    'MutableSequence',
    'MutableSet',
    'Sequence',
    'Sized',
    'ValuesView',
    # Structural checks, a.k.a. protocols.
    'Reversible',
    'SupportsAbs',
    'SupportsFloat',
    'SupportsInt',
    'SupportsRound',
    # Concrete collection types.
    'Dict',
    'List',
    'Set',
    'NamedTuple',  # Not really a type.
    'Generator',
    # One-off things.
    'AnyStr',
    'cast',
    'get_type_hints',
    'no_type_check',
    'no_type_check_decorator',
    'overload',
    # Submodules.
    'io',
    're',
]
def _qualname(x):
if sys.version_info[:2] >= (3, 3):
return x.__qualname__
else:
# Fall back to just name.
return x.__name__
class TypingMeta(type):
    """Metaclass for every type defined below.
    This overrides __new__() to require an extra keyword parameter
    '_root', which serves as a guard against naive subclassing of the
    typing classes. Any legitimate class defined using a metaclass
    derived from TypingMeta (including internal subclasses created by
    e.g. Union[X, Y]) must pass _root=True.
    This also defines a dummy constructor (all the work is done in
    __new__) and a nicer repr().
    """
    # Overridden to True by the protocol metaclass; plain typing
    # constructs are not protocols.
    _is_protocol = False
    def __new__(cls, name, bases, namespace, *, _root=False):
        # Reject user subclassing: only internal code passes _root=True.
        if not _root:
            raise TypeError("Cannot subclass %s" %
                            (', '.join(map(_type_repr, bases)) or '()'))
        return super().__new__(cls, name, bases, namespace)
    def __init__(self, *args, **kwds):
        # Intentionally a no-op: all initialization happens in __new__.
        pass
    def _eval_type(self, globalns, localns):
        """Override this in subclasses to interpret forward references.
        For example, Union['C'] is internally stored as
        Union[_ForwardRef('C')], which should evaluate to _Union[C],
        where C is an object found in globalns or localns (searching
        localns first, of course).
        """
        return self
    def _has_type_var(self):
        # Base case: a bare typing construct contains no type variables.
        return False
    def __repr__(self):
        return '%s.%s' % (self.__module__, _qualname(self))
class Final:
    """Mix-in class that blocks instantiation of classes using it."""
    # Instances are never created, so no per-instance dict is needed.
    __slots__ = ()
    def __new__(self, *args, **kwds):
        # NOTE: __new__'s first argument is actually the class object.
        message = "Cannot instantiate %r" % self.__class__
        raise TypeError(message)
class _ForwardRef(TypingMeta):
    """Wrapper to hold a forward reference.

    Created for string annotations such as Union['C']; the string is
    compiled eagerly (to catch syntax errors) and evaluated lazily via
    _eval_type() or __subclasscheck__().
    """
    def __new__(cls, arg):
        if not isinstance(arg, str):
            raise TypeError('ForwardRef must be a string -- got %r' % (arg,))
        # Compile now so malformed strings fail at definition time.
        try:
            code = compile(arg, '<string>', 'eval')
        except SyntaxError:
            raise SyntaxError('ForwardRef must be an expression -- got %r' %
                              (arg,))
        self = super().__new__(cls, arg, (), {}, _root=True)
        self.__forward_arg__ = arg
        self.__forward_code__ = code
        self.__forward_evaluated__ = False
        self.__forward_value__ = None
        # Capture the first calling frame outside this module, so the
        # reference can later be resolved in the caller's namespace.
        typing_globals = globals()
        frame = sys._getframe(1)
        while frame is not None and frame.f_globals is typing_globals:
            frame = frame.f_back
        assert frame is not None
        self.__forward_frame__ = frame
        return self
    def _eval_type(self, globalns, localns):
        if not isinstance(localns, dict):
            raise TypeError('ForwardRef localns must be a dict -- got %r' %
                            (localns,))
        if not isinstance(globalns, dict):
            raise TypeError('ForwardRef globalns must be a dict -- got %r' %
                            (globalns,))
        # Evaluate at most once; the result is cached on the instance.
        if not self.__forward_evaluated__:
            if globalns is None and localns is None:
                globalns = localns = {}
            elif globalns is None:
                globalns = localns
            elif localns is None:
                localns = globalns
            self.__forward_value__ = _type_check(
                eval(self.__forward_code__, globalns, localns),
                "Forward references must evaluate to types.")
            self.__forward_evaluated__ = True
        return self.__forward_value__
    def __instancecheck__(self, obj):
        raise TypeError("Forward references cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        # Resolve lazily in the captured frame's namespace if needed.
        if not self.__forward_evaluated__:
            globalns = self.__forward_frame__.f_globals
            localns = self.__forward_frame__.f_locals
            try:
                self._eval_type(globalns, localns)
            except NameError:
                return False  # Too early.
        return issubclass(cls, self.__forward_value__)
    def __repr__(self):
        return '_ForwardRef(%r)' % (self.__forward_arg__,)
class _TypeAlias:
    """Internal helper class for defining generic variants of concrete types.
    Note that this is not a type; let's call it a pseudo-type.  It can
    be used in instance and subclass checks, e.g. isinstance(m, Match)
    or issubclass(type(m), Match).  However, it cannot be itself the
    target of an issubclass() call; e.g. issubclass(Match, C) (for
    some arbitrary class C) raises TypeError rather than returning
    False.
    """
    __slots__ = ('name', 'type_var', 'impl_type', 'type_checker')
    def __new__(cls, *args, **kwds):
        """Constructor.
        This only exists to give a better error message in case
        someone tries to subclass a type alias (not a good idea).
        """
        # A (name, bases, namespace) call signature means this is being
        # used as a metaclass in a class statement -- reject that.
        if (len(args) == 3 and
                isinstance(args[0], str) and
                isinstance(args[1], tuple)):
            # Close enough.
            raise TypeError("A type alias cannot be subclassed")
        return object.__new__(cls)
    def __init__(self, name, type_var, impl_type, type_checker):
        """Initializer.
        Args:
            name: The name, e.g. 'Pattern'.
            type_var: The type parameter, e.g. AnyStr, or the
                specific type, e.g. str.
            impl_type: The implementation type.
            type_checker: Function that takes an impl_type instance.
                and returns a value that should be a type_var instance.
        """
        assert isinstance(name, str), repr(name)
        assert isinstance(type_var, type), repr(type_var)
        assert isinstance(impl_type, type), repr(impl_type)
        assert not isinstance(impl_type, TypingMeta), repr(impl_type)
        self.name = name
        self.type_var = type_var
        self.impl_type = impl_type
        self.type_checker = type_checker
    def __repr__(self):
        return "%s[%s]" % (self.name, _type_repr(self.type_var))
    def __getitem__(self, parameter):
        # E.g. Pattern[str]: substitute a concrete type for the TypeVar.
        assert isinstance(parameter, type), repr(parameter)
        if not isinstance(self.type_var, TypeVar):
            raise TypeError("%s cannot be further parameterized." % self)
        if self.type_var.__constraints__:
            if not issubclass(parameter, Union[self.type_var.__constraints__]):
                raise TypeError("%s is not a valid substitution for %s." %
                                (parameter, self.type_var))
        return self.__class__(self.name, parameter,
                              self.impl_type, self.type_checker)
    def __instancecheck__(self, obj):
        raise TypeError("Type aliases cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        if cls is Any:
            return True
        if isinstance(cls, _TypeAlias):
            # Covariance.  For now, we compare by name.
            return (cls.name == self.name and
                    issubclass(cls.type_var, self.type_var))
        else:
            # Note that this is too lenient, because the
            # implementation type doesn't carry information about
            # whether it is about bytes or str (for example).
            return issubclass(cls, self.impl_type)
def _has_type_var(t):
return t is not None and isinstance(t, TypingMeta) and t._has_type_var()
def _eval_type(t, globalns, localns):
    """Resolve forward references in *t*; plain types pass through as-is."""
    if not isinstance(t, TypingMeta):
        # Ordinary classes carry no forward references.
        return t
    return t._eval_type(globalns, localns)
def _type_check(arg, msg):
"""Check that the argument is a type, and return it.
As a special case, accept None and return type(None) instead.
Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.
The msg argument is a human-readable error message, e.g.
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
if arg is None:
return type(None)
if isinstance(arg, str):
arg = _ForwardRef(arg)
if not isinstance(arg, (type, _TypeAlias)):
raise TypeError(msg + " Got %.100r." % (arg,))
return arg
def _type_repr(obj):
"""Return the repr() of an object, special-casing types.
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
else:
return '%s.%s' % (obj.__module__, _qualname(obj))
else:
return repr(obj)
class AnyMeta(TypingMeta):
    """Metaclass for Any: every type is a subclass of Any."""
    def __new__(cls, name, bases, namespace, _root=False):
        self = super().__new__(cls, name, bases, namespace, _root=_root)
        return self
    def __instancecheck__(self, obj):
        raise TypeError("Any cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        # Non-types are delegated to type.__subclasscheck__, which
        # raises the standard TypeError.
        if not isinstance(cls, type):
            return super().__subclasscheck__(cls)  # To TypeError.
        return True
class Any(Final, metaclass=AnyMeta, _root=True):
    """Special type indicating an unconstrained type.
    - Any object is an instance of Any.
    - Any class is a subclass of Any.
    - As a special case, Any and object are subclasses of each other.
    """
    # Never instantiated: Final.__new__ raises TypeError.
    __slots__ = ()
class TypeVar(TypingMeta, metaclass=TypingMeta, _root=True):
    """Type variable.
    Usage::
      T = TypeVar('T')  # Can be anything
      A = TypeVar('A', str, bytes)  # Must be str or bytes
    Type variables exist primarily for the benefit of static type
    checkers.  They serve as the parameters for generic types as well
    as for generic function definitions.  See class Generic for more
    information on generic types.  Generic functions work as follows:
      def repeat(x: T, n: int) -> Sequence[T]:
          '''Return a list containing n references to x.'''
          return [x]*n
      def longest(x: A, y: A) -> A:
          '''Return the longest of two strings.'''
          return x if len(x) >= len(y) else y
    The latter example's signature is essentially the overloading
    of (str, str) -> str and (bytes, bytes) -> bytes.  Also note
    that if the arguments are instances of some subclass of str,
    the return type is still plain str.
    At runtime, isinstance(x, T) will raise TypeError.  However,
    issubclass(C, T) is true for any class C, and issubclass(str, A)
    and issubclass(bytes, A) are true, and issubclass(int, A) is
    false.
    Type variables may be marked covariant or contravariant by passing
    covariant=True or contravariant=True.  See PEP 484 for more
    details.  By default type variables are invariant.
    Type variables can be introspected. e.g.:
      T.__name__ == 'T'
      T.__constraints__ == ()
      T.__covariant__ == False
      T.__contravariant__ == False
      A.__constraints__ == (str, bytes)
    """
    def __new__(cls, name, *constraints, bound=None,
                covariant=False, contravariant=False):
        self = super().__new__(cls, name, (Final,), {}, _root=True)
        if covariant and contravariant:
            raise ValueError("Bivariant type variables are not supported.")
        self.__covariant__ = bool(covariant)
        self.__contravariant__ = bool(contravariant)
        # Constraints and bound are mutually exclusive, and a single
        # constraint is disallowed (use bound=... for that).
        if constraints and bound is not None:
            raise TypeError("Constraints cannot be combined with bound=...")
        if constraints and len(constraints) == 1:
            raise TypeError("A single constraint is not allowed")
        msg = "TypeVar(name, constraint, ...): constraints must be types."
        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
        if bound:
            self.__bound__ = _type_check(bound, "Bound must be a type.")
        else:
            self.__bound__ = None
        return self
    def _has_type_var(self):
        # A type variable trivially contains a type variable: itself.
        return True
    def __repr__(self):
        # Prefix encodes variance: '+' covariant, '-' contravariant,
        # '~' invariant.
        if self.__covariant__:
            prefix = '+'
        elif self.__contravariant__:
            prefix = '-'
        else:
            prefix = '~'
        return prefix + self.__name__
    def __instancecheck__(self, instance):
        raise TypeError("Type variables cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        # TODO: Make this raise TypeError too?
        if cls is self:
            return True
        if cls is Any:
            return True
        if self.__bound__ is not None:
            return issubclass(cls, self.__bound__)
        if self.__constraints__:
            return any(issubclass(cls, c) for c in self.__constraints__)
        return True
# Some unconstrained type variables.  These are used by the container types.
# (Covariant variants are for read-only containers.)
T = TypeVar('T')  # Any type.
KT = TypeVar('KT')  # Key type.
VT = TypeVar('VT')  # Value type.
T_co = TypeVar('T_co', covariant=True)  # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True)  # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True)  # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True)  # Ditto contravariant.
# A useful type variable with constraints.  This represents string types.
# TODO: What about bytearray, memoryview?
AnyStr = TypeVar('AnyStr', bytes, str)
class UnionMeta(TypingMeta):
    """Metaclass for Union.

    Union[...] normalization (flattening, deduplication, subclass
    pruning) happens here in __new__ when parameters are supplied.
    """
    def __new__(cls, name, bases, namespace, parameters=None, _root=False):
        if parameters is None:
            # Plain 'class Union' definition, no parameters to process.
            return super().__new__(cls, name, bases, namespace, _root=_root)
        if not isinstance(parameters, tuple):
            raise TypeError("Expected parameters=<tuple>")
        # Flatten out Union[Union[...], ...] and type-check non-Union args.
        params = []
        msg = "Union[arg, ...]: each arg must be a type."
        for p in parameters:
            if isinstance(p, UnionMeta):
                params.extend(p.__union_params__)
            else:
                params.append(_type_check(p, msg))
        # Weed out strict duplicates, preserving the first of each occurrence.
        all_params = set(params)
        if len(all_params) < len(params):
            new_params = []
            for t in params:
                if t in all_params:
                    new_params.append(t)
                    all_params.remove(t)
            params = new_params
            assert not all_params, all_params
        # Weed out subclasses.
        # E.g. Union[int, Employee, Manager] == Union[int, Employee].
        # If Any or object is present it will be the sole survivor.
        # If both Any and object are present, Any wins.
        # Never discard type variables, except against Any.
        # (In particular, Union[str, AnyStr] != AnyStr.)
        all_params = set(params)
        for t1 in params:
            if t1 is Any:
                return Any
            if isinstance(t1, TypeVar):
                continue
            if any(issubclass(t1, t2)
                   for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
                all_params.remove(t1)
        # It's not a union if there's only one type left.
        if len(all_params) == 1:
            return all_params.pop()
        # Create a new class with these params.
        self = super().__new__(cls, name, bases, {}, _root=True)
        self.__union_params__ = tuple(t for t in params if t in all_params)
        self.__union_set_params__ = frozenset(self.__union_params__)
        return self
    def _eval_type(self, globalns, localns):
        # Resolve forward references in each member; rebuild only if
        # anything actually changed.
        p = tuple(_eval_type(t, globalns, localns)
                  for t in self.__union_params__)
        if p == self.__union_params__:
            return self
        else:
            return self.__class__(self.__name__, self.__bases__, {},
                                  p, _root=True)
    def _has_type_var(self):
        if self.__union_params__:
            for t in self.__union_params__:
                if _has_type_var(t):
                    return True
        return False
    def __repr__(self):
        r = super().__repr__()
        if self.__union_params__:
            r += '[%s]' % (', '.join(_type_repr(t)
                                     for t in self.__union_params__))
        return r
    def __getitem__(self, parameters):
        if self.__union_params__ is not None:
            raise TypeError(
                "Cannot subscript an existing Union. Use Union[u, t] instead.")
        if parameters == ():
            raise TypeError("Cannot take a Union of no types.")
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        return self.__class__(self.__name__, self.__bases__,
                              dict(self.__dict__), parameters, _root=True)
    def __eq__(self, other):
        if not isinstance(other, UnionMeta):
            return NotImplemented
        # Order-insensitive comparison via the frozenset mirror.
        return self.__union_set_params__ == other.__union_set_params__
    def __hash__(self):
        return hash(self.__union_set_params__)
    def __instancecheck__(self, obj):
        raise TypeError("Unions cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        if cls is Any:
            return True
        if self.__union_params__ is None:
            # Unsubscripted Union: only other Unions qualify.
            return isinstance(cls, UnionMeta)
        elif isinstance(cls, UnionMeta):
            if cls.__union_params__ is None:
                return False
            # Every member of cls must be a subclass of this union.
            return all(issubclass(c, self) for c in (cls.__union_params__))
        elif isinstance(cls, TypeVar):
            if cls in self.__union_params__:
                return True
            if cls.__constraints__:
                return issubclass(Union[cls.__constraints__], self)
            return False
        else:
            return any(issubclass(cls, t) for t in self.__union_params__)
class Union(Final, metaclass=UnionMeta, _root=True):
    """Union type; Union[X, Y] means either X or Y.
    To define a union, use e.g. Union[int, str].  Details:
    - The arguments must be types and there must be at least one.
    - None as an argument is a special case and is replaced by
      type(None).
    - Unions of unions are flattened, e.g.::
        Union[Union[int, str], float] == Union[int, str, float]
    - Unions of a single argument vanish, e.g.::
        Union[int] == int  # The constructor actually returns int
    - Redundant arguments are skipped, e.g.::
        Union[int, str, int] == Union[int, str]
    - When comparing unions, the argument order is ignored, e.g.::
        Union[int, str] == Union[str, int]
    - When two arguments have a subclass relationship, the least
      derived argument is kept, e.g.::
        class Employee: pass
        class Manager(Employee): pass
        Union[int, Employee, Manager] == Union[int, Employee]
        Union[Manager, int, Employee] == Union[int, Employee]
        Union[Employee, Manager] == Employee
    - Corollary: if Any is present it is the sole survivor, e.g.::
        Union[int, Any] == Any
    - Similar for object::
        Union[int, object] == object
    - To cut a tie: Union[object, Any] == Union[Any, object] == Any.
    - You cannot subclass or instantiate a union.
    - You cannot write Union[X][Y] (what would it mean?).
    - You can use Optional[X] as a shorthand for Union[X, None].
    """
    # Unsubscripted Union type has params set to None.
    __union_params__ = None
    # Frozenset mirror of __union_params__ for order-insensitive __eq__.
    __union_set_params__ = None
class OptionalMeta(TypingMeta):
    """Metaclass for Optional: Optional[X] just delegates to Union."""
    def __new__(cls, name, bases, namespace, _root=False):
        return super().__new__(cls, name, bases, namespace, _root=_root)
    def __getitem__(self, arg):
        arg = _type_check(arg, "Optional[t] requires a single type.")
        # Optional[X] is merely sugar for Union[X, None].
        return Union[arg, type(None)]
class Optional(Final, metaclass=OptionalMeta, _root=True):
    """Optional type.
    Optional[X] is equivalent to Union[X, type(None)].
    """
    # Never instantiated: Final.__new__ raises TypeError.
    __slots__ = ()
class TupleMeta(TypingMeta):
    """Metaclass for Tuple.

    Stores element types in __tuple_params__; __tuple_use_ellipsis__
    records the Tuple[t, ...] homogeneous/variadic form.
    """
    def __new__(cls, name, bases, namespace, parameters=None,
                use_ellipsis=False, _root=False):
        self = super().__new__(cls, name, bases, namespace, _root=_root)
        self.__tuple_params__ = parameters
        self.__tuple_use_ellipsis__ = use_ellipsis
        return self
    def _has_type_var(self):
        if self.__tuple_params__:
            for t in self.__tuple_params__:
                if _has_type_var(t):
                    return True
        return False
    def _eval_type(self, globalns, localns):
        tp = self.__tuple_params__
        if tp is None:
            return self
        # Resolve forward references; rebuild only on actual change.
        p = tuple(_eval_type(t, globalns, localns) for t in tp)
        if p == self.__tuple_params__:
            return self
        else:
            return self.__class__(self.__name__, self.__bases__, {},
                                  p, _root=True)
    def __repr__(self):
        r = super().__repr__()
        if self.__tuple_params__ is not None:
            params = [_type_repr(p) for p in self.__tuple_params__]
            if self.__tuple_use_ellipsis__:
                params.append('...')
            r += '[%s]' % (
                ', '.join(params))
        return r
    def __getitem__(self, parameters):
        if self.__tuple_params__ is not None:
            raise TypeError("Cannot re-parameterize %r" % (self,))
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        # Tuple[t, ...] denotes a variable-length homogeneous tuple.
        if len(parameters) == 2 and parameters[1] == Ellipsis:
            parameters = parameters[:1]
            use_ellipsis = True
            msg = "Tuple[t, ...]: t must be a type."
        else:
            use_ellipsis = False
            msg = "Tuple[t0, t1, ...]: each t must be a type."
        parameters = tuple(_type_check(p, msg) for p in parameters)
        return self.__class__(self.__name__, self.__bases__,
                              dict(self.__dict__), parameters,
                              use_ellipsis=use_ellipsis, _root=True)
    def __eq__(self, other):
        if not isinstance(other, TupleMeta):
            return NotImplemented
        return self.__tuple_params__ == other.__tuple_params__
    def __hash__(self):
        return hash(self.__tuple_params__)
    def __instancecheck__(self, obj):
        raise TypeError("Tuples cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        if cls is Any:
            return True
        if not isinstance(cls, type):
            return super().__subclasscheck__(cls)  # To TypeError.
        if issubclass(cls, tuple):
            return True  # Special case.
        if not isinstance(cls, TupleMeta):
            return super().__subclasscheck__(cls)  # False.
        if self.__tuple_params__ is None:
            return True
        if cls.__tuple_params__ is None:
            return False  # ???
        if cls.__tuple_use_ellipsis__ != self.__tuple_use_ellipsis__:
            return False
        # Covariance.
        return (len(self.__tuple_params__) == len(cls.__tuple_params__) and
                all(issubclass(x, p)
                    for x, p in zip(cls.__tuple_params__,
                                    self.__tuple_params__)))
class Tuple(Final, metaclass=TupleMeta, _root=True):
    """Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
    Example: Tuple[T1, T2] is a tuple of two elements corresponding
    to type variables T1 and T2.  Tuple[int, float, str] is a tuple
    of an int, a float and a string.
    To specify a variable-length tuple of homogeneous type, use Sequence[T].
    """
    # Never instantiated: Final.__new__ raises TypeError.
    __slots__ = ()
class CallableMeta(TypingMeta):
    """Metaclass for Callable.

    Stores the argument type list in __args__ (or Ellipsis for an
    unspecified argument list) and the return type in __result__;
    both are None for the bare, unparameterized Callable.
    """
    def __new__(cls, name, bases, namespace, _root=False,
                args=None, result=None):
        if args is None and result is None:
            pass  # Must be 'class Callable'.
        else:
            # Callable[..., result] leaves args as Ellipsis; otherwise
            # args must be a list of types.
            if args is not Ellipsis:
                if not isinstance(args, list):
                    raise TypeError("Callable[args, result]: "
                                    "args must be a list."
                                    " Got %.100r." % (args,))
                msg = "Callable[[arg, ...], result]: each arg must be a type."
                args = tuple(_type_check(arg, msg) for arg in args)
            msg = "Callable[args, result]: result must be a type."
            result = _type_check(result, msg)
        self = super().__new__(cls, name, bases, namespace, _root=_root)
        self.__args__ = args
        self.__result__ = result
        return self
    def _has_type_var(self):
        if self.__args__:
            for t in self.__args__:
                if _has_type_var(t):
                    return True
        return _has_type_var(self.__result__)
    def _eval_type(self, globalns, localns):
        if self.__args__ is None and self.__result__ is None:
            return self
        if self.__args__ is Ellipsis:
            args = self.__args__
        else:
            args = [_eval_type(t, globalns, localns) for t in self.__args__]
        result = _eval_type(self.__result__, globalns, localns)
        if args == self.__args__ and result == self.__result__:
            return self
        else:
            return self.__class__(self.__name__, self.__bases__, {},
                                  args=args, result=result, _root=True)
    def __repr__(self):
        r = super().__repr__()
        if self.__args__ is not None or self.__result__ is not None:
            if self.__args__ is Ellipsis:
                args_r = '...'
            else:
                args_r = '[%s]' % ', '.join(_type_repr(t)
                                            for t in self.__args__)
            r += '[%s, %s]' % (args_r, _type_repr(self.__result__))
        return r
    def __getitem__(self, parameters):
        if self.__args__ is not None or self.__result__ is not None:
            raise TypeError("This Callable type is already parameterized.")
        if not isinstance(parameters, tuple) or len(parameters) != 2:
            raise TypeError(
                "Callable must be used as Callable[[arg, ...], result].")
        args, result = parameters
        return self.__class__(self.__name__, self.__bases__,
                              dict(self.__dict__), _root=True,
                              args=args, result=result)
    def __eq__(self, other):
        if not isinstance(other, CallableMeta):
            return NotImplemented
        return (self.__args__ == other.__args__ and
                self.__result__ == other.__result__)
    def __hash__(self):
        return hash(self.__args__) ^ hash(self.__result__)
    def __instancecheck__(self, obj):
        # For unparametrized Callable we allow this, because
        # typing.Callable should be equivalent to
        # collections.abc.Callable.
        if self.__args__ is None and self.__result__ is None:
            return isinstance(obj, collections_abc.Callable)
        else:
            raise TypeError("Callable[] cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        if cls is Any:
            return True
        if not isinstance(cls, CallableMeta):
            return super().__subclasscheck__(cls)
        if self.__args__ is None and self.__result__ is None:
            return True
        # We're not doing covariance or contravariance -- this is *invariance*.
        return self == cls
class Callable(Final, metaclass=CallableMeta, _root=True):
    """Callable type; Callable[[int], str] is a function of (int) -> str.
    The subscription syntax must always be used with exactly two
    values: the argument list and the return type.  The argument list
    must be a list of types; the return type must be a single type.
    There is no syntax to indicate optional or keyword arguments,
    such function types are rarely used as callback types.
    """
    # Never instantiated: Final.__new__ raises TypeError.
    __slots__ = ()
def _gorg(a):
    """Walk the __origin__ chain of *a* and return the root generic class."""
    assert isinstance(a, GenericMeta)
    current = a
    # Parameterized forms point back at what they were built from;
    # the unparameterized root has __origin__ set to None.
    while current.__origin__ is not None:
        current = current.__origin__
    return current
def _geqv(a, b):
    """Return whether two generic classes are equivalent.

    A generic class X and any of its parameterized forms (X[T], X[int],
    etc.) are all equivalent to each other, but a subclass of X is not
    equivalent to X.  The relation is reflexive, symmetric and
    transitive.
    """
    assert isinstance(a, GenericMeta)
    assert isinstance(b, GenericMeta)
    # Equivalent iff both reduce to the very same origin class.
    return _gorg(a) is _gorg(b)
class GenericMeta(TypingMeta, abc.ABCMeta):
    """Metaclass for generic types.

    Tracks a class's type parameters (__parameters__), the class it was
    parameterized from (__origin__), and an optional ABC used as a
    fallback for subclass checks (__extra__).
    """
    # TODO: Constrain more how Generic is used; only a few
    # standard patterns should be allowed.
    # TODO: Use a more precise rule than matching __name__ to decide
    # whether two classes are the same.  Also, save the formal
    # parameters.  (These things are related!  A solution lies in
    # using origin.)
    __extra__ = None
    def __new__(cls, name, bases, namespace,
                parameters=None, origin=None, extra=None):
        if parameters is None:
            # Extract parameters from direct base classes.  Only
            # direct bases are considered and only those that are
            # themselves generic, and parameterized with type
            # variables.  Don't use bases like Any, Union, Tuple,
            # Callable or type variables.
            params = None
            for base in bases:
                if isinstance(base, TypingMeta):
                    if not isinstance(base, GenericMeta):
                        raise TypeError(
                            "You cannot inherit from magic class %s" %
                            repr(base))
                    if base.__parameters__ is None:
                        continue  # The base is unparameterized.
                    for bp in base.__parameters__:
                        if _has_type_var(bp) and not isinstance(bp, TypeVar):
                            raise TypeError(
                                "Cannot inherit from a generic class "
                                "parameterized with "
                                "non-type-variable %s" % bp)
                        if params is None:
                            params = []
                        if bp not in params:
                            params.append(bp)
            if params is not None:
                parameters = tuple(params)
        self = super().__new__(cls, name, bases, namespace, _root=True)
        self.__parameters__ = parameters
        if extra is not None:
            self.__extra__ = extra
        # Else __extra__ is inherited, eventually from the
        # (meta-)class default above.
        self.__origin__ = origin
        return self
    def _has_type_var(self):
        if self.__parameters__:
            for t in self.__parameters__:
                if _has_type_var(t):
                    return True
        return False
    def __repr__(self):
        r = super().__repr__()
        if self.__parameters__ is not None:
            r += '[%s]' % (
                ', '.join(_type_repr(p) for p in self.__parameters__))
        return r
    def __eq__(self, other):
        if not isinstance(other, GenericMeta):
            return NotImplemented
        # Same origin class and same (ordered) parameters.
        return (_geqv(self, other) and
                self.__parameters__ == other.__parameters__)
    def __hash__(self):
        return hash((self.__name__, self.__parameters__))
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        if not params:
            raise TypeError("Cannot have empty parameter list")
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        if self.__parameters__ is None:
            # First subscription (e.g. Generic[T, KT]): only distinct
            # type variables are allowed.
            for p in params:
                if not isinstance(p, TypeVar):
                    raise TypeError("Initial parameters must be "
                                    "type variables; got %s" % p)
            if len(set(params)) != len(params):
                raise TypeError(
                    "All type variables in Generic[...] must be distinct.")
        else:
            # Re-subscription: substitute concrete types for the
            # existing parameters, honoring TypeVar constraints.
            if len(params) != len(self.__parameters__):
                raise TypeError("Cannot change parameter count from %d to %d" %
                                (len(self.__parameters__), len(params)))
            for new, old in zip(params, self.__parameters__):
                if isinstance(old, TypeVar):
                    if not old.__constraints__:
                        # Substituting for an unconstrained TypeVar is OK.
                        continue
                    if issubclass(new, Union[old.__constraints__]):
                        # Specializing a constrained type variable is OK.
                        continue
                if not issubclass(new, old):
                    raise TypeError(
                        "Cannot substitute %s for %s in %s" %
                        (_type_repr(new), _type_repr(old), self))
        return self.__class__(self.__name__, self.__bases__,
                              dict(self.__dict__),
                              parameters=params,
                              origin=self,
                              extra=self.__extra__)
    def __instancecheck__(self, instance):
        # Since we extend ABC.__subclasscheck__ and
        # ABC.__instancecheck__ inlines the cache checking done by the
        # latter, we must extend __instancecheck__ too.  For simplicity
        # we just skip the cache check -- instance checks for generic
        # classes are supposed to be rare anyways.
        return self.__subclasscheck__(instance.__class__)
    def __subclasscheck__(self, cls):
        if cls is Any:
            return True
        if isinstance(cls, GenericMeta):
            # For a class C(Generic[T]) where T is co-variant,
            # C[X] is a subclass of C[Y] iff X is a subclass of Y.
            origin = self.__origin__
            if origin is not None and origin is cls.__origin__:
                assert len(self.__parameters__) == len(origin.__parameters__)
                assert len(cls.__parameters__) == len(origin.__parameters__)
                for p_self, p_cls, p_origin in zip(self.__parameters__,
                                                   cls.__parameters__,
                                                   origin.__parameters__):
                    if isinstance(p_origin, TypeVar):
                        if p_origin.__covariant__:
                            # Covariant -- p_cls must be a subclass of p_self.
                            if not issubclass(p_cls, p_self):
                                break
                        elif p_origin.__contravariant__:
                            # Contravariant. I think it's the opposite. :-)
                            if not issubclass(p_self, p_cls):
                                break
                        else:
                            # Invariant -- p_cls and p_self must equal.
                            if p_self != p_cls:
                                break
                    else:
                        # If the origin's parameter is not a typevar,
                        # insist on invariance.
                        if p_self != p_cls:
                            break
                else:
                    return True
                # If we break out of the loop, the superclass gets a chance.
        if super().__subclasscheck__(cls):
            return True
        if self.__extra__ is None or isinstance(cls, GenericMeta):
            return False
        # Fall back to the "extra" ABC (e.g. collections.abc.Mapping).
        return issubclass(cls, self.__extra__)
class Generic(metaclass=GenericMeta):
    """Abstract base class for generic types.
    A generic type is typically declared by inheriting from an
    instantiation of this class with one or more type variables.
    For example, a generic mapping type might be defined as::
      class Mapping(Generic[KT, VT]):
          def __getitem__(self, key: KT) -> VT:
              ...
          # Etc.
    This class can then be used as follows::
      def lookup_name(mapping: Mapping, key: KT, default: VT) -> VT:
          try:
              return mapping[key]
          except KeyError:
              return default
    For clarity the type variables may be redefined, e.g.::
      X = TypeVar('X')
      Y = TypeVar('Y')
      def lookup_name(mapping: Mapping[X, Y], key: X, default: Y) -> Y:
          # Same body as above.
    """
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        # Skip over Generic itself in the MRO so instantiation lands on
        # the next real base class (ultimately object).
        next_in_mro = object
        # Look for the last occurrence of Generic or Generic[...].
        for i, c in enumerate(cls.__mro__[:-1]):
            if isinstance(c, GenericMeta) and _gorg(c) is Generic:
                next_in_mro = cls.__mro__[i+1]
        # Instantiate the unparameterized origin, not e.g. C[int].
        return next_in_mro.__new__(_gorg(cls))
def cast(typ, val):
    """Cast a value to a type.

    Returns *val* unchanged.  To the type checker this signals that the
    return value has the designated type; at runtime we intentionally
    check nothing (we want this to be as fast as possible).
    """
    return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
code = func.__code__
pos_count = code.co_argcount
kw_count = code.co_kwonlyargcount
arg_names = code.co_varnames
kwarg_names = arg_names[pos_count:pos_count + kw_count]
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
def get_type_hints(obj, globalns=None, localns=None):
    """Return type hints for a function or method object.
    This is often the same as obj.__annotations__, but it handles
    forward references encoded as string literals, and if necessary
    adds Optional[t] if a default value equal to None is set.
    BEWARE -- the behavior of globalns and localns is counterintuitive
    (unless you are familiar with how eval() and exec() work).  The
    search order is locals first, then globals.
    - If no dict arguments are passed, an attempt is made to use the
      globals from obj, and these are also used as the locals.  If the
      object does not appear to have globals, an exception is raised.
    - If one dict argument is passed, it is used for both globals and
      locals.
    - If two dict arguments are passed, they specify globals and
      locals, respectively.
    """
    # @no_type_check(ed) objects expose no hints at all.
    if getattr(obj, '__no_type_check__', None):
        return {}
    if globalns is None:
        globalns = getattr(obj, '__globals__', {})
        if localns is None:
            localns = globalns
    elif localns is None:
        localns = globalns
    defaults = _get_defaults(obj)
    hints = dict(obj.__annotations__)
    for name, value in hints.items():
        # String annotations are forward references; resolve them lazily
        # against the chosen namespaces.
        if isinstance(value, str):
            value = _ForwardRef(value)
        value = _eval_type(value, globalns, localns)
        # A default of None implicitly widens the annotation to Optional.
        if name in defaults and defaults[name] is None:
            value = Optional[value]
        hints[name] = value
    return hints
# TODO: Also support this as a class decorator.
def no_type_check(arg):
    """Decorator marking *arg*'s annotations as "not type hints".

    *arg* must be a class or a function.  For a class, every function
    defined directly on that class (not on super- or subclasses) is
    flagged; for a function, the function itself is flagged.  The
    flagged objects are mutated in place and *arg* is returned.
    """
    if not isinstance(arg, type):
        arg.__no_type_check__ = True
    else:
        for member in arg.__dict__.values():
            if isinstance(member, types.FunctionType):
                member.__no_type_check__ = True
    return arg
def no_type_check_decorator(decorator):
    """Give another decorator the @no_type_check effect.

    The returned decorator behaves like *decorator*, but additionally
    applies @no_type_check to whatever *decorator* produces.
    """
    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        return no_type_check(decorator(*args, **kwds))
    return wrapped_decorator
def overload(func):
    """Reject runtime use: @overload only has meaning inside library stubs."""
    raise RuntimeError("Overloading is only supported in library stubs")
class _ProtocolMeta(GenericMeta):
    """Internal metaclass for _Protocol.
    This exists so _Protocol classes can be generic without deriving
    from Generic.
    """
    def __instancecheck__(self, obj):
        # Structural instance checks are deliberately unsupported.
        raise TypeError("Protocols cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        if not self._is_protocol:
            # No structural checks since this isn't a protocol.
            return NotImplemented
        if self is _Protocol:
            # Every class is a subclass of the empty protocol.
            return True
        # Find all attributes defined in the protocol.
        attrs = self._get_protocol_attrs()
        # cls conforms iff every protocol attribute appears somewhere in
        # its MRO (a purely structural check).
        for attr in attrs:
            if not any(attr in d.__dict__ for d in cls.__mro__):
                return False
        return True
    def _get_protocol_attrs(self):
        # Get all Protocol base classes.
        protocol_bases = []
        for c in self.__mro__:
            if getattr(c, '_is_protocol', False) and c.__name__ != '_Protocol':
                protocol_bases.append(c)
        # Get attributes included in protocol.
        attrs = set()
        for base in protocol_bases:
            for attr in base.__dict__.keys():
                # Include attributes not defined in any non-protocol bases.
                for c in self.__mro__:
                    if (c is not base and attr in c.__dict__ and
                            not getattr(c, '_is_protocol', False)):
                        break
                else:
                    # Filter out ABC/typing bookkeeping names so only the
                    # protocol's real interface remains.
                    if (not attr.startswith('_abc_') and
                        attr != '__abstractmethods__' and
                        attr != '_is_protocol' and
                        attr != '__dict__' and
                        attr != '__slots__' and
                        attr != '_get_protocol_attrs' and
                        attr != '__parameters__' and
                        attr != '__origin__' and
                        attr != '__module__'):
                        attrs.add(attr)
        return attrs
class _Protocol(metaclass=_ProtocolMeta):
    """Internal base class for protocol classes.
    This implements a simple-minded structural isinstance check
    (similar but more general than the one-offs in collections.abc
    such as Hashable).
    """
    __slots__ = ()
    # Marker consulted by _ProtocolMeta.__subclasscheck__.
    _is_protocol = True
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Hashable = collections_abc.Hashable  # Not generic.
class Iterable(Generic[T_co], extra=collections_abc.Iterable):
    __slots__ = ()
class Iterator(Iterable[T_co], extra=collections_abc.Iterator):
    __slots__ = ()
# One-method structural protocols mirroring the dunder conversion hooks.
class SupportsInt(_Protocol):
    __slots__ = ()
    @abstractmethod
    def __int__(self) -> int:
        pass
class SupportsFloat(_Protocol):
    __slots__ = ()
    @abstractmethod
    def __float__(self) -> float:
        pass
class SupportsComplex(_Protocol):
    __slots__ = ()
    @abstractmethod
    def __complex__(self) -> complex:
        pass
class SupportsBytes(_Protocol):
    __slots__ = ()
    @abstractmethod
    def __bytes__(self) -> bytes:
        pass
# The remaining protocols are generic in their result type.
class SupportsAbs(_Protocol[T_co]):
    __slots__ = ()
    @abstractmethod
    def __abs__(self) -> T_co:
        pass
class SupportsRound(_Protocol[T_co]):
    __slots__ = ()
    @abstractmethod
    def __round__(self, ndigits: int = 0) -> T_co:
        pass
class Reversible(_Protocol[T_co]):
    __slots__ = ()
    @abstractmethod
    def __reversed__(self) -> 'Iterator[T_co]':
        pass
Sized = collections_abc.Sized  # Not generic.
class Container(Generic[T_co], extra=collections_abc.Container):
    __slots__ = ()
# Callable was defined earlier.
# Generic facades over the collections.abc container ABCs; 'extra' wires
# each one to the concrete ABC used for runtime checks.
class AbstractSet(Sized, Iterable[T_co], Container[T_co],
                  extra=collections_abc.Set):
    pass
class MutableSet(AbstractSet[T], extra=collections_abc.MutableSet):
    pass
# NOTE: Only the value type is covariant.
class Mapping(Sized, Iterable[KT], Container[KT], Generic[VT_co],
              extra=collections_abc.Mapping):
    pass
class MutableMapping(Mapping[KT, VT], extra=collections_abc.MutableMapping):
    pass
class Sequence(Sized, Iterable[T_co], Container[T_co],
               extra=collections_abc.Sequence):
    pass
class MutableSequence(Sequence[T], extra=collections_abc.MutableSequence):
    pass
class ByteString(Sequence[int], extra=collections_abc.ByteString):
    pass
# memoryview also behaves as a byte string for typing purposes.
ByteString.register(type(memoryview(b'')))
# Generic aliases of the concrete builtin containers.  The unsubscripted
# typing alias itself must not be instantiated; _geqv detects that case.
class List(list, MutableSequence[T]):
    def __new__(cls, *args, **kwds):
        if _geqv(cls, List):
            raise TypeError("Type List cannot be instantiated; "
                            "use list() instead")
        return list.__new__(cls, *args, **kwds)
class Set(set, MutableSet[T]):
    def __new__(cls, *args, **kwds):
        if _geqv(cls, Set):
            raise TypeError("Type Set cannot be instantiated; "
                            "use set() instead")
        return set.__new__(cls, *args, **kwds)
class _FrozenSetMeta(GenericMeta):
    """This metaclass ensures set is not a subclass of FrozenSet.
    Without this metaclass, set would be considered a subclass of
    FrozenSet, because FrozenSet.__extra__ is collections.abc.Set, and
    set is a subclass of that.
    """
    def __subclasscheck__(self, cls):
        if issubclass(cls, Set):
            return False
        return super().__subclasscheck__(cls)
class FrozenSet(frozenset, AbstractSet[T_co], metaclass=_FrozenSetMeta):
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        if _geqv(cls, FrozenSet):
            raise TypeError("Type FrozenSet cannot be instantiated; "
                            "use frozenset() instead")
        return frozenset.__new__(cls, *args, **kwds)
class MappingView(Sized, Iterable[T_co], extra=collections_abc.MappingView):
    pass
class KeysView(MappingView[KT], AbstractSet[KT],
               extra=collections_abc.KeysView):
    pass
# TODO: Enable Set[Tuple[KT, VT_co]] instead of Generic[KT, VT_co].
class ItemsView(MappingView, Generic[KT, VT_co],
                extra=collections_abc.ItemsView):
    pass
class ValuesView(MappingView[VT_co], extra=collections_abc.ValuesView):
    pass
class Dict(dict, MutableMapping[KT, VT]):
    def __new__(cls, *args, **kwds):
        if _geqv(cls, Dict):
            raise TypeError("Type Dict cannot be instantiated; "
                            "use dict() instead")
        return dict.__new__(cls, *args, **kwds)
# Determine what base class to use for Generator.
if hasattr(collections_abc, 'Generator'):
    # Sufficiently recent versions of 3.5 have a Generator ABC.
    _G_base = collections_abc.Generator
else:
    # Fall back on the exact type.
    _G_base = types.GeneratorType
class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co],
                extra=_G_base):
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        if _geqv(cls, Generator):
            raise TypeError("Type Generator cannot be instantiated; "
                            "create a subclass instead")
        return super().__new__(cls, *args, **kwds)
def NamedTuple(typename, fields):
    """Typed version of namedtuple.

    Usage::

        Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)])

    This is equivalent to::

        Employee = collections.namedtuple('Employee', ['name', 'id'])

    The resulting class has one extra attribute: _field_types,
    giving a dict mapping field names to types.  (The field names
    are in the _fields attribute, which is part of the namedtuple
    API.)
    """
    # Materialize (and implicitly validate the (name, type) shape of) the
    # field list once, so a one-shot iterator argument also works.
    fields = [(n, t) for n, t in fields]
    cls = collections.namedtuple(typename, [n for n, _ in fields])
    cls._field_types = dict(fields)
    return cls
class IO(Generic[AnyStr]):
    """Generic base class for TextIO and BinaryIO.
    This is an abstract, generic version of the return of open().
    NOTE: This does not distinguish between the different possible
    classes (text vs. binary, read vs. write vs. read/write,
    append-only, unbuffered).  The TextIO and BinaryIO subclasses
    below capture the distinctions between text vs. binary, which is
    pervasive in the interface; however we currently do not offer a
    way to track the other distinctions in the type system.
    """
    __slots__ = ()
    # Every member below is abstract: this class only describes the file
    # object interface for type-checking purposes.
    @abstractproperty
    def mode(self) -> str:
        pass
    @abstractproperty
    def name(self) -> str:
        pass
    @abstractmethod
    def close(self) -> None:
        pass
    @abstractmethod
    def closed(self) -> bool:
        pass
    @abstractmethod
    def fileno(self) -> int:
        pass
    @abstractmethod
    def flush(self) -> None:
        pass
    @abstractmethod
    def isatty(self) -> bool:
        pass
    @abstractmethod
    def read(self, n: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readable(self) -> bool:
        pass
    @abstractmethod
    def readline(self, limit: int = -1) -> AnyStr:
        pass
    @abstractmethod
    def readlines(self, hint: int = -1) -> List[AnyStr]:
        pass
    @abstractmethod
    def seek(self, offset: int, whence: int = 0) -> int:
        pass
    @abstractmethod
    def seekable(self) -> bool:
        pass
    @abstractmethod
    def tell(self) -> int:
        pass
    @abstractmethod
    def truncate(self, size: int = None) -> int:
        pass
    @abstractmethod
    def writable(self) -> bool:
        pass
    @abstractmethod
    def write(self, s: AnyStr) -> int:
        pass
    @abstractmethod
    def writelines(self, lines: List[AnyStr]) -> None:
        pass
    @abstractmethod
    def __enter__(self) -> 'IO[AnyStr]':
        pass
    @abstractmethod
    def __exit__(self, type, value, traceback) -> None:
        pass
class BinaryIO(IO[bytes]):
    """Typed version of the return of open() in binary mode."""
    __slots__ = ()
    @abstractmethod
    def write(self, s: Union[bytes, bytearray]) -> int:
        pass
    @abstractmethod
    def __enter__(self) -> 'BinaryIO':
        pass
class TextIO(IO[str]):
    """Typed version of the return of open() in text mode."""
    __slots__ = ()
    @abstractproperty
    def buffer(self) -> BinaryIO:
        pass
    @abstractproperty
    def encoding(self) -> str:
        pass
    @abstractproperty
    def errors(self) -> str:
        pass
    @abstractproperty
    def line_buffering(self) -> bool:
        pass
    @abstractproperty
    def newlines(self) -> Any:
        pass
    @abstractmethod
    def __enter__(self) -> 'TextIO':
        pass
class io:
    """Wrapper namespace for IO generic classes."""
    __all__ = ['IO', 'TextIO', 'BinaryIO']
    IO = IO
    TextIO = TextIO
    BinaryIO = BinaryIO
# Register the wrapper as a pseudo-module so "import typing.io" works.
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
# Type aliases for compiled patterns and match objects; the lambdas
# extract a representative value for error messages.
Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
                     lambda p: p.pattern)
Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
                   lambda m: m.re.pattern)
class re:
    """Wrapper namespace for re type aliases."""
    __all__ = ['Pattern', 'Match']
    Pattern = Pattern
    Match = Match
# Register the wrapper as a pseudo-module so "import typing.re" works.
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
| gpl-3.0 |
ayenter/googletest | scripts/pump.py | 2471 | 23673 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Maps each Pump construct to its token type via a compiled regex.
# Order matters: specific directives must precede the bare '$' catch-all.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """A (line, column) position in a text file; Eof() marks end-of-file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    # Lexicographic order: line first, then column within the line.
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    # Lines are reported 1-based, columns 0-based.
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  return Cursor(-1, -1)
class Token:
  """Represents a token in a Pump source file."""
  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing endpoints default to the EOF sentinel cursor.
    if start is None:
      self.start = Eof()
    else:
      self.start = start
    if end is None:
      self.end = Eof()
    else:
      self.end = end
    self.value = value
    self.token_type = token_type
  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)
  def Clone(self):
    """Returns a copy of self."""
    # Cursors are cloned too, so mutating the copy cannot alias the original.
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Finds the earliest token match in line.

  Returns (start_column, length, token_type) for the match that begins
  first in the line, or None if no regex in token_table matches.  On a
  tie, the entry listed earlier in token_table wins.
  """
  best = None
  for regex, token_type in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines."""
  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    # Only the very first line is trimmed to the cursor's column.
    if cur_line_number == start.line:
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        # Translate back from the trimmed line to absolute columns.
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
def SubString(lines, start, end):
  """Returns a substring in lines."""
  # Normalize the EOF sentinel to a concrete end-of-text cursor.
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  # Multi-line span: partial first line, whole middle lines, partial last.
  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)
def StripMetaComments(str):
  """Strip meta comments ($$ ...) from each line in the given string."""
  # Lines holding nothing but a meta comment vanish entirely, trailing
  # newline included.
  without_comment_lines = re.sub(r'^\s*\$\$.*\n', '', str)
  # Remaining meta comments are clipped off the ends of contentful lines
  # (the \s* also swallows the newline preceding a comment-only line).
  return re.sub(r'\s*\$\$.*', '', without_comment_lines)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""
  # The token's value is the exact text between the two cursors.
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses a token of token_type that must start exactly at pos.

  Returns the Token on success; prints an error and exits otherwise.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    # Parenthesized so this runs under both Python 2 and Python 3
    # (the original 'print x' statement is Python-2-only syntax).
    print('ERROR: %s expected at %s.' % (token_type, pos))
    sys.exit(1)
# Precompiled regexes shared by the tokenizer below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
# Everything up to end-of-line or the start of a $$ meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
  """Advances pos past regex if it matches exactly at pos.

  Returns pos unchanged when the match is absent or starts later.
  """
  remainder = lines[pos.line][pos.column:]
  m = re.search(regex, remainder)
  if m and m.start() == 0:
    return pos + m.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos.

  The search is limited to pos's line; if the regex never matches there,
  an error naming the expected token_type is printed and the program exits.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    # Parenthesized so this runs under both Python 2 and Python 3
    # (the original 'print x' statement is Python-2-only syntax).
    print('ERROR: %s expected on line %s after column %s.' %
          (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized $(EXPRESSION), honoring nested parens."""
  def ParseInParens(pos):
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos
  def Parse(pos):
    # Recurse on every '(' until the matching ')' is reached.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos
  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline removed from its value."""
  if token.value.endswith('\n'):
    return Token(token.start, token.end, token.value[:-1], token.token_type)
  else:
    return token
def TokenizeLines(lines, pos):
  """Generator yielding Pump tokens found in lines, starting at pos."""
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # Everything that remains is plain output code.
      yield MakeToken(lines, pos, Eof(), 'code')
      return
    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Text between the previous position and the directive is raw code;
      # directives that start a line also eat the preceding newline.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)
    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')
      if SubString(lines, pos, pos + 2) != '[[':
        # $var ID = EXPRESSION form (vs. the [[ CODE ]] form).
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      # $range ID EXP..EXP: emit lower bound, '..', then upper bound.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s != '':
    lines = s.splitlines(True)
    for token in TokenizeLines(lines, Cursor(0, 0)):
      yield token
# AST node classes for the Pump grammar; each is a plain record.
class CodeNode:
  # A sequence of atomic code nodes.
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  # $var ID = <atomic_code>
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  # $range ID exp1..exp2
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  # $for ID [separator] [[ code ]]
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  # $else [[ code ]]
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  # $if exp [[ then ]] [else/elif branch]
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  # Literal output text, held as its source token.
  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  # The $($) escape producing a literal '$'.
  def __init__(self, token):
    self.token = token
class ExpNode:
  # A meta expression plus its rewritten Python form.
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the front token, dying if its type mismatches.

  When token_type is given and the popped token's type differs, an
  error is printed and the program exits.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # Parenthesized prints keep this runnable under Python 2 and 3
    # (the original used Python-2-only 'print x' statements).
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the front token without removing it, or None if empty."""
  if not a_list:
    return None
  return a_list[0]
def ParseExpNode(token):
  """Wraps token in an ExpNode, rewriting identifiers for evaluation."""
  # Every identifier becomes a lookup in the evaluation Env, so the
  # rewritten expression can later be eval()ed as Env method code.
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses the ELSE_BRANCH production; returns a node or None."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    # $elif desugars into a nested IfNode inside the else branch.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code between branches is skipped.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production from tokens; returns a node or None."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      # $var ID = EXPRESSION
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    # $var ID = [[ CODE ]]
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    # An optional raw-code token between the ID and '[[' is the separator.
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' so only the identifier is evaluated.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic-code starter; put it back for the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses CODE ::= ATOMIC_CODE* into a CodeNode."""
  atomic_code_list = []
  while True:
    if not tokens:
      break
    atomic_code_node = ParseAtomicCodeNode(tokens)
    if atomic_code_node:
      atomic_code_list.append(atomic_code_node)
    else:
      break
  return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  tokens = list(Tokenize(pump_src_text))
  code_node = ParseCodeNode(tokens)
  return code_node
class Env:
  """Evaluation environment: stacks of meta variables and ranges."""

  def __init__(self):
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns a copy whose stacks can be pushed/popped independently."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value, shadowing any earlier binding."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    """Removes the most recent variable binding."""
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    """Removes the most recent range binding."""
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the innermost value bound to identifier, or dies."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    # Parenthesized prints keep this runnable under Python 2 and 3.
    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode against this environment, or dies."""
    try:
      # NOTE: eval() of template-author expressions is by design here,
      # but it means .pump inputs must be trusted.
      result = eval(exp.python_exp)
    except Exception as e:  # 'except E, e' is Python-2-only syntax.
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns the (lower, upper) range bound to identifier, or dies."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
class Output:
  """Accumulates generated text; can report the last unterminated line."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline ('' if none yet)."""
    newline_at = self.string.rfind('\n')
    if newline_at < 0:
      return ''
    return self.string[newline_at + 1:]

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes one AST node against env, appending generated text to output."""
  if isinstance(node, VarNode):
    # Evaluate the body in a child scope, then bind the result.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # The loop bounds come from a previously executed $range.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      # The separator goes between iterations, not after the last one.
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # Parenthesized prints keep this runnable under Python 2 and 3
    # (the original used Python-2-only 'print x' statements).
    print('BAD')
    print(node)
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node of code_node in order."""
  for atomic_code in code_node.atomic_code:
    RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
  """Returns True iff cur_line contains a // comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Truthy iff cur_line starts or continues a preprocessor directive."""
  if cur_line.lstrip().startswith('#'):
    return True
  # A directive continues when the previously emitted line ends in '\'.
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a // comment line to 80 columns, appending lines to output."""
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Comment-only line: continuation lines align under the '//'.
    indent = loc
  else:
    # Code + trailing comment: emit the code, wrap the comment below it.
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-ish segments so wraps happen at word boundaries.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a code line to 80 columns, ending broken lines with line_concat."""
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a preprocessor line; continuations need a trailing backslash."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps an ordinary code line with no continuation marker."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Truthy iff line carries a block-comment IWYU pragma."""
  return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Truthy for header-guard lines, #include lines, and // IWYU pragmas."""
  hit = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  if not hit:
    hit = re.match(r'^#include\s', line)
  if not hit:
    # Don't break IWYU pragmas, either; that causes iwyu.py problems.
    hit = re.search(r'// IWYU pragma: ', line)
  return hit
def WrapLongLine(line, output):
  """Dispatches line to the right 80-column wrapper, appending to output."""
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from generated code."""
  lines = string.splitlines()
  output = []
  for line in lines:
    WrapLongLine(line, output)
  output2 = [line.rstrip() for line in output]
  # Always terminate the result with exactly one trailing newline.
  return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Pipeline: strip $$ comments -> parse -> execute -> reformat.
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
def main(argv):
  """Command-line entry point: expands the Pump file named by argv[-1].

  A path ending in '.pump' is written next to itself with the suffix
  stripped; any other path prints the generated text to stdout.
  Exits with status 1 when no argument is given.
  """
  if len(argv) == 1:
    # print __doc__ in the original; write explicitly so the module stays
    # parseable by both Python 2 and 3.
    sys.stdout.write(__doc__ + '\n')
    sys.exit(1)
  file_path = argv[-1]
  # `with open(...)` replaces the deprecated `file()` call, which leaked the
  # input file handle (it was never closed).
  with open(file_path, 'r') as source_file:
    output_str = ConvertFromPumpSource(source_file.read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    sys.stdout.write(output_str)
  else:
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('// %s %s\n' %
                        (os.path.basename(__file__), os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)
if __name__ == '__main__':
  # Script entry point; main() reads the pump source path from argv[-1].
  main(sys.argv)
| bsd-3-clause |
chafique-delli/OpenUpgrade | openerp/addons/test_impex/tests/test_export.py | 71 | 19319 | # -*- coding: utf-8 -*-
import itertools
import openerp.modules.registry
import openerp
from openerp.tests import common
class CreatorCase(common.TransactionCase):
    """Base case for export tests: create one record of ``model_name``
    holding a single ``value`` and export the requested fields from it.
    """
    # Subclasses override with the model to instantiate.
    model_name = False

    def __init__(self, *args, **kwargs):
        super(CreatorCase, self).__init__(*args, **kwargs)
        self.model = None

    def setUp(self):
        super(CreatorCase, self).setUp()
        self.model = self.registry(self.model_name)

    def make(self, value):
        # Create a record with field `value` set, return its browse record.
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': value})
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]

    def export(self, value, fields=('value',), context=None):
        # Export `fields` ('/'-separated paths) of a freshly created record
        # through the model's private export hook.
        record = self.make(value)
        return self.model._BaseModel__export_row(
            self.cr, openerp.SUPERUSER_ID, record,
            [f.split('/') for f in fields],
            context=context)
class test_boolean_field(CreatorCase):
    """Export behaviour of boolean fields."""
    model_name = 'export.boolean'

    def test_true(self):
        self.assertEqual(
            self.export(True),
            [[u'True']])

    def test_false(self):
        """ ``False`` value to boolean fields is unique in being exported as a
        (unicode) string, not a boolean
        """
        self.assertEqual(
            self.export(False),
            [[u'False']])
class test_integer_field(CreatorCase):
    """Export behaviour of integer fields."""
    model_name = 'export.integer'

    def test_empty(self):
        self.assertEqual(self.model.search(self.cr, openerp.SUPERUSER_ID, []), [],
                         "Test model should have no records")

    def test_0(self):
        # 0 is falsy, so it exports as False (an empty cell)
        self.assertEqual(
            self.export(0),
            [[False]])

    def test_basic_value(self):
        self.assertEqual(
            self.export(42),
            [[u'42']])

    def test_negative(self):
        self.assertEqual(
            self.export(-32),
            [[u'-32']])

    def test_huge(self):
        self.assertEqual(
            self.export(2**31-1),
            [[unicode(2**31-1)]])
class test_float_field(CreatorCase):
    """Export behaviour of (unconstrained-precision) float fields."""
    model_name = 'export.float'

    def test_0(self):
        # 0.0 is falsy, so it exports as False (an empty cell)
        self.assertEqual(
            self.export(0.0),
            [[False]])

    def test_epsilon(self):
        self.assertEqual(
            self.export(0.000000000027),
            [[u'2.7e-11']])

    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])

    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])

    def test_big(self):
        self.assertEqual(
            self.export(87654321.4678),
            [[u'87654321.4678']])
class test_decimal_field(CreatorCase):
    """Export behaviour of precision-constrained float (decimal) fields."""
    model_name = 'export.decimal'

    def test_0(self):
        self.assertEqual(
            self.export(0.0),
            [[False]])

    def test_epsilon(self):
        """ epsilon gets sliced to 0 due to precision
        """
        self.assertEqual(
            self.export(0.000000000027),
            [[False]])

    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])

    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])

    def test_big(self):
        # value is rounded to the field's precision on the way in
        self.assertEqual(
            self.export(87654321.4678), [[u'87654321.468']])
class test_string_field(CreatorCase):
    """Export behaviour of a size-bounded char field (value truncated on create)."""
    model_name = 'export.string.bounded'

    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [[False]])

    def test_within_bounds(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])

    def test_out_of_bounds(self):
        # over-long input is truncated to the field size (16 chars here)
        self.assertEqual(
            self.export("C for Sinking, "
                        "Java for Drinking, "
                        "Smalltalk for Thinking. "
                        "...and Power to the Penguin!"),
            [[u"C for Sinking, J"]])
class test_unbound_string_field(CreatorCase):
    """Export behaviour of an unbounded char field (no truncation)."""
    model_name = 'export.string'

    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [[False]])

    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])

    def test_big(self):
        self.assertEqual(
            self.export("We flew down weekly to meet with IBM, but they "
                        "thought the way to measure software was the amount "
                        "of code we wrote, when really the better the "
                        "software, the fewer lines of code."),
            [[u"We flew down weekly to meet with IBM, but they thought the "
              u"way to measure software was the amount of code we wrote, "
              u"when really the better the software, the fewer lines of "
              u"code."]])
class test_text(CreatorCase):
    """Export behaviour of text fields."""
    model_name = 'export.text'

    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [[False]])

    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])

    def test_big(self):
        self.assertEqual(
            self.export("So, `bind' is `let' and monadic programming is"
                        " equivalent to programming in the A-normal form. That"
                        " is indeed all there is to monads"),
            [[u"So, `bind' is `let' and monadic programming is equivalent to"
              u" programming in the A-normal form. That is indeed all there"
              u" is to monads"]])
class test_date(CreatorCase):
    """Export behaviour of date fields (ISO yyyy-mm-dd strings)."""
    model_name = 'export.date'

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_basic(self):
        self.assertEqual(
            self.export('2011-11-07'),
            [[u'2011-11-07']])
class test_datetime(CreatorCase):
    """Export behaviour of datetime fields."""
    model_name = 'export.datetime'

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_basic(self):
        self.assertEqual(
            self.export('2011-11-07 21:05:48'),
            [[u'2011-11-07 21:05:48']])

    def test_tz(self):
        """ Export ignores the timezone and always exports to UTC
        .. note:: on the other hand, export uses user lang for name_get
        """
        # NOTE: ignores user timezone, always exports to UTC
        self.assertEqual(
            self.export('2011-11-07 21:05:48', context={'tz': 'Pacific/Norfolk'}),
            [[u'2011-11-07 21:05:48']])
class test_selection(CreatorCase):
    """Export behaviour of selection fields, including under a translated lang."""
    model_name = 'export.selection'
    # (source label, fr_FR translation) pairs installed by test_localized_export
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_value(self):
        """ selections export the *label* for their value
        """
        self.assertEqual(
            self.export(2),
            [[u"Bar"]])

    def test_localized_export(self):
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # even with lang=fr_FR in context, export yields the source label
        self.assertEqual(
            self.export(2, context={'lang': 'fr_FR'}),
            [[u'Bar']])
class test_selection_function(CreatorCase):
    """Export behaviour of function-backed selection fields."""
    model_name = 'export.selection.function'

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_value(self):
        # FIXME: selection functions export the *value* itself
        self.assertEqual(
            self.export(1),
            [[u'1']])
        self.assertEqual(
            self.export(3),
            [[u'3']])
        # value 0 is falsy, so it exports as an empty cell instead of a label
        self.assertEqual(
            self.export(0),
            [[False]])
class test_m2o(CreatorCase):
    """Export behaviour of many2one fields."""
    model_name = 'export.many2one'

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_basic(self):
        """ Exported value is the name_get of the related object
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID, [integer_id]))[integer_id]
        self.assertEqual(
            self.export(integer_id),
            [[name]])

    def test_path(self):
        """ Can recursively export fields of m2o via path
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.export(integer_id, fields=['value/.id', 'value/value']),
            [[unicode(integer_id), u'42']])

    def test_external_id(self):
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        # Expecting the m2o target model name in the external id,
        # not this model's name
        external_id = u'__export__.export_integer_%d' % integer_id
        self.assertEqual(
            self.export(integer_id, fields=['value/id']),
            [[external_id]])
class test_o2m(CreatorCase):
    """Export behaviour of one2many fields (single row vs multi-row expansion)."""
    model_name = 'export.one2many'
    # o2m create-commands producing five child records
    commands = [
        (0, False, {'value': 4, 'str': 'record1'}),
        (0, False, {'value': 42, 'str': 'record2'}),
        (0, False, {'value': 36, 'str': 'record3'}),
        (0, False, {'value': 4, 'str': 'record4'}),
        (0, False, {'value': 13, 'str': 'record5'}),
    ]
    # expected name_get strings for those children
    names = [
        u'export.one2many.child:%d' % d['value']
        for c, _, d in commands
    ]

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_single(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.one2many.child:42']])

    def test_single_subfield(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.one2many.child:42', u'42']])

    def test_integrate_one_in_parent(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])

    def test_multiple_records(self):
        # parent fields only appear on the first exported row
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])

    def test_multiple_records_name(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[
                u'4', u','.join(self.names)
            ]])

    def test_multiple_records_id(self):
        export = self.export(self.commands, fields=['const', 'value/.id'])
        O2M_c = self.registry('export.one2many.child')
        ids = O2M_c.browse(self.cr, openerp.SUPERUSER_ID,
                           O2M_c.search(self.cr, openerp.SUPERUSER_ID, []))
        self.assertEqual(
            export,
            [
                ['4', str(ids[0].id)],
                ['', str(ids[1].id)],
                ['', str(ids[2].id)],
                ['', str(ids[3].id)],
                ['', str(ids[4].id)],
            ])

    def test_multiple_records_with_name_before(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value', 'value/value']),
            [[  # exports sub-fields of very first o2m
                u'4', u','.join(self.names), u'4'
            ]])

    def test_multiple_records_with_name_after(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value', 'value']),
            [  # completely ignores name_get request
                [u'4', u'4', ''],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])

    def test_multiple_subfields_neighbour(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/str', 'value/value']),
            [
                [u'4', u'record1', u'4'],
                ['', u'record2', u'42'],
                ['', u'record3', u'36'],
                ['', u'record4', u'4'],
                ['', u'record5', u'13'],
            ])

    def test_multiple_subfields_separated(self):
        self.assertEqual(
            self.export(self.commands, fields=['value/str', 'const', 'value/value']),
            [
                [u'record1', u'4', u'4'],
                [u'record2', '', u'42'],
                [u'record3', '', u'36'],
                [u'record4', '', u'4'],
                [u'record5', '', u'13'],
            ])
class test_o2m_multiple(CreatorCase):
    """Export behaviour of two sibling one2many fields on the same record."""
    model_name = 'export.one2many.multiple'

    def make(self, value=None, **values):
        # Unlike CreatorCase.make, accepts arbitrary field values (child1/child2).
        if value is not None: values['value'] = value
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, values)
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]

    def export(self, value=None, fields=('child1', 'child2',), context=None, **values):
        record = self.make(value, **values)
        return self.model._BaseModel__export_row(
            self.cr, openerp.SUPERUSER_ID, record,
            [f.split('/') for f in fields],
            context=context)

    def test_empty(self):
        self.assertEqual(
            self.export(child1=False, child2=False),
            [[False, False]])

    def test_single_per_side(self):
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})]),
            [[False, u'export.one2many.child.2:42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False),
            [[u'export.one2many.child.1:43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})]),
            [[u'export.one2many.child.1:43', u'export.one2many.child.2:42']])

    def test_single_integrate_subfield(self):
        fields = ['const', 'child1/value', 'child2/value']
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', False, u'42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False,
                        fields=fields),
            [[u'36', u'43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', u'43', u'42']])

    def test_multiple(self):
        """ With two "concurrent" o2ms, exports the first line combined, then
        exports the rows for the first o2m, then the rows for the second o2m.
        """
        fields = ['const', 'child1/value', 'child2/value']
        child1 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(), [4, 42, 36, 4, 13])]
        child2 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(10), [8, 12, 8, 55, 33, 13])]
        self.assertEqual(
            self.export(child1=child1, child2=False, fields=fields),
            [
                [u'36', u'4', False],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])
        self.assertEqual(
            self.export(child1=False, child2=child2, fields=fields),
            [
                [u'36', False, u'8'],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
        self.assertEqual(
            self.export(child1=child1, child2=child2, fields=fields),
            [
                [u'36', u'4', u'8'],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
class test_m2m(CreatorCase):
    """Export behaviour of many2many fields."""
    model_name = 'export.many2many'
    commands = [
        (0, False, {'value': 4, 'str': 'record000'}),
        (0, False, {'value': 42, 'str': 'record001'}),
        (0, False, {'value': 36, 'str': 'record010'}),
        (0, False, {'value': 4, 'str': 'record011'}),
        (0, False, {'value': 13, 'str': 'record100'}),
    ]
    # expected name_get strings for the linked records
    names = [
        u'export.many2many.other:%d' % d['value']
        for c, _, d in commands
    ]

    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])

    def test_single(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.many2many.other:42']])

    def test_single_subfield(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.many2many.other:42', u'42']])

    def test_integrate_one_in_parent(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])

    def test_multiple_records(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])

    def test_multiple_records_name(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[  # FIXME: hardcoded comma, import uses config.csv_internal_sep
                # resolution: remove configurable csv_internal_sep
                u'4', u','.join(self.names)
            ]])
# essentially same as o2m, so boring
class test_function(CreatorCase):
    """Export behaviour of function fields."""
    model_name = 'export.function'

    def test_value(self):
        """ Exports value normally returned by accessing the function field
        """
        self.assertEqual(
            self.export(42),
            [[u'3']])
| agpl-3.0 |
mrares/incubator-airflow | airflow/www/views.py | 1 | 97991 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import math
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
# Row caps applied to ad-hoc query and chart SQL results.
QUERY_LIMIT = 100000
CHART_LIMIT = 200000

# Shared DagBag for all web views; parsed once at import time.
dagbag = models.DagBag(settings.DAGS_FOLDER)

# Re-exported auth helpers from the configured login backend.
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user

FILTER_BY_OWNER = False

PAGE_SIZE = conf.getint('webserver', 'page_size')

if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner if authentication is enabled and filter_by_owner is true
    FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
    """Column formatter: link a DAG id to its graph view (id sanitized via bleach)."""
    dag_id = bleach.clean(m.dag_id)
    url = url_for(
        'airflow.graph',
        dag_id=dag_id)
    return Markup(
        '<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
    """Column formatter: a book glyph linking to the task instance's log page."""
    return Markup(
        '<a href="{m.log_url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
    """Column formatter: link a task instance to its task view, plus a filter
    icon linking to the graph view rooted at this task."""
    dag_id = bleach.clean(m.dag_id)
    task_id = bleach.clean(m.task_id)
    url = url_for(
        'airflow.task',
        dag_id=dag_id,
        task_id=task_id,
        execution_date=m.execution_date.isoformat())
    url_root = url_for(
        'airflow.graph',
        dag_id=dag_id,
        root=task_id,
        execution_date=m.execution_date.isoformat())
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
        aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))
def state_token(state):
    """Render a task/DAG state as a colored bootstrap label."""
    color = State.color(state)
    return Markup(
        '<span class="label" style="background-color:{color};">'
        '{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
    """Column formatter wrapper around state_token for model rows."""
    return state_token(m.state)
def duration_f(v, c, m, p):
    """Column formatter: duration as a timedelta; implicitly None while unfinished."""
    if m.end_date and m.duration:
        return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Column formatter: compact ISO datetime, dropping the year prefix when
    it matches the current (UTC) year."""
    attr = getattr(m, p)
    dttm = attr.isoformat() if attr else ''
    if datetime.utcnow().isoformat()[:4] == dttm[:4]:
        dttm = dttm[5:]
    return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
    """Column formatter: wrap the attribute value in <nobr> to prevent wrapping."""
    return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
    """Column formatter: link a chart row's label to its rendered chart page.

    The chart's stored default_params (a Python-literal string) become extra
    query arguments on the link; malformed params fall back to none.
    """
    try:
        default_params = ast.literal_eval(m.default_params)
    except (SyntaxError, ValueError, TypeError):
        # Narrowed from a bare `except:` so that non-parse errors (including
        # KeyboardInterrupt/SystemExit) are no longer swallowed.
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
    """Column formatter: link a pool name to its filtered task-instance list."""
    target = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    anchor = "<a href='{0}'>{1}</a>".format(target, m.pool)
    return Markup(anchor)
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Highlight string `s` as HTML (with line numbers) using the given lexer class."""
    return highlight(
        s,
        lexer(),
        HtmlFormatter(linenos=True),
    )
def render(obj, lexer):
    """Render a string, list/tuple of strings, or dict of strings to
    pygments-highlighted HTML; anything else renders as an empty string."""
    chunks = []
    if isinstance(obj, basestring):
        chunks.append(pygment_html_render(obj, lexer))
    elif isinstance(obj, (tuple, list)):
        for index, item in enumerate(obj):
            chunks.append("<div>List item #{}</div>".format(index))
            chunks.append("<div>" + pygment_html_render(item, lexer) + "</div>")
    elif isinstance(obj, dict):
        for key, val in obj.items():
            chunks.append('<div>Dict item "{}"</div>'.format(key))
            chunks.append("<div>" + pygment_html_render(val, lexer) + "</div>")
    return "".join(chunks)
def wrapped_markdown(s):
    """Render markdown text and wrap the result in a rich_doc div."""
    return '<div class="rich_doc">{}</div>'.format(markdown.markdown(s))
# Maps known task-attribute / template-field names to the HTML renderer used
# on the task and "Rendered" pages.
attr_renderer = {
    'bash_command': lambda x: render(x, lexers.BashLexer),
    'hql': lambda x: render(x, lexers.SqlLexer),
    'sql': lambda x: render(x, lexers.SqlLexer),
    'doc': lambda x: render(x, lexers.TextLexer),
    'doc_json': lambda x: render(x, lexers.JsonLexer),
    'doc_rst': lambda x: render(x, lexers.RstLexer),
    'doc_yaml': lambda x: render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    # callables are rendered as their own source code
    'python_callable': lambda x: render(
        inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
    """Decorator for views requiring data profiling access"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Allowed when auth is disabled entirely, or the logged-in user has
        # the data-profiling permission.
        if (
            current_app.config['LOGIN_DISABLED'] or
            (not current_user.is_anonymous() and current_user.data_profiling())
        ):
            return f(*args, **kwargs)
        else:
            flash("This page requires data profiling privileges", "error")
            return redirect(url_for('admin.index'))
    return decorated_function
def fused_slots(v, c, m, p):
    """Column formatter: link a pool's used-slot count to its running task instances."""
    filter_url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=running')
    anchor = "<a href='{0}'>{1}</a>".format(filter_url, m.used_slots())
    return Markup(anchor)
def fqueued_slots(v, c, m, p):
    """Column formatter: link a pool's queued-slot count to its queued task instances."""
    filter_url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=queued&sort=10&desc=1')
    anchor = "<a href='{0}'>{1}</a>".format(filter_url, m.queued_slots())
    return Markup(anchor)
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
    """Walk `tasks` (a task, a list of tasks, possibly nested SubDagOperators),
    accumulating task ids, subdag dag ids, and a task_id -> dag mapping into
    the caller-supplied lists/dict (mutated in place).
    """
    if isinstance(tasks, list):
        for task in tasks:
            recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
        return
    if isinstance(tasks, SubDagOperator):
        subtasks = tasks.subdag.tasks
        dag_ids.append(tasks.subdag.dag_id)
        for subtask in subtasks:
            if subtask.task_id not in task_ids:
                task_ids.append(subtask.task_id)
                task_id_to_dag[subtask.task_id] = tasks.subdag
        recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
    if isinstance(tasks, BaseOperator):
        task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
    """
    TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
    approximate the size of generated chart (otherwise the charts are tiny and unreadable
    when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
    charts, that is charts that take up space based on the size of the components within.
    """
    base_height = 600
    pixels_per_task = 10
    return base_height + pixels_per_task * len(dag.tasks)
class Airflow(BaseView):
    """Main Airflow web views (DAG pages, charts, stats, logs)."""

    def is_visible(self):
        # Hide this view from the flask-admin menu; its pages are reached by URL.
        return False

    @expose('/')
    @login_required
    def index(self):
        # Landing page: the DAG list.
        return self.render('airflow/dags.html')
    @expose('/chart_data')
    @data_profiling_required
    @wwwutils.gzipped
    # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
    def chart_data(self):
        """Build the JSON payload (or raw CSV when ?csv=true) backing a chart.

        Renders the chart's templated SQL in a sandbox, executes it through
        the chart's connection hook, and shapes the DataFrame into either a
        datatable payload or an nvd3 chart payload.
        """
        from airflow import macros
        import pandas as pd
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        csv = request.args.get('csv') == "true"
        chart = session.query(models.Chart).filter_by(id=chart_id).first()
        db = session.query(
            models.Connection).filter_by(conn_id=chart.conn_id).first()
        session.expunge_all()
        session.commit()
        session.close()
        payload = {
            "state": "ERROR",
            "error": ""
        }
        # Processing templated fields
        try:
            args = ast.literal_eval(chart.default_params)
            if type(args) is not type(dict()):
                raise AirflowException('Not a dict')
        except:
            args = {}
            payload['error'] += (
                "Default params is not valid, string has to evaluate as "
                "a Python dictionary. ")
        request_dict = {k: request.args.get(k) for k in request.args}
        args.update(request_dict)
        args['macros'] = macros
        # Sandboxed Jinja keeps user-supplied chart templates from reaching
        # unsafe attributes.
        sandbox = ImmutableSandboxedEnvironment()
        sql = sandbox.from_string(chart.sql).render(**args)
        label = sandbox.from_string(chart.label).render(**args)
        payload['sql_html'] = Markup(highlight(
            sql,
            lexers.SqlLexer(),  # Lexer call
            HtmlFormatter(noclasses=True))
        )
        payload['label'] = label
        pd.set_option('display.max_colwidth', 100)
        hook = db.get_hook()
        try:
            df = hook.get_pandas_df(
                wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
            df = df.fillna(0)
        except Exception as e:
            payload['error'] += "SQL execution failed. Details: " + str(e)
        if csv:
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        if not payload['error'] and len(df) == CHART_LIMIT:
            payload['warning'] = (
                "Data has been truncated to {0}"
                " rows. Expect incomplete results.").format(CHART_LIMIT)
        if not payload['error'] and len(df) == 0:
            payload['error'] += "Empty result set. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'series' and
                chart.chart_type != "datatable" and
                len(df.columns) < 3):
            payload['error'] += "SQL needs to return at least 3 columns. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'columns' and
                len(df.columns) < 2):
            payload['error'] += "SQL needs to return at least 2 columns. "
        elif not payload['error']:
            import numpy as np
            chart_type = chart.chart_type
            data = None
            if chart.show_datatable or chart_type == "datatable":
                data = df.to_dict(orient="split")
                data['columns'] = [{'title': c} for c in data['columns']]
                payload['data'] = data
            # Trying to convert time to something Highcharts likes
            x_col = 1 if chart.sql_layout == 'series' else 0
            if chart.x_is_date:
                try:
                    # From string to datetime
                    df[df.columns[x_col]] = pd.to_datetime(
                        df[df.columns[x_col]])
                    df[df.columns[x_col]] = df[df.columns[x_col]].apply(
                        lambda x: int(x.strftime("%s")) * 1000)
                except Exception as e:
                    payload['error'] = "Time conversion failed"
            if chart_type == 'datatable':
                payload['state'] = 'SUCCESS'
                return wwwutils.json_response(payload)
            else:
                if chart.sql_layout == 'series':
                    # User provides columns (series, x, y)
                    xaxis_label = df.columns[1]
                    yaxis_label = df.columns[2]
                    df[df.columns[2]] = df[df.columns[2]].astype(np.float)
                    df = df.pivot_table(
                        index=df.columns[1],
                        columns=df.columns[0],
                        values=df.columns[2], aggfunc=np.sum)
                else:
                    # User provides columns (x, y, metric1, metric2, ...)
                    xaxis_label = df.columns[0]
                    yaxis_label = 'y'
                    df.index = df[df.columns[0]]
                    df = df.sort(df.columns[0])
                    del df[df.columns[0]]
                    for col in df.columns:
                        df[col] = df[col].astype(np.float)
                df = df.fillna(0)
                NVd3ChartClass = chart_mapping.get(chart.chart_type)
                NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
                nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
                for col in df.columns:
                    nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
                try:
                    nvd3_chart.buildcontent()
                    payload['chart_type'] = nvd3_chart.__class__.__name__
                    payload['htmlcontent'] = nvd3_chart.htmlcontent
                except Exception as e:
                    payload['error'] = str(e)
                payload['state'] = 'SUCCESS'
                payload['request_dict'] = request_dict
        return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
    @expose('/task_stats')
    def task_stats(self):
        """Return per-DAG task-instance state counts as JSON.

        Counts task instances from all running dag_runs, plus, for DAGs with
        no running run, from the most recent completed dag_run.
        """
        TI = models.TaskInstance
        DagRun = models.DagRun
        Dag = models.DagModel
        session = Session()
        # Most recent non-running run per active DAG.
        LastDagRun = (
            session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state != State.RUNNING)
            .filter(Dag.is_active == True)
            .group_by(DagRun.dag_id)
            .subquery('last_dag_run')
        )
        # All currently running runs of active DAGs.
        RunningDagRun = (
            session.query(DagRun.dag_id, DagRun.execution_date)
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state == State.RUNNING)
            .filter(Dag.is_active == True)
            .subquery('running_dag_run')
        )
        # Select all task_instances from active dag_runs.
        # If no dag_run is active, return task instances from most recent dag_run.
        LastTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(LastDagRun, and_(
                LastDagRun.c.dag_id == TI.dag_id,
                LastDagRun.c.execution_date == TI.execution_date))
        )
        RunningTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(RunningDagRun, and_(
                RunningDagRun.c.dag_id == TI.dag_id,
                RunningDagRun.c.execution_date == TI.execution_date))
        )
        UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
        qry = (
            session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
            .group_by(UnionTI.c.dag_id, UnionTI.c.state)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        session.commit()
        session.close()
        # Emit a (possibly zero) count for every task state of every DAG.
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.safe_dag_id] = []
            for state in State.task_states:
                try:
                    count = data[dag.dag_id][state]
                except:
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.safe_dag_id].append(d)
        return wwwutils.json_response(payload)
    @expose('/code')
    @login_required
    def code(self):
        """Show the DAG's source file with syntax highlighting."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        title = dag_id
        try:
            with open(dag.fileloc, 'r') as f:
                code = f.read()
            html_code = highlight(
                code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
        except IOError as e:
            # Unreadable/missing DAG file: show the error instead of the source.
            html_code = str(e)
        return self.render(
            'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
            root=request.args.get('root'),
            demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
    @current_app.errorhandler(404)
    def circles(self):
        """Custom 404 page."""
        return render_template(
            'airflow/circles.html', hostname=socket.getfqdn()), 404
    @current_app.errorhandler(500)
    def show_traceback(self):
        """Custom 500 page including the current traceback."""
        from airflow.utils import asciiart as ascii_
        return render_template(
            'airflow/traceback.html',
            hostname=socket.getfqdn(),
            nukular=ascii_.nukular,
            info=traceback.format_exc()), 500
    @expose('/noaccess')
    def noaccess(self):
        """Static access-denied page."""
        return self.render('airflow/noaccess.html')
    @expose('/pickle_info')
    @login_required
    def pickle_info(self):
        """JSON map of dag_id -> pickle info, for one DAG or all non-subdag DAGs."""
        d = {}
        dag_id = request.args.get('dag_id')
        dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
        for dag in dags:
            if not dag.is_subdag:
                d[dag.dag_id] = dag.pickle_info()
        return wwwutils.json_response(d)
    @expose('/login', methods=['GET', 'POST'])
    def login(self):
        # Delegates entirely to the configured login backend.
        return airflow.login.login(self, request)
    @expose('/logout')
    def logout(self):
        """Log the user out and return to the admin index."""
        logout_user()
        flash('You have been logged out.')
        return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
session = Session()
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
else:
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
try:
ti.task = dag.get_task(ti.task_id)
logs = handler.read(ti)
except AttributeError as e:
logs = ["Task log handler {} does not support read logs.\n{}\n" \
.format(task_log_reader, e.message)]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    @wwwutils.action_logging
    def task(self):
        """Render the Task Instance Details page.

        Shows the task instance's attributes, the task's attributes, any
        "special" code-like attributes rendered via attr_renderer, and the
        scheduler's list of currently-unmet dependencies for this TI.
        """
        TI = models.TaskInstance
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        # Copy so resolve_template_files() cannot mutate the shared DAG's task.
        task = copy.copy(dag.get_task(task_id))
        task.resolve_template_files()
        ti = TI(task=task, execution_date=dttm)
        ti.refresh_from_db()
        ti_attrs = []
        for attr_name in dir(ti):
            if not attr_name.startswith('_'):
                attr = getattr(ti, attr_name)
                # NOTE(review): comparing against type(self.task) appears
                # intended to filter out bound-method attributes; confirm
                # that 'task' resolves to a method on this view class.
                if type(attr) != type(self.task):
                    ti_attrs.append((attr_name, str(attr)))
        task_attrs = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    task_attrs.append((attr_name, str(attr)))
        # Color coding the special attributes that are code
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        # Shown when the scheduler reports no failed dependencies but the TI
        # is still not running.
        no_failed_deps_result = [(
            "Unknown",
            dedent("""\
            All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
            - The scheduler is down or under heavy load<br/>
            {}
            <br/>
            If this task instance does not start soon please contact your Airflow administrator for assistance."""
                   .format(
                       "- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
                       if ti.state == State.NONE else "")))]
        # Use the scheduler's context to figure out which dependencies are not met
        dep_context = DepContext(SCHEDULER_DEPS)
        failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
                              ti.get_failed_dep_statuses(
                                  dep_context=dep_context)]
        title = "Task Instance Details"
        return self.render(
            'airflow/task.html',
            task_attrs=task_attrs,
            ti_attrs=ti_attrs,
            failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
    @expose('/run')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def run(self):
        """Queue a single task instance for immediate execution.

        Only supported with the CeleryExecutor. Dependency checks can be
        selectively bypassed via the ignore_* query arguments; the TI is
        only queued if the remaining QUEUE_DEPS are satisfied.
        """
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        ignore_all_deps = request.args.get('ignore_all_deps') == "true"
        ignore_task_deps = request.args.get('ignore_task_deps') == "true"
        ignore_ti_state = request.args.get('ignore_ti_state') == "true"
        try:
            from airflow.executors import GetDefaultExecutor
            from airflow.executors.celery_executor import CeleryExecutor
            executor = GetDefaultExecutor()
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
        except ImportError:
            # in case CeleryExecutor cannot be imported it is not active either
            flash("Only works with the CeleryExecutor, sorry", "error")
            return redirect(origin)
        ti = models.TaskInstance(task=task, execution_date=execution_date)
        ti.refresh_from_db()
        # Make sure the task instance can be queued
        dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
        if failed_deps:
            failed_deps_str = ", ".join(
                ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
            flash("Could not queue task instance for execution, dependencies not met: "
                  "{}".format(failed_deps_str),
                  "error")
            return redirect(origin)
        executor.start()
        executor.queue_task_instance(
            ti,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        executor.heartbeat()
        flash(
            "Sent {} to the message queue, "
            "it should start any moment now.".format(ti))
        return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dags, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = 0
for dag in dags:
count += dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = []
for dag in dags:
tis.extend(dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True))
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
    @expose('/clear')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def clear(self):
        """Clear task instances matching task_id plus optional relatives.

        Query flags select upstream/downstream tasks, past/future dates,
        subdag recursion and descendant DAGs; the actual clearing (or the
        dry-run confirmation page) is delegated to _clear_dag_tis.
        """
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        recursive = request.args.get('recursive') == "true"
        descendants = request.args.get('descendants') == "true"
        # Anchor the selection on exactly this task (regex-escaped match).
        dags = [dag.sub_dag(
            task_regex=r"^{0}$".format(task_id),
            include_downstream=downstream,
            include_upstream=upstream)]
        if descendants:
            dags.extend(dag.descendants(
                dagbag, task_ids=[task_id], include_downstream=downstream,
                include_upstream=upstream, recursive=recursive))
        # 'future' leaves the end open; 'past' leaves the start open.
        end_date = execution_date if not future else None
        start_date = execution_date if not past else None
        return self._clear_dag_tis(dags, start_date, end_date, origin,
                                   recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
descendants = request.args.get('descendants') == "true"
dag = dagbag.get_dag(dag_id)
dags = [dag]
if descendants:
dags.extend(dag.descendants(dagbag, task_ids=[task_id], recursive=True))
execution_date = dateutil.parser.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dags, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
    @expose('/dagrun_success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def dagrun_success(self):
        """Mark an entire DAG run as successful, with a confirmation step.

        Without confirmation, performs a dry run and renders the list of
        task instances that would be altered; with confirmation, commits
        the state change and redirects back to *origin*.
        """
        dag_id = request.args.get('dag_id')
        execution_date = request.args.get('execution_date')
        confirmed = request.args.get('confirmed') == 'true'
        origin = request.args.get('origin')
        if not execution_date:
            flash('Invalid execution date', 'error')
            return redirect(origin)
        execution_date = dateutil.parser.parse(execution_date)
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash('Cannot find DAG: {}'.format(dag_id), 'error')
            return redirect(origin)
        # commit=confirmed: a dry run unless the user already confirmed.
        new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
                                          commit=confirmed)
        if confirmed:
            flash('Marked success on {} task instances'.format(len(new_dag_state)))
            return redirect(origin)
        else:
            details = '\n'.join([str(t) for t in new_dag_state])
            response = self.render('airflow/confirm.html',
                                   message=("Here's the list of task instances you are "
                                            "about to mark as successful:"),
                                   details=details)
            return response
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    def tree(self):
        """Render the tree view: a D3 tree of tasks by execution date.

        Builds a recursive node structure over the DAG's topology (rooted
        at dag.roots, following upstream edges) and attaches per-date task
        instance data for the selected window of runs.
        """
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Restrict the view to 'root' and everything upstream of it.
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        session = settings.Session()
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        # Negative num walks the schedule backwards from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        DR = models.DagRun
        dag_runs = (
            session.query(DR)
            .filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
            .all()
        )
        dag_runs = {
            dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
        dates = sorted(list(dag_runs.keys()))
        max_date = max(dates) if dates else None
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        task_instances = {}
        for ti in tis:
            tid = alchemy_to_dict(ti)
            dr = dag_runs.get(ti.execution_date)
            tid['external_trigger'] = dr['external_trigger'] if dr else False
            task_instances[(ti.task_id, ti.execution_date)] = tid
        expanded = []
        # The default recursion traces every path so that tree view has full
        # expand/collapse functionality. After 5,000 nodes we stop and fall
        # back on a quick DFS search for performance. See PR #320.
        node_count = [0]
        node_limit = 5000 / max(1, len(dag.roots))
        def recurse_nodes(task, visited):
            visited.add(task)
            node_count[0] += 1
            children = [
                recurse_nodes(t, visited) for t in task.upstream_list
                if node_count[0] < node_limit or t not in visited]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            def set_duration(tid):
                # Running TIs have no end_date yet: show a live duration.
                if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
                        tid["start_date"] is not None):
                    d = datetime.utcnow() - dateutil.parser.parse(tid["start_date"])
                    tid["duration"] = d.total_seconds()
                return tid
            return {
                'name': task.task_id,
                'instances': [
                    set_duration(task_instances.get((task.task_id, d))) or {
                        'execution_date': d.isoformat(),
                        'task_id': task.task_id
                    }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        data = {
            'name': '[DAG]',
            'children': [recurse_nodes(t, set()) for t in dag.roots],
            'instances': [
                dag_runs.get(d) or {'execution_date': d.isoformat()}
                for d in dates],
        }
        data = json.dumps(data, indent=4, default=json_ser)
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            form=form,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    def graph(self):
        """Render the graph view of a DAG for one DAG run.

        Builds node/edge JSON for the dagre-d3 renderer, a form for picking
        the DAG run and layout orientation, and per-task instance state for
        the selected execution date.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/')
        root = request.args.get('root')
        if root:
            # Restrict the view to 'root' and everything upstream of it.
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        arrange = request.args.get('arrange', dag.orientation)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        def get_upstream(task):
            # Walk upstream edges recursively, de-duplicating edges.
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                    get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.utcnow().date()
        DR = models.DagRun
        drs = (
            session.query(DR)
            .filter_by(dag_id=dag_id)
            .order_by(desc(DR.execution_date)).all()
        )
        dr_choices = []
        dr_state = None
        for dr in drs:
            dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
            if dttm == dr.execution_date:
                dr_state = dr.state
        # Form is declared inline because its choices depend on this DAG's runs.
        class GraphForm(Form):
            execution_date = SelectField("DAG run", choices=dr_choices)
            arrange = SelectField("Layout", choices=(
                ('LR', "Left->Right"),
                ('RL', "Right->Left"),
                ('TB', "Top->Bottom"),
                ('BT', "Bottom->Top"),
            ))
        form = GraphForm(
            data={'execution_date': dttm.isoformat(), 'arrange': arrange})
        task_instances = {
            ti.task_id: alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)}
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks}
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        session.close()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            state_token=state_token(dr_state),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2), )
    @expose('/duration')
    @login_required
    @wwwutils.action_logging
    def duration(self):
        """Render per-task duration charts over a window of runs.

        Produces two nvd3 line charts: raw task durations and cumulative
        durations (successful run plus prior failed attempts), scaled to
        the most readable time unit.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        cum_chart = nvd3.lineChart(
            name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
        y = defaultdict(list)
        x = defaultdict(list)
        cum_y = defaultdict(list)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        TF = models.TaskFail
        ti_fails = (
            session
            .query(TF)
            .filter(
                TF.dag_id == dag.dag_id,
                TF.execution_date >= min_date,
                TF.execution_date <= base_date,
                TF.task_id.in_([t.task_id for t in dag.tasks]))
            .all()
        )
        # Sum failed-attempt durations per (dag, task, execution_date).
        fails_totals = defaultdict(int)
        for tf in ti_fails:
            dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
            fails_totals[dict_key] += tf.duration
        for ti in tis:
            if ti.duration:
                dttm = wwwutils.epoch(ti.execution_date)
                x[ti.task_id].append(dttm)
                y[ti.task_id].append(float(ti.duration))
                fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
                fails_total = fails_totals[fails_dict_key]
                cum_y[ti.task_id].append(float(ti.duration + fails_total))
        # determine the most relevant time unit for the set of task instance
        # durations for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
        # update the y Axis on both charts to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Duration ({})'.format(y_unit))
        cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                                label='Duration ({})'.format(cum_y_unit))
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
                cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
                                    y=scale_time_units(cum_y[task.task_id],
                                                       cum_y_unit))
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        cum_chart.buildcontent()
        # Inject a 'chartload' trigger just before the chart's closing
        # script so the page can hook chart completion.
        s_index = cum_chart.htmlcontent.rfind('});')
        cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
                                 "$( document ).trigger('chartload')" +
                                 cum_chart.htmlcontent[s_index:])
        return self.render(
            'airflow/duration_chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent,
            cum_chart=cum_chart.htmlcontent
        )
    @expose('/tries')
    @login_required
    @wwwutils.action_logging
    def tries(self):
        """Render a chart of try_number per task over a window of runs."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
            width="1200")
        for task in dag.tasks:
            y = []
            x = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                dttm = wwwutils.epoch(ti.execution_date)
                x.append(dttm)
                y.append(ti.try_number)
            if x:
                chart.add_serie(name=task.task_id, x=x, y=y)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        # 'tries' is empty exactly when there are no TIs in the window.
        tries = sorted(list({ti.try_number for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if tries else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent
        )
    @expose('/landing_times')
    @login_required
    @wwwutils.action_logging
    def landing_times(self):
        """Render a chart of task landing times over a window of runs.

        Landing time is the delay between the schedule boundary following a
        TI's execution date and the TI's end_date.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        y = {}
        x = {}
        for task in dag.tasks:
            y[task.task_id] = []
            x[task.task_id] = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                # Measure from the end of the schedule interval, not its start.
                ts = ti.execution_date
                if dag.schedule_interval and dag.following_schedule(ts):
                    ts = dag.following_schedule(ts)
                if ti.end_date:
                    dttm = wwwutils.epoch(ti.execution_date)
                    secs = (ti.end_date - ts).total_seconds()
                    x[ti.task_id].append(dttm)
                    y[ti.task_id].append(secs)
        # determine the most relevant time unit for the set of landing times
        # for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        # update the y Axis to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Landing Time ({})'.format(y_unit))
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            chart=chart.htmlcontent,
            height=str(chart_height + 100) + "px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
        )
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.utcnow()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
    @expose('/gantt')
    @login_required
    @wwwutils.action_logging
    def gantt(self):
        """Render a Gantt chart of task instances for one execution date.

        Only TIs with a start_date are shown; running TIs get a provisional
        end time of "now" so bars render with a live width.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.utcnow().date()
        form = DateTimeForm(data={'execution_date': dttm})
        tis = [
            ti for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        for ti in tis:
            # Running TIs have no end_date; use "now" for a live bar width.
            end_date = ti.end_date if ti.end_date else datetime.utcnow()
            # [:-4] trims timestamps to millisecond precision for display.
            tasks.append({
                'startDate': wwwutils.epoch(ti.start_date),
                'endDate': wwwutils.epoch(end_date),
                'isoStart': ti.start_date.isoformat()[:-4],
                'isoEnd': end_date.isoformat()[:-4],
                'taskName': ti.task_id,
                'duration': "{}".format(end_date - ti.start_date)[:-4],
                'status': ti.state,
                'executionDate': ti.execution_date.isoformat(),
            })
        states = {ti.state: ti.state for ti in tis}
        data = {
            'taskNames': [ti.task_id for ti in tis],
            'tasks': tasks,
            'taskStatus': states,
            'height': len(tis) * 25 + 25,
        }
        session.commit()
        session.close()
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            data=json.dumps(data, indent=2),
            base_date='',
            demo_mode=demo_mode,
            root=root,
        )
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
    """Admin landing page: the filterable, searchable, paginated DAG list."""
    @expose("/")
    @login_required
    def index(self):
        """Render airflow/dags.html.

        Merges DagModel rows from the metadata db with DAGs from the live
        DagBag, applies owner filtering (FILTER_BY_OWNER + OWNER_MODE),
        the showPaused toggle, free-text search, and pagination.
        """
        session = Session()
        DM = models.DagModel
        # restrict the dags shown if filter_by_owner and current user is not superuser
        do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
        owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
        hide_paused_dags_by_default = conf.getboolean('webserver',
                                                      'hide_paused_dags_by_default')
        # The literal string 'None' means "not supplied in the URL";
        # the config default then decides whether paused dags are hidden.
        show_paused_arg = request.args.get('showPaused', 'None')
        def get_int_arg(value, default=0):
            # Defensive parse of the ?page= query argument.
            try:
                return int(value)
            except ValueError:
                return default
        arg_current_page = request.args.get('page', '0')
        arg_search_query = request.args.get('search', None)
        dags_per_page = PAGE_SIZE
        current_page = get_int_arg(arg_current_page, default=0)
        if show_paused_arg.strip().lower() == 'false':
            hide_paused = True
        elif show_paused_arg.strip().lower() == 'true':
            hide_paused = False
        else:
            hide_paused = hide_paused_dags_by_default
        # read orm_dags from the db
        sql_query = session.query(DM)
        if do_filter and owner_mode == 'ldapgroup':
            sql_query = sql_query.filter(
                ~DM.is_subdag,
                DM.is_active,
                DM.owners.in_(current_user.ldap_groups)
            )
        elif do_filter and owner_mode == 'user':
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active,
                DM.owners == current_user.user.username
            )
        else:
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active
            )
        # optionally filter out "paused" dags
        if hide_paused:
            sql_query = sql_query.filter(~DM.is_paused)
        orm_dags = {dag.dag_id: dag for dag
                    in sql_query
                    .all()}
        # Surface DAG-file parse errors as flash messages on the index page.
        import_errors = session.query(models.ImportError).all()
        for ie in import_errors:
            flash(
                "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
                "error")
        session.expunge_all()
        session.commit()
        session.close()
        # get a list of all non-subdag dags visible to everyone
        # optionally filter out "paused" dags
        if hide_paused:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag and not dag.is_paused]
        else:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag]
        # optionally filter to get only dags that the user should see
        if do_filter and owner_mode == 'ldapgroup':
            # only show dags owned by someone in @current_user.ldap_groups
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner in current_user.ldap_groups
            }
        elif do_filter and owner_mode == 'user':
            # only show dags owned by @current_user.user.username
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner == current_user.user.username
            }
        else:
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
            }
        if arg_search_query:
            lower_search_query = arg_search_query.lower()
            # filter by dag_id
            webserver_dags_filtered = {
                dag_id: dag
                for dag_id, dag in webserver_dags.items()
                if (lower_search_query in dag_id.lower() or
                    lower_search_query in dag.owner.lower())
            }
            # NOTE: DagModel uses ``owners`` while DagBag dags use ``owner``.
            all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
                               if lower_search_query in dag.dag_id.lower() or
                               lower_search_query in dag.owners.lower()]) |
                           set(webserver_dags_filtered.keys()))
            sorted_dag_ids = sorted(all_dag_ids)
        else:
            webserver_dags_filtered = webserver_dags
            sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
        start = current_page * dags_per_page
        end = start + dags_per_page
        num_of_all_dags = len(sorted_dag_ids)
        page_dag_ids = sorted_dag_ids[start:end]
        num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
        # Union of dag ids and owners feeds the search box autocomplete.
        auto_complete_data = set()
        for dag in webserver_dags_filtered.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owner)
        for dag in orm_dags.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owners)
        return self.render(
            'airflow/dags.html',
            webserver_dags=webserver_dags_filtered,
            orm_dags=orm_dags,
            hide_paused=hide_paused,
            current_page=current_page,
            search_query=arg_search_query if arg_search_query else '',
            page_size=dags_per_page,
            num_of_pages=num_of_pages,
            num_dag_from=start + 1,
            num_dag_to=min(end, num_of_all_dags),
            num_of_all_dags=num_of_all_dags,
            paging=wwwutils.generate_pages(current_page, num_of_pages,
                                           search=arg_search_query,
                                           showPaused=not hide_paused),
            dag_ids_in_page=page_dag_ids,
            auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad Hoc Query UI: run SQL against a configured Connection.

    Results render as an HTML table (limited to QUERY_LIMIT rows) or are
    downloaded as CSV when csv=true is posted.
    """
    @expose('/', methods=['POST', 'GET'])
    @wwwutils.gzipped
    def query(self):
        """Render the query form; when a connection is chosen, run the SQL."""
        session = settings.Session()
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.form.get('conn_id')
        csv = request.form.get('csv') == "true"
        sql = request.form.get('sql')
        class QueryForm(Form):
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        # Initialize so the csv branch below can tell whether the query
        # actually produced a DataFrame.
        df = None
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
                # df = hook.get_pandas_df(sql)
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes=[
                        'table', 'table-bordered', 'table-striped', 'no-wrap'],
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True
            if has_data and len(df) == QUERY_LIMIT:
                flash(
                    "Query output truncated at " + str(QUERY_LIMIT) +
                    " rows", 'info')
            if not has_data and error:
                flash('No data', 'error')
            # Bug fix: previously a failed query combined with csv=true hit
            # an unbound ``df`` here and raised a NameError; now we fall
            # through to the form (the error was already flashed).
            if csv and df is not None:
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
        form = QueryForm(request.form, data=data)
        session.commit()
        session.close()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
class AirflowModelView(ModelView):
    """Base flask-admin ModelView wiring Airflow's templates and page size."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    column_display_actions = True
    page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """
    Modifying the base ModelView class for non edit, browse only operations
    """
    named_filter_urls = True
    # Browse-only: no create/edit/delete, but do show primary keys.
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over Pools; links through to used/queued slot listings."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True
    form_args = {
        'pool': {
            # A pool must have a name.
            'validators': [
                validators.DataRequired(),
            ]
        }
    }
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
    """Browse-only listing of SLA miss records."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    # These fields are bookkeeping; never editable through the form.
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over Chart definitions (ad-hoc SQL-backed charts)."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',
    )
    column_list = (
        'label',
        'conn_id',
        'chart_type',
        'owner',
        'last_modified',
    )
    column_sortable_list = (
        'label',
        'conn_id',
        'chart_type',
        ('owner', 'owner.username'),
        'last_modified',
    )
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
                    "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        # NOTE(review): this list comprehension runs once at import time,
        # so connections added later won't appear until the webserver
        # restarts -- confirm whether that is intended.
        'conn_id': [
            (c.conn_id, c.conn_id)
            for c in (
                Session().query(models.Connection.conn_id)
                .group_by(models.Connection.conn_id)
            )
        ]
    }
    def on_model_change(self, form, model, is_created=True):
        """Bump iteration_no, stamp user_id (if unset) and last_modified."""
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        if not model.user_id and current_user and hasattr(current_user, 'id'):
            model.user_id = current_user.id
        model.last_modified = datetime.utcnow()
# chart_type (as stored on the Chart model) -> nvd3 chart kind used by the
# frontend when the chart is rendered.
chart_mapping = {
    'line': 'lineChart',
    'spline': 'lineChart',
    'bar': 'multiBarChart',
    'column': 'multiBarChart',
    'area': 'stackedAreaChart',
    'stacked_area': 'stackedAreaChart',
    'percent_area': 'stackedAreaChart',
    'datatable': 'datatable',
}
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over KnownEvents; end_date must be >= start_date."""
    verbose_name = "known event"
    verbose_name_plural = "known events"
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description',
    )
    form_args = {
        'label': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'event_type': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'start_date': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'end_date': {
            'validators': [
                validators.DataRequired(),
                # Enforces a non-negative event duration.
                GreaterEqualThan(fieldname='start_date'),
            ],
        },
        'reported_by': {
            'validators': [
                validators.DataRequired(),
            ],
        }
    }
    column_list = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
    )
    column_default_sort = ("start_date", True)
    column_sortable_list = (
        'label',
        # Sort relationship columns by a concrete column on the far side.
        ('event_type', 'event_type.know_event_type'),
        'start_date',
        'end_date',
        ('reported_by', 'reported_by.username'),
    )
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Plain CRUD view over KnownEventType; the ModelView defaults suffice."""
    pass
# NOTE: For debugging / troubleshooting
# mv = KnownEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over Variables; masks values for sensitive-looking keys."""
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    list_template = 'airflow/variable_list.html'

    def hidden_field_formatter(view, context, model, name):
        """Mask values of sensitive keys; flag rows that fail to decrypt."""
        if wwwutils.should_hide_value_for_key(model.key):
            return Markup('*' * 8)
        try:
            return getattr(model, name)
        except AirflowException:
            return Markup('<span class="label label-danger">Invalid</span>')

    form_columns = (
        'key',
        'val',
    )
    column_list = ('key', 'val', 'is_encrypted',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    column_default_sort = ('key', False)
    form_widget_args = {
        'is_encrypted': {'disabled': True},
        'val': {
            'rows': 20,
        }
    }
    form_args = {
        'key': {
            # A list (was a set literal) to match every other view here.
            'validators': [
                validators.DataRequired(),
            ],
        },
    }
    column_sortable_list = (
        'key',
        'val',
        'is_encrypted',
    )
    column_formatters = {
        'val': hidden_field_formatter,
    }

    # Default flask-admin export functionality doesn't handle serialized json
    @action('varexport', 'Export', None)
    def action_varexport(self, ids):
        """Download the selected Variables as one JSON attachment."""
        V = models.Variable
        session = settings.Session()
        qry = session.query(V).filter(V.id.in_(ids)).all()
        session.close()
        var_dict = {}
        d = json.JSONDecoder()
        for var in qry:
            try:
                val = d.decode(var.val)
            except Exception:
                # Narrowed from a bare except; non-JSON values export as-is.
                val = var.val
            var_dict[var.key] = val
        response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
        response.headers["Content-Disposition"] = "attachment; filename=variables.json"
        return response

    def on_form_prefill(self, form, id):
        """Mask sensitive values before rendering the edit form."""
        if wwwutils.should_hide_value_for_key(form.key.data):
            form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over XCom entries; the value is edited as a plain string."""
    verbose_name = "XCom"
    verbose_name_plural = "XComs"
    form_columns = (
        'key',
        'value',
        'execution_date',
        'task_id',
        'dag_id',
    )
    # Override the pickled 'value' column with a simple text field.
    form_extra_fields = {
        'value': StringField('Value'),
    }
    column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
    """Browse-only listing of Job rows (scheduler/backfill/worker jobs)."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    column_display_actions = False
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
    """DagRun list with inline state editing and bulk state/delete actions."""
    verbose_name_plural = "DAG Runs"
    can_edit = True
    can_create = True
    column_editable_list = ('state',)
    verbose_name = "dag run"
    column_default_sort = ('execution_date', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    form_args = dict(
        dag_id=dict(validators=[validators.DataRequired()])
    )
    column_list = (
        'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
    column_filters = column_list
    column_searchable_list = ('dag_id', 'state', 'run_id')
    column_formatters = dict(
        execution_date=datetime_f,
        state=state_f,
        start_date=datetime_f,
        dag_id=dag_link)
    @action('new_delete', "Delete", "Are you sure you want to delete selected records?")
    def action_new_delete(self, ids):
        """Delete the selected DagRuns and refresh their DagStat rows."""
        session = settings.Session()
        # Capture the affected dag_ids before deleting the rows.
        deleted = set(session.query(models.DagRun)
            .filter(models.DagRun.id.in_(ids))
            .all())
        session.query(models.DagRun) \
            .filter(models.DagRun.id.in_(ids)) \
            .delete(synchronize_session='fetch')
        session.commit()
        dirty_ids = []
        for row in deleted:
            dirty_ids.append(row.dag_id)
        models.DagStat.update(dirty_ids, dirty_only=False, session=session)
        session.close()
    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        # Bulk action wrappers around set_dagrun_state below.
        self.set_dagrun_state(ids, State.RUNNING)
    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_dagrun_state(ids, State.FAILED)
    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_dagrun_state(ids, State.SUCCESS)
    @provide_session
    def set_dagrun_state(self, ids, target_state, session=None):
        """Set the selected DagRuns to target_state and update DagStat.

        RUNNING resets start_date; any other target stamps end_date.
        """
        try:
            DR = models.DagRun
            count = 0
            dirty_ids = []
            for dr in session.query(DR).filter(DR.id.in_(ids)).all():
                dirty_ids.append(dr.dag_id)
                count += 1
                dr.state = target_state
                if target_state == State.RUNNING:
                    dr.start_date = datetime.utcnow()
                else:
                    dr.end_date = datetime.utcnow()
            session.commit()
            models.DagStat.update(dirty_ids, session=session)
            flash(
                "{count} dag runs were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
    """Browse-only listing of Log records, newest first."""
    verbose_name_plural = "logs"
    verbose_name = "log"
    column_display_actions = False
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    column_formatters = dict(
        dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
    """Browse view over TaskInstances with bulk state-change/delete actions.

    Row ids used by the actions are composite strings of the form
    'task_id,dag_id,execution_date'.
    """
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool', 'operator', 'start_date', 'end_date')
    named_filter_urls = True
    column_formatters = dict(
        log_url=log_url_formatter,
        task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        queued_dttm=datetime_f,
        dag_id=dag_link, duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    column_default_sort = ('job_id', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date', 'operator',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
        'pool', 'log_url')
    can_delete = True
    page_size = PAGE_SIZE
    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        # Bulk action wrappers around set_task_instance_state below.
        self.set_task_instance_state(ids, State.RUNNING)
    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_task_instance_state(ids, State.FAILED)
    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_task_instance_state(ids, State.SUCCESS)
    @action('set_retry', "Set state to 'up_for_retry'", None)
    def action_set_retry(self, ids):
        self.set_task_instance_state(ids, State.UP_FOR_RETRY)
    @action('delete',
            lazy_gettext('Delete'),
            lazy_gettext('Are you sure you want to delete selected records?'))
    def action_delete(self, ids):
        """
        As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().

        TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
        https://github.com/flask-admin/flask-admin/issues/1226
        """
        if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
            self.delete_task_instances(ids)
        else:
            super(TaskInstanceModelView, self).action_delete(ids)
    @provide_session
    def set_task_instance_state(self, ids, target_state, session=None):
        """Set every selected TaskInstance to target_state in one commit."""
        try:
            TI = models.TaskInstance
            count = len(ids)
            for id in ids:
                # Composite id: 'task_id,dag_id,execution_date'.
                task_id, dag_id, execution_date = id.split(',')
                execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
                ti = session.query(TI).filter(TI.task_id == task_id,
                                              TI.dag_id == dag_id,
                                              TI.execution_date == execution_date).one()
                ti.state = target_state
            session.commit()
            flash(
                "{count} task instances were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
    @provide_session
    def delete_task_instances(self, ids, session=None):
        """Delete the selected TaskInstances row-by-row (sqlite workaround)."""
        try:
            TI = models.TaskInstance
            count = 0
            for id in ids:
                task_id, dag_id, execution_date = id.split(',')
                execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
                count += session.query(TI).filter(TI.task_id == task_id,
                                                  TI.dag_id == dag_id,
                                                  TI.execution_date == execution_date).delete()
            session.commit()
            flash("{count} task instances were deleted".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to delete', 'error')
    def get_one(self, id):
        """
        As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().

        TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
        https://github.com/flask-admin/flask-admin/issues/1226
        """
        task_id, dag_id, execution_date = iterdecode(id)
        execution_date = dateutil.parser.parse(execution_date)
        return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over Connections, with per-conn_type extra fields."""
    create_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'
    list_template = 'airflow/conn_list.html'
    form_columns = (
        'conn_id',
        'conn_type',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
        'extra__jdbc__drv_path',
        'extra__jdbc__drv_clsname',
        'extra__google_cloud_platform__project',
        'extra__google_cloud_platform__key_path',
        'extra__google_cloud_platform__keyfile_dict',
        'extra__google_cloud_platform__scope',
    )
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
    form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
    form_widget_args = {
        'is_extra_encrypted': {'disabled': True},
        'is_encrypted': {'disabled': True},
    }
    # Used to customized the form, the forms elements get rendered
    # and results are stored in the extra field as json. All of these
    # need to be prefixed with extra__ and then the conn_type ___ as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
    form_extra_fields = {
        'extra__jdbc__drv_path': StringField('Driver Path'),
        'extra__jdbc__drv_clsname': StringField('Driver Class'),
        'extra__google_cloud_platform__project': StringField('Project Id'),
        'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
        # Label typo fixed: "seperated" -> "separated".
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
    }
    form_choices = {
        'conn_type': models.Connection._types
    }

    def on_model_change(self, form, model, is_created):
        """Serialize the conn_type-specific extra__* fields into `extra`."""
        formdata = form.data
        if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
            extra = {
                key: formdata[key]
                for key in self.form_extra_fields.keys() if key in formdata}
            model.extra = json.dumps(extra)

    @classmethod
    def alert_fernet_key(cls):
        """True when no fernet_key is configured (credentials unencrypted)."""
        fk = None
        try:
            fk = conf.get('core', 'fernet_key')
        except Exception:
            # Narrowed from a bare except; a missing option means no key.
            pass
        return fk is None

    @classmethod
    def is_secure(cls):
        """
        Used to display a message in the Connection list view making it clear
        that the passwords and `extra` field can't be encrypted.
        """
        is_secure = False
        try:
            import cryptography
            conf.get('core', 'fernet_key')
            is_secure = True
        except Exception:
            # cryptography missing or fernet_key unset -> not secure.
            pass
        return is_secure

    def on_form_prefill(self, form, id):
        """Re-populate the extra__* widgets from the stored JSON blob."""
        try:
            d = json.loads(form.data.get('extra', '{}'))
        except Exception:
            d = {}
        for field in list(self.form_extra_fields.keys()):
            value = d.get(field, '')
            if value:
                field = getattr(form, field)
                field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over User accounts, sorted by username."""
    verbose_name = "User"
    verbose_name_plural = "Users"
    column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
    """Shows the installed Airflow version and, if available, the git hash."""

    @expose('/')
    def version(self):
        """Render airflow/version.html; missing info degrades to None."""
        # Look at the version from setup.py
        try:
            airflow_version = pkg_resources.require("apache-airflow")[0].version
        except Exception as e:
            airflow_version = None
            logging.error(e)
        # Get the Git repo and git hash
        git_version = None
        try:
            # os.path.join takes the components directly; no need to build
            # and unpack a list as the original did.
            with open(os.path.join(settings.AIRFLOW_HOME, 'airflow', 'git_version')) as f:
                git_version = f.readline()
        except Exception as e:
            logging.error(e)
        # Render information
        title = "Version Info"
        return self.render('airflow/version.html',
                           title=title,
                           airflow_version=airflow_version,
                           git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Displays the running airflow.cfg (raw or highlighted) when exposed."""

    @expose('/')
    def conf(self):
        """Render the configuration page, honoring webserver.expose_config.

        With ?raw=true the config text is returned as a plain response;
        otherwise it is pygments-highlighted alongside the resolved
        (section, key, value, source) table.
        """
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = conf.AIRFLOW_CONFIG
        if conf.getboolean("webserver", "expose_config"):
            with open(conf.AIRFLOW_CONFIG, 'r') as f:
                config = f.read()
            table = [(section, key, value, source)
                     for section, parameters in conf.as_dict(True, True).items()
                     for key, (value, source) in parameters.items()]
        else:
            # Typo fixed: "You Airflow" -> "Your Airflow".
            config = (
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons.")
            table = None
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/config.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle,
                table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Mostly read-only listing of DagModel rows (pause toggle is inline)."""
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    # Scheduler-maintained bookkeeping fields are shown but not editable.
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'is_paused': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = PAGE_SIZE
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True
    def get_query(self):
        """
        Default filters for model
        """
        # NOTE(review): this includes paused dags (is_active OR is_paused)
        # while get_count_query below filters on is_active only, so the row
        # count can disagree with the listing -- confirm if intentional.
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
    def get_count_query(self):
        """
        Default filters for model
        """
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(models.DagModel.is_active)
            .filter(~models.DagModel.is_subdag)
        )
| apache-2.0 |
BludhavenGrayson/repository.BludhavenGrayson | plugin.video.rohwrestling/default.py | 1 | 6158 | '''
ROH Wrestling Add-on
Copyright (C) 2017 BludhavenGrayson
Copyright (C) 2016 rw86
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib
import urllib2
import re
# Bug fix: ``xbmc`` is used below (xbmc.translatePath) but was never
# imported, which raises a NameError as soon as the add-on loads.
import xbmc
import xbmcplugin
import xbmcgui
import os
import sys
import datetime
import string
import xbmcaddon

# Add-on handle plus the artwork shipped with the add-on.
addon = xbmcaddon.Addon('plugin.video.rohwrestling')
enableVOD = addon.getSetting('ringside')
fanart = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.rohwrestling', 'fanart.jpg'))
icon = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.rohwrestling', 'icon.png'))
def main():
    """Build the root menu: latest episode, per-year TV folders and, when
    the 'ringside' setting is enabled, the VOD section."""
    addLink('[B]Watch the Latest Episode[/B]', 'http://rohwrestling.com/tv/current', 2, icon)
    addDir('[B]ROH TV:[/B] 2017', '17', 4, icon)
    addDir('[B]ROH TV:[/B] 2016', '16', 4, icon)
    addDir('[B]ROH TV:[/B] 2015', '15', 4, icon)
    addDir('[B]ROH TV:[/B] 2014', '14', 4, icon)
    # Dead ``else: pass`` removed; the condition alone is sufficient.
    if enableVOD == "true":
        addDir('[B]Ringside:[/B] Video-On-Demand', 'url', 5, icon)
def getRingside():
    """List the Ringside VOD year folders, newest first (2016 .. 2002)."""
    for year in range(16, 1, -1):
        suffix = '%02d' % year
        addDir('[B]VOD:[/B] 20' + suffix, suffix, 3, icon)
def getEpisode(url):
    """Scrape the page at *url* for Ooyala embed ids and resolve playback.

    NOTE(review): ``name`` here is the module-level global set by the
    parameter-parsing code at the bottom of the file -- confirm before
    refactoring.  The dots in the pattern are unescaped regex wildcards.
    """
    link = open_url(url)
    link = link
    match=re.compile('.c.ooyala.com/(.+?)/').findall(link)
    for url in match:
        url = 'http://player.ooyala.com/player/ipad/'+url+'.m3u8'
        #name = 'Ring of Honor TV'
        play(name,url)
def getVOD(url):
    """List Ringside VOD entries for the two-digit year suffix *url*.

    Two passes over the listing page: entries whose date uses a '/'
    separator, then entries using a '.' separator.
    NOTE(review): in the second pattern the separator '.' is an unescaped
    regex wildcard, so it matches any character (including '/'); merging
    the two loops would therefore change the output -- confirm first.
    """
    link = open_url('http://rohwrestling.com/vod')
    link = link
    match=re.compile('<span class="field-content"><a href="(.+?)" class="ppv-product-load">(.+?)/'+url+' (.+?)</a></span>').findall(link)
    #match=list(reversed(match))
    for url2,date,name in match:
        url2 = 'http://rohwrestling.com'+url2
        name = name.replace("'","'")
        name = date+'/'+url+' '+name
        iconimage = icon
        addLink(name,url2,2,iconimage)
    match2=re.compile('<span class="field-content"><a href="(.+?)" class="ppv-product-load">(.+?).'+url+' (.+?)</a></span>').findall(link)
    #match=list(reversed(match))
    for url2,date,name in match2:
        url2 = 'http://rohwrestling.com'+url2
        name = name.replace("'","'")
        name = date+'.'+url+' '+name
        iconimage = icon
        addLink(name,url2,2,iconimage)
def getTV(url):
    """List the TV episodes whose date ends in the year suffix *url*."""
    page = open_url('http://rohwrestling.com/tv/current')
    pattern = re.compile(
        '<span class="field-content"><a href="(.+?)" class="ppv-product-load">(.+?)/' + url + '</a></span>')
    for path, title in pattern.findall(page):
        addLink(title + '/' + url, 'http://rohwrestling.com' + path, 2, icon)
def play(name, url):
    """Hand the resolved stream URL back to Kodi as the played item."""
    item = xbmcgui.ListItem(name, path=url)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
def open_url(url):
    """Fetch *url* with a desktop Firefox User-Agent and return the body.

    The response is now always closed, even when ``read()`` raises -- the
    original leaked the connection in that case.
    """
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    try:
        return response.read()
    finally:
        response.close()
def get_params():
    """Parse the Kodi query string (sys.argv[2]) into a dict.

    Returns an empty dict when no parameters were supplied (the original
    returned a list there, forcing callers to rely on a TypeError).  A
    leading '?' and a trailing '/' are stripped; pairs without exactly
    one '=' are ignored.
    """
    param = {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        cleanedparams = paramstring.replace('?', '')
        # Bug fix: the original chopped two characters off the *unused* raw
        # string on a trailing slash; strip it from the parsed string.
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        for pair in cleanedparams.split('&'):
            parts = pair.split('=')
            if len(parts) == 2:
                param[parts[0]] = parts[1]
    return param
def addLink(name, url, mode, iconimage):
    """Add a playable (non-folder) entry to the plugin directory listing."""
    query = (sys.argv[0] + "?url=" + urllib.quote_plus(url) +
             "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name) +
             "&iconimage=" + urllib.quote_plus(iconimage))
    item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    item.setProperty('fanart_image', fanart)
    item.setProperty("IsPlayable", "true")
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query, listitem=item, isFolder=False)
def addDir(name, url, mode, iconimage):
    """Add a folder entry to the plugin directory listing."""
    query = (sys.argv[0] + "?url=" + urllib.quote_plus(url) +
             "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name) +
             "&iconimage=" + urllib.quote_plus(iconimage))
    item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    item.setProperty('fanart_image', fanart)
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query, listitem=item, isFolder=True)
# ---- plugin entry point: parse the routing params and dispatch ----
params = get_params()
url = None
name = None
mode = None
iconimage = None
# Each param is optional; bare excepts narrowed to the errors the lookups
# can actually raise (KeyError on a dict, TypeError on the legacy list).
try:
    url = urllib.unquote_plus(params["url"])
except (KeyError, TypeError):
    pass
try:
    name = urllib.unquote_plus(params["name"])
except (KeyError, TypeError):
    pass
try:
    mode = int(params["mode"])
except (KeyError, TypeError, ValueError):
    pass
try:
    iconimage = urllib.unquote_plus(params["iconimage"])
except (KeyError, TypeError):
    pass
# Parenthesized prints behave identically on Python 2 for one argument.
print("Mode: " + str(mode))
print("URL: " + str(url))
print("Name: " + str(name))
if mode is None or url is None or len(url) < 1:
    print("")
    main()
elif mode == 1:
    play(name, url)
elif mode == 2:
    getEpisode(url)
elif mode == 3:
    getVOD(url)
elif mode == 4:
    getTV(url)
elif mode == 5:
    getRingside()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 |
mogoweb/chromium-crosswalk | chrome/test/chromedriver/test/unittest_util.py | 134 | 4320 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
  """A test result class that can print formatted text results to a stream.

  Results printed in conformance with gtest output format, like:
  [ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
  [ OK ] autofill.AutofillTest.testAutofillInvalid
  """

  def __init__(self, stream, descriptions, verbosity):
    unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
    # URIs of tests that errored or failed, used to build a retest filter.
    self._fails = set()

  def _GetTestURI(self, test):
    # module.Class.method uniquely identifies a test case.
    return '.'.join([test.__class__.__module__,
                     test.__class__.__name__,
                     test._testMethodName])

  def getDescription(self, test):
    uri = self._GetTestURI(test)
    return '%s: "%s"' % (uri, test.shortDescription())

  def startTest(self, test):
    unittest.TestResult.startTest(self, test)
    self.stream.writeln('[ RUN ] %s' % self.getDescription(test))

  def addSuccess(self, test):
    unittest.TestResult.addSuccess(self, test)
    self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))

  def addError(self, test, err):
    unittest.TestResult.addError(self, test, err)
    self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
    self._fails.add(self._GetTestURI(test))

  def addFailure(self, test, err):
    unittest.TestResult.addFailure(self, test, err)
    self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
    self._fails.add(self._GetTestURI(test))

  def getRetestFilter(self):
    # Colon-separated list is the gtest_filter syntax for re-running fails.
    return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
  """Test Runner for displaying test results in textual format.

  Results are displayed in conformance with google test output.
  """

  def __init__(self, verbosity=1):
    # Always write to stderr so test output is not mixed with program stdout.
    unittest.TextTestRunner.__init__(
        self, stream=sys.stderr, verbosity=verbosity)

  def _makeResult(self):
    # Substitute the gtest-style result formatter for the default one.
    return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
  """Returns all the tests from a given test suite."""
  tests = []
  # Suites may nest arbitrarily; flatten them recursively.
  for item in suite:
    if isinstance(item, unittest.TestSuite):
      tests.extend(GetTestsFromSuite(item))
    else:
      tests.append(item)
  return tests
def GetTestNamesFromSuite(suite):
  """Returns a list of every test name in the given suite."""
  # A list comprehension (rather than map()) guarantees a list is returned
  # under both Python 2 and Python 3 -- py3's map() yields a lazy iterator,
  # which would break callers that index or len() the result -- and drops
  # the needless lambda wrapper around GetTestName.
  return [GetTestName(test) for test in GetTestsFromSuite(suite)]
def GetTestName(test):
  """Gets the test name of the given unittest test."""
  # Fully qualified name: module.Class.method.
  return '%s.%s.%s' % (test.__class__.__module__,
                       test.__class__.__name__,
                       test._testMethodName)
def FilterTestSuite(suite, gtest_filter):
  """Returns a new filtered tests suite based on the given gtest filter.

  See http://code.google.com/p/googletest/wiki/AdvancedGuide
  for gtest_filter specification.
  """
  all_tests = GetTestsFromSuite(suite)
  return unittest.TestSuite(FilterTests(all_tests, gtest_filter))
def FilterTests(all_tests, gtest_filter):
  """Returns a filtered list of tests based on the given gtest filter.

  See http://code.google.com/p/googletest/wiki/AdvancedGuide
  for gtest_filter specification.
  """
  # Filter grammar: "pos1:pos2-neg1:neg2" -- everything after the first '-'
  # is the (optional) negative pattern group.
  groups = gtest_filter.split('-')
  positive_patterns = groups[0].split(':')
  negative_patterns = groups[1].split(':') if len(groups) > 1 else None

  def _MatchesAny(name, patterns):
    # fnmatch implements the '*'/'?' glob syntax gtest filters use.
    return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)

  selected = []
  for test in all_tests:
    test_name = GetTestName(test)
    # Keep a test only if some positive pattern matches it...
    if not _MatchesAny(test_name, positive_patterns):
      continue
    # ...and no negative pattern matches it.
    if negative_patterns and _MatchesAny(test_name, negative_patterns):
      continue
    selected.append(test)
  return selected
| bsd-3-clause |
Justin-Yuan/Image2Music-Generator | library/jython2.5.3/Lib/encodings/iso2022_jp_1.py | 816 | 1061 | #
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
# Stateless codec facade: encode/decode delegate directly to the C-level
# implementation returned by _codecs_iso2022.getcodec().
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode
# Incremental (streaming) encoder; _multibytecodec supplies the machinery,
# the class attribute tells it which multibyte codec to drive.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# Incremental (streaming) decoder counterpart of IncrementalEncoder.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# File-like reader wrapper; combines the codec facade with the multibyte
# stream machinery.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# File-like writer wrapper; mirror image of StreamReader.
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec with Python."""
    # One stateless Codec instance supplies both directions; the stream and
    # incremental classes are passed as factories, not instances.
    codec_impl = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_1',
        encode=codec_impl.encode,
        decode=codec_impl.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| gpl-2.0 |
Eric89GXL/mne-python | tutorials/sample-datasets/plot_phantom_4DBTi.py | 17 | 2910 | # -*- coding: utf-8 -*-
"""
.. _tut_phantom_4Dbti:
============================================
4D Neuroimaging/BTi phantom dataset tutorial
============================================
Here we read 4DBTi epochs data obtained with a spherical phantom
using four different dipole locations. For each condition we
compute evoked data and compute dipole fits.
Data are provided by Jean-Michel Badier from MEG center in Marseille, France.
"""
# Authors: Alex Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from mne.datasets import phantom_4dbti
import mne
###############################################################################
# Read data and compute a dipole fit at the peak of the evoked response
data_path = phantom_4dbti.data_path()
# One BTi run per phantom dipole position; %d is filled with run number 1-4.
raw_fname = op.join(data_path, '%d/e,rfhp1.0Hz')
dipoles = list()  # NOTE(review): never used below; pos/ori hold the results
# Single-shell spherical conductor model centered at the origin (80 mm radius)
# -- appropriate for a spherical phantom.
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
t0 = 0.07 # peak of the response
pos = np.empty((4, 3))  # fitted dipole position per run (meters)
ori = np.empty((4, 3))  # fitted dipole orientation per run (unit vector)
for ii in range(4):
    raw = mne.io.read_raw_bti(raw_fname % (ii + 1,),
                              rename_channels=False, preload=True)
    # Known bad MEG channels for this acquisition.
    raw.info['bads'] = ['A173', 'A213', 'A232']
    events = mne.find_events(raw, 'TRIGGER', mask=4350, mask_type='not_and')
    epochs = mne.Epochs(raw, events=events, event_id=8192, tmin=-0.2, tmax=0.4,
                        preload=True)
    evoked = epochs.average()
    evoked.plot(time_unit='s')
    # Noise covariance from the pre-stimulus baseline (up to t=0).
    cov = mne.compute_covariance(epochs, tmax=0.)
    # Fit a single dipole at the response peak only (crop to one sample).
    dip = mne.fit_dipole(evoked.copy().crop(t0, t0), cov, sphere)[0]
    pos[ii] = dip.pos[0]
    ori[ii] = dip.ori[0]
###############################################################################
# Compute localisation errors
# Nominal phantom dipole positions (given in cm, converted to meters), then
# rotated into the device coordinate frame used by the fits.
actual_pos = 0.01 * np.array([[0.16, 1.61, 5.13],
                              [0.17, 1.35, 4.15],
                              [0.16, 1.05, 3.19],
                              [0.13, 0.80, 2.26]])
actual_pos = np.dot(actual_pos, [[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
# Euclidean distance between nominal and fitted positions, in millimeters.
errors = 1e3 * np.linalg.norm(actual_pos - pos, axis=1)
print("errors (mm) : %s" % errors)
###############################################################################
# Plot the dipoles in 3D
actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance
actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance
dip = mne.Dipole(dip.times, pos, actual_amp, ori, actual_gof)
dip_true = mne.Dipole(dip.times, actual_pos, actual_amp, ori, actual_gof)
fig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces=[])
# Plot the position of the actual dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='sphere',
                                    color=(1., 0., 0.), fig=fig)
# Plot the position of the estimated dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='sphere',
                                    color=(1., 1., 0.), fig=fig)
| bsd-3-clause |
coderb0t/CouchPotatoServer | libs/suds/client.py | 150 | 25971 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
    """
    A lightweight web services client.
    I{(2nd generation)} API.
    @ivar wsdl: The WSDL object.
    @type wsdl:L{Definitions}
    @ivar service: The service proxy used to invoke operations.
    @type service: L{Service}
    @ivar factory: The factory used to create objects.
    @type factory: L{Factory}
    @ivar sd: The service definition
    @type sd: L{ServiceDefinition}
    @ivar messages: The last sent/received messages.
    @type messages: str[2]
    """
    @classmethod
    def items(cls, sobject):
        """
        Extract the I{items} from a suds object much like the
        items() method works on I{dict}.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A list of items contained in I{sobject}.
        @rtype: [(key, value),...]
        """
        return sudsobject.items(sobject)
    @classmethod
    def dict(cls, sobject):
        """
        Convert a sudsobject into a dictionary.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A python dictionary containing the
            items contained in I{sobject}.
        @rtype: dict
        """
        return sudsobject.asdict(sobject)
    @classmethod
    def metadata(cls, sobject):
        """
        Extract the metadata from a suds object.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: The object's metadata
        @rtype: L{sudsobject.Metadata}
        """
        return sobject.__metadata__
    def __init__(self, url, **kwargs):
        """
        @param url: The URL for the WSDL.
        @type url: str
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        # Defaults (transport, cache) are installed before set_options() so
        # that caller-supplied kwargs can override them.
        options = Options()
        options.transport = HttpAuthenticated()
        self.options = options
        options.cache = ObjectCache(days=1)
        self.set_options(**kwargs)
        # Fetching/parsing the WSDL happens here -- construction does I/O.
        reader = DefinitionsReader(options, Definitions)
        self.wsdl = reader.open(url)
        plugins = PluginContainer(options.plugins)
        plugins.init.initialized(wsdl=self.wsdl)
        self.factory = Factory(self.wsdl)
        self.service = ServiceSelector(self, self.wsdl.services)
        self.sd = []
        for s in self.wsdl.services:
            sd = ServiceDefinition(self.wsdl, s)
            self.sd.append(sd)
        self.messages = dict(tx=None, rx=None)
    def set_options(self, **kwargs):
        """
        Set options.
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        p = Unskin(self.options)
        p.update(kwargs)
    def add_prefix(self, prefix, uri):
        """
        Add I{static} mapping of an XML namespace prefix to a namespace.
        This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to changed.
        @param prefix: An XML namespace prefix.
        @type prefix: str
        @param uri: An XML namespace URI.
        @type uri: str
        @raise Exception: when prefix is already mapped.
        """
        root = self.wsdl.root
        mapped = root.resolvePrefix(prefix, None)
        if mapped is None:
            root.addPrefix(prefix, uri)
            return
        # Re-mapping the same prefix to the same URI is a harmless no-op;
        # only a conflicting URI is an error.
        if mapped[1] != uri:
            raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
    def last_sent(self):
        """
        Get last sent I{soap} message.
        @return: The last sent I{soap} message.
        @rtype: L{Document}
        """
        return self.messages.get('tx')
    def last_received(self):
        """
        Get last received I{soap} message.
        @return: The last received I{soap} message.
        @rtype: L{Document}
        """
        return self.messages.get('rx')
    def clone(self):
        """
        Get a shallow clone of this object.
        The clone only shares the WSDL. All other attributes are
        unique to the cloned object including options.
        @return: A shallow clone.
        @rtype: L{Client}
        """
        # The Uninitialized subclass overrides __init__ with a no-op so the
        # clone is built without re-fetching/re-parsing the WSDL.
        class Uninitialized(Client):
            def __init__(self):
                pass
        clone = Uninitialized()
        clone.options = Options()
        cp = Unskin(clone.options)
        mp = Unskin(self.options)
        cp.update(deepcopy(mp))
        # WSDL, factory and service definitions are shared; options and
        # message history are per-clone.
        clone.wsdl = self.wsdl
        clone.factory = self.factory
        clone.service = ServiceSelector(clone, self.wsdl.services)
        clone.sd = self.sd
        clone.messages = dict(tx=None, rx=None)
        return clone
    def __str__(self):
        return unicode(self)
    def __unicode__(self):
        s = ['\n']
        build = suds.__build__.split()
        s.append('Suds ( https://fedorahosted.org/suds/ )')
        s.append(' version: %s' % suds.__version__)
        s.append(' %s build: %s' % (build[0], build[1]))
        for sd in self.sd:
            s.append('\n\n%s' % unicode(sd))
        return ''.join(s)
class Factory:
    """
    A factory for instantiating types defined in the wsdl
    @ivar resolver: A schema type resolver.
    @type resolver: L{PathResolver}
    @ivar builder: A schema object builder.
    @type builder: L{Builder}
    """
    def __init__(self, wsdl):
        """
        @param wsdl: A schema object.
        @type wsdl: L{wsdl.Definitions}
        """
        self.wsdl = wsdl
        self.resolver = PathResolver(wsdl)
        self.builder = Builder(self.resolver)
    def create(self, name):
        """
        create a WSDL type by name
        @param name: The name of a type defined in the WSDL.
        @type name: str
        @return: The requested object.
        @rtype: L{Object}
        """
        timer = metrics.Timer()
        timer.start()
        # NOTE: 'type' intentionally shadows the builtin (legacy naming).
        type = self.resolver.find(name)
        if type is None:
            raise TypeNotFound(name)
        if type.enum():
            # Enumerations become objects whose attribute names equal their
            # values (result.FOO == 'FOO').
            result = InstFactory.object(name)
            for e, a in type.children():
                setattr(result, e.name, e.name)
        else:
            try:
                result = self.builder.build(type)
            except Exception, e:
                log.error("create '%s' failed", name, exc_info=True)
                raise BuildError(name, e)
        timer.stop()
        metrics.log.debug('%s created: %s', name, timer)
        return result
    def separator(self, ps):
        """
        Set the path separator.
        @param ps: The new path separator.
        @type ps: char
        """
        # Replaces the resolver so subsequent create() calls parse paths
        # with the new separator.
        self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
    """
    The B{service} selector is used to select a web service.
    In most cases, the wsdl only defines (1) service in which access
    by subscript is passed through to a L{PortSelector}. This is also the
    behavior when a I{default} service has been specified. In cases
    where multiple services have been defined and no default has been
    specified, the service is found by name (or index) and a L{PortSelector}
    for the service is returned. In all cases, attribute access is
    forwarded to the L{PortSelector} for either the I{first} service or the
    I{default} service (when specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __services: A list of I{wsdl} services.
    @type __services: list
    """
    # NOTE: double-underscore attributes are name-mangled on purpose so they
    # cannot collide with service/port names resolved through __getattr__.
    def __init__(self, client, services):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param services: A list of I{wsdl} services.
        @type services: list
        """
        self.__client = client
        self.__services = services
    def __getattr__(self, name):
        """
        Request to access an attribute is forwarded to the
        L{PortSelector} for either the I{first} service or the
        I{default} service (when specified).
        @param name: The name of a method.
        @type name: str
        @return: A L{PortSelector}.
        @rtype: L{PortSelector}.
        """
        default = self.__ds()
        if default is None:
            port = self.__find(0)
        else:
            port = default
        return getattr(port, name)
    def __getitem__(self, name):
        """
        Provides selection of the I{service} by name (string) or
        index (integer). In cases where only (1) service is defined
        or a I{default} has been specified, the request is forwarded
        to the L{PortSelector}.
        @param name: The name (or index) of a service.
        @type name: (int|str)
        @return: A L{PortSelector} for the specified service.
        @rtype: L{PortSelector}.
        """
        # With a single service (or a default), the subscript selects a
        # *port* of that service rather than a service.
        if len(self.__services) == 1:
            port = self.__find(0)
            return port[name]
        default = self.__ds()
        if default is not None:
            port = default
            return port[name]
        return self.__find(name)
    def __find(self, name):
        """
        Find a I{service} by name (string) or index (integer).
        @param name: The name (or index) of a service.
        @type name: (int|str)
        @return: A L{PortSelector} for the found service.
        @rtype: L{PortSelector}.
        """
        service = None
        if not len(self.__services):
            raise Exception, 'No services defined'
        if isinstance(name, int):
            try:
                service = self.__services[name]
                name = service.name
            except IndexError:
                raise ServiceNotFound, 'at [%d]' % name
        else:
            for s in self.__services:
                if name == s.name:
                    service = s
                    break
        if service is None:
            raise ServiceNotFound, name
        return PortSelector(self.__client, service.ports, name)
    def __ds(self):
        """
        Get the I{default} service if defined in the I{options}.
        @return: A L{PortSelector} for the I{default} service.
        @rtype: L{PortSelector}.
        """
        ds = self.__client.options.service
        if ds is None:
            return None
        else:
            return self.__find(ds)
class PortSelector:
    """
    The B{port} selector is used to select a I{web service} B{port}.
    In cases where multiple ports have been defined and no default has been
    specified, the port is found by name (or index) and a L{MethodSelector}
    for the port is returned. In all cases, attribute access is
    forwarded to the L{MethodSelector} for either the I{first} port or the
    I{default} port (when specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __ports: A list of I{service} ports.
    @type __ports: list
    @ivar __qn: The I{qualified} name of the port (used for logging).
    @type __qn: str
    """
    # Same name-mangling rationale as ServiceSelector: keep internal state
    # out of the way of __getattr__-resolved method names.
    def __init__(self, client, ports, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param ports: A list of I{service} ports.
        @type ports: list
        @param qn: The name of the service.
        @type qn: str
        """
        self.__client = client
        self.__ports = ports
        self.__qn = qn
    def __getattr__(self, name):
        """
        Request to access an attribute is forwarded to the
        L{MethodSelector} for either the I{first} port or the
        I{default} port (when specified).
        @param name: The name of a method.
        @type name: str
        @return: A L{MethodSelector}.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            m = self.__find(0)
        else:
            m = default
        return getattr(m, name)
    def __getitem__(self, name):
        """
        Provides selection of the I{port} by name (string) or
        index (integer). In cases where only (1) port is defined
        or a I{default} has been specified, the request is forwarded
        to the L{MethodSelector}.
        @param name: The name (or index) of a port.
        @type name: (int|str)
        @return: A L{MethodSelector} for the specified port.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            return self.__find(name)
        else:
            return default
    def __find(self, name):
        """
        Find a I{port} by name (string) or index (integer).
        @param name: The name (or index) of a port.
        @type name: (int|str)
        @return: A L{MethodSelector} for the found port.
        @rtype: L{MethodSelector}.
        """
        port = None
        if not len(self.__ports):
            raise Exception, 'No ports defined: %s' % self.__qn
        if isinstance(name, int):
            qn = '%s[%d]' % (self.__qn, name)
            try:
                port = self.__ports[name]
            except IndexError:
                raise PortNotFound, qn
        else:
            qn = '.'.join((self.__qn, name))
            for p in self.__ports:
                if name == p.name:
                    port = p
                    break
        if port is None:
            raise PortNotFound, qn
        # Re-qualify with the resolved port name (index lookups included).
        qn = '.'.join((self.__qn, port.name))
        return MethodSelector(self.__client, port.methods, qn)
    def __dp(self):
        """
        Get the I{default} port if defined in the I{options}.
        @return: A L{MethodSelector} for the I{default} port.
        @rtype: L{MethodSelector}.
        """
        dp = self.__client.options.port
        if dp is None:
            return None
        else:
            return self.__find(dp)
class MethodSelector:
    """
    The B{method} selector is used to select a B{method} by name.
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __methods: A dictionary of methods.
    @type __methods: dict
    @ivar __qn: The I{qualified} name of the method (used for logging).
    @type __qn: str
    """
    def __init__(self, client, methods, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param methods: A dictionary of methods.
        @type methods: dict
        @param qn: The I{qualified} name of the port.
        @type qn: str
        """
        self.__client = client
        self.__methods = methods
        self.__qn = qn
    def __getattr__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        """
        # Attribute access is just sugar for subscript access.
        return self[name]
    def __getitem__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        """
        m = self.__methods.get(name)
        if m is None:
            qn = '.'.join((self.__qn, name))
            raise MethodNotFound, qn
        return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
@type I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
@type I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
    """
    A lightweight soap based web client B{**not intended for external use}
    @ivar service: The target method.
    @type service: L{Service}
    @ivar method: A target method.
    @type method: L{Method}
    @ivar options: A dictonary of options.
    @type options: dict
    @ivar cookiejar: A cookie jar.
    @type cookiejar: libcookie.CookieJar
    """
    def __init__(self, client, method):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param method: A target method.
        @type method: L{Method}
        """
        self.client = client
        self.method = method
        self.options = client.options
        self.cookiejar = CookieJar()
    def invoke(self, args, kwargs):
        """
        Send the required soap message to invoke the specified method
        @param args: A list of args for the method invoked.
        @type args: list
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: The result of the method invocation.
        @rtype: I{builtin}|I{subclass of} L{Object}
        """
        timer = metrics.Timer()
        timer.start()
        result = None
        # Marshal args/kwargs into a SOAP envelope via the input binding.
        binding = self.method.binding.input
        soapenv = binding.get_message(self.method, args, kwargs)
        timer.stop()
        metrics.log.debug(
            "message for '%s' created: %s",
            self.method.name,
            timer)
        timer.start()
        result = self.send(soapenv)
        timer.stop()
        metrics.log.debug(
            "method '%s' invoked: %s",
            self.method.name,
            timer)
        return result
    def send(self, soapenv):
        """
        Send soap message.
        @param soapenv: A soap envelope to send.
        @type soapenv: L{Document}
        @return: The reply to the sent message.
        @rtype: I{builtin} or I{subclass of} L{Object}
        """
        result = None
        location = self.location()
        binding = self.method.binding.input
        transport = self.options.transport
        retxml = self.options.retxml
        prettyxml = self.options.prettyxml
        log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
        try:
            self.last_sent(soapenv)
            plugins = PluginContainer(self.options.plugins)
            plugins.message.marshalled(envelope=soapenv.root())
            if prettyxml:
                soapenv = soapenv.str()
            else:
                soapenv = soapenv.plain()
            soapenv = soapenv.encode('utf-8')
            # Plugins get a final chance to inspect/replace the wire bytes.
            plugins.message.sending(envelope=soapenv)
            request = Request(location, soapenv)
            request.headers = self.headers()
            reply = transport.send(request)
            ctx = plugins.message.received(reply=reply.message)
            reply.message = ctx.reply
            if retxml:
                result = reply.message
            else:
                result = self.succeeded(binding, reply.message)
        except TransportError, e:
            # 202 Accepted / 204 No Content are success-without-body, not
            # failures.
            if e.httpcode in (202,204):
                result = None
            else:
                log.error(self.last_sent())
                result = self.failed(binding, e)
        return result
    def headers(self):
        """
        Get http headers or the http/https request.
        @return: A dictionary of header/values.
        @rtype: dict
        """
        action = self.method.soap.action
        stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
        # User-supplied headers (options.headers) override the stock ones.
        result = dict(stock, **self.options.headers)
        log.debug('headers = %s', result)
        return result
    def succeeded(self, binding, reply):
        """
        Request succeeded, process the reply
        @param binding: The binding to be used to process the reply.
        @type binding: L{bindings.binding.Binding}
        @param reply: The raw reply text.
        @type reply: str
        @return: The method result.
        @rtype: I{builtin}, L{Object}
        @raise WebFault: On server.
        """
        log.debug('http succeeded:\n%s', reply)
        plugins = PluginContainer(self.options.plugins)
        if len(reply) > 0:
            reply, result = binding.get_reply(self.method, reply)
            self.last_received(reply)
        else:
            result = None
        ctx = plugins.message.unmarshalled(reply=result)
        result = ctx.reply
        # faults=False callers get (status, result) tuples instead of
        # bare results + exceptions.
        if self.options.faults:
            return result
        else:
            return (200, result)
    def failed(self, binding, error):
        """
        Request failed, process reply based on reason
        @param binding: The binding to be used to process the reply.
        @type binding: L{suds.bindings.binding.Binding}
        @param error: The http error message
        @type error: L{transport.TransportError}
        """
        status, reason = (error.httpcode, tostr(error))
        reply = error.fp.read()
        log.debug('http failed:\n%s', reply)
        if status == 500:
            # HTTP 500 with a body carries a SOAP Fault; parse it (this may
            # raise WebFault when options.faults is set).
            if len(reply) > 0:
                r, p = binding.get_fault(reply)
                self.last_received(r)
                return (status, p)
            else:
                return (status, None)
        if self.options.faults:
            raise Exception((status, reason))
        else:
            return (status, None)
    def location(self):
        # options.location (when set) overrides the WSDL-declared endpoint.
        p = Unskin(self.options)
        return p.get('location', self.method.location)
    def last_sent(self, d=None):
        # Getter/setter hybrid: no arg reads, an arg records on the shared
        # client message history.
        key = 'tx'
        messages = self.client.messages
        if d is None:
            return messages.get(key)
        else:
            messages[key] = d
    def last_received(self, d=None):
        # Same getter/setter convention as last_sent().
        key = 'rx'
        messages = self.client.messages
        if d is None:
            return messages.get(key)
        else:
            messages[key] = d
class SimClient(SoapClient):
    """
    Loopback client used for message/reply simulation.
    """
    # Reserved kwarg name whose presence switches a call into simulation
    # mode (see Method.clientclass()).
    injkey = '__inject'
    @classmethod
    def simulation(cls, kwargs):
        """ get whether loopback has been specified in the I{kwargs}. """
        return kwargs.has_key(SimClient.injkey)
    def invoke(self, args, kwargs):
        """
        Send the required soap message to invoke the specified method
        @param args: A list of args for the method invoked.
        @type args: list
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: The result of the method invocation.
        @rtype: I{builtin} or I{subclass of} L{Object}
        """
        simulation = kwargs[self.injkey]
        msg = simulation.get('msg')
        reply = simulation.get('reply')
        fault = simulation.get('fault')
        # Priority: injected request message > canned reply > canned fault.
        if msg is None:
            if reply is not None:
                return self.__reply(reply, args, kwargs)
            if fault is not None:
                return self.__fault(fault)
            raise Exception('(reply|fault) expected when msg=None')
        sax = Parser()
        msg = sax.parse(string=msg)
        return self.send(msg)
    def __reply(self, reply, args, kwargs):
        """ simulate the reply """
        # Marshal the request (for logging only), then process the injected
        # reply exactly as a real HTTP success would be.
        binding = self.method.binding.input
        msg = binding.get_message(self.method, args, kwargs)
        log.debug('inject (simulated) send message:\n%s', msg)
        binding = self.method.binding.output
        return self.succeeded(binding, reply)
    def __fault(self, reply):
        """ simulate the (fault) reply """
        binding = self.method.binding.output
        if self.options.faults:
            r, p = binding.get_fault(reply)
            self.last_received(r)
            return (500, p)
        else:
            return (500, None)
| gpl-3.0 |
iemejia/incubator-beam | sdks/python/apache_beam/runners/worker/log_handler.py | 2 | 7153 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Beam fn API log handler."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import math
import queue
import sys
import threading
import time
import traceback
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
# This module is experimental. No backwards-compatibility guarantees.
class FnApiLogRecordHandler(logging.Handler):
"""A handler that writes log records to the fn API."""
# Maximum number of log entries in a single stream request.
_MAX_BATCH_SIZE = 1000
# Used to indicate the end of stream.
_FINISHED = object()
# Size of the queue used to buffer messages. Once full, messages will be
# dropped. If the average log size is 1KB this may use up to 10MB of memory.
_QUEUE_SIZE = 10000
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
}
  def __init__(self, log_service_descriptor):
    """Open the logging channel and start the background control reader.

    Args:
      log_service_descriptor: endpoint descriptor with a ``url`` attribute
        pointing at the runner's BeamFnLogging gRPC service.
    """
    super(FnApiLogRecordHandler, self).__init__()
    self._alive = True
    # Count of records dropped because the bounded queue was full.
    self._dropped_logs = 0
    self._log_entry_queue = queue.Queue(
        maxsize=self._QUEUE_SIZE)  # type: queue.Queue[beam_fn_api_pb2.LogEntry]
    ch = GRPCChannelFactory.insecure_channel(log_service_descriptor.url)
    # Make sure the channel is ready to avoid [BEAM-4649]
    grpc.channel_ready_future(ch).result(timeout=60)
    self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
    # Daemon thread: must not block interpreter shutdown if close() is
    # never called.
    self._reader = threading.Thread(
        target=lambda: self._read_log_control_messages(),
        name='read_log_control_messages')
    self._reader.daemon = True
    self._reader.start()
  def connect(self):
    """(Re)create the logging stub and open the bidirectional log stream.

    Returns the iterator of control messages coming back from the service.
    Any stale stub from a previous (broken) connection is discarded first.
    """
    if hasattr(self, '_logging_stub'):
      del self._logging_stub
    self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
        self._log_channel)
    # The request side of the stream is fed by the _write_log_entries
    # generator, which drains the bounded queue.
    return self._logging_stub.Logging(self._write_log_entries())
def map_log_level(self, level):
try:
return self.LOG_LEVEL_MAP[level]
except KeyError:
return max(
beam_level for python_level,
beam_level in self.LOG_LEVEL_MAP.items() if python_level <= level)
  def emit(self, record):
    # type: (logging.LogRecord) -> None
    """Convert a LogRecord to a beam LogEntry and enqueue it (non-blocking).

    Records are dropped (and counted) rather than blocking the logging
    caller when the queue is full.
    """
    log_entry = beam_fn_api_pb2.LogEntry()
    log_entry.severity = self.map_log_level(record.levelno)
    log_entry.message = self.format(record)
    log_entry.thread = record.threadName
    log_entry.log_location = '%s:%s' % (
        record.pathname or record.module, record.lineno or record.funcName)
    # Split the float epoch timestamp into whole seconds + nanoseconds as
    # required by the protobuf Timestamp shape.
    (fraction, seconds) = math.modf(record.created)
    nanoseconds = 1e9 * fraction
    log_entry.timestamp.seconds = int(seconds)
    log_entry.timestamp.nanos = int(nanoseconds)
    if record.exc_info:
      log_entry.trace = ''.join(traceback.format_exception(*record.exc_info))
    # Attach the bundle/transform context, when one is active, so the
    # runner can correlate log lines with work items.
    instruction_id = statesampler.get_current_instruction_id()
    if instruction_id:
      log_entry.instruction_id = instruction_id
    tracker = statesampler.get_current_tracker()
    if tracker:
      current_state = tracker.current_state()
      if (current_state and current_state.name_context and
          current_state.name_context.transform_id):
        log_entry.transform_id = current_state.name_context.transform_id
    try:
      self._log_entry_queue.put(log_entry, block=False)
    except queue.Full:
      self._dropped_logs += 1
  def close(self):
    """Flush out all existing log entries and unregister this handler."""
    try:
      self._alive = False
      # Acquiring the handler lock ensures ``emit`` is not run until the lock is
      # released.
      self.acquire()
      # The sentinel tells _write_log_entries() to terminate the request
      # stream once the remaining entries have been flushed.
      self._log_entry_queue.put(self._FINISHED, timeout=5)
      # wait on server to close.
      self._reader.join()
      self.release()
      # Unregister this handler.
      super(FnApiLogRecordHandler, self).close()
    except Exception:
      # Log rather than raising exceptions, to avoid clobbering
      # underlying errors that may have caused this to close
      # prematurely.
      logging.error("Error closing the logging channel.", exc_info=True)
  def _write_log_entries(self):
    """Yield batched LogEntry.List messages for the outgoing gRPC stream.

    Blocks until at least one entry is queued, then opportunistically drains
    up to _MAX_BATCH_SIZE additional entries into the same batch. Terminates
    when the _FINISHED sentinel (enqueued by close()) is observed.
    """
    done = False
    while not done:
      # Blocking get: wait for the first entry of the next batch.
      log_entries = [self._log_entry_queue.get()]
      try:
        for _ in range(self._MAX_BATCH_SIZE):
          log_entries.append(self._log_entry_queue.get_nowait())
      except queue.Empty:
        pass
      # The sentinel, if present, is always the last item drained.
      if log_entries[-1] is self._FINISHED:
        done = True
        log_entries.pop()
      if log_entries:
        yield beam_fn_api_pb2.LogEntry.List(log_entries=log_entries)
  def _read_log_control_messages(self):
    """Consume server control messages, reconnecting while the handler lives."""
    # Only reconnect when we are alive.
    # We can drop some logs in the unlikely event of the logging connection
    # being dropped (not closed) during termination while we still have logs
    # to send. This case is unlikely, and the chance of reconnecting and
    # successfully transmitting the logs is also very low as the process is
    # terminating, so we choose not to handle it to avoid unnecessary code
    # complexity.
    alive = True  # Force at least one connection attempt.
    while alive:
      # Loop for reconnection.
      log_control_iterator = self.connect()
      if self._dropped_logs > 0:
        logging.warning(
            "Dropped %d logs while logging client disconnected",
            self._dropped_logs)
        self._dropped_logs = 0
      try:
        for _ in log_control_iterator:
          # Loop for consuming messages from server.
          # TODO(vikasrk): Handle control messages.
          pass
        # iterator is closed
        return
      except Exception as ex:
        # print() rather than logging here: emitting through the (failed)
        # logging channel could recurse into this same handler.
        print(
            "Logging client failed: {}... resetting".format(ex),
            file=sys.stderr)
        # Wait a bit before trying a reconnect
        time.sleep(0.5)  # 0.5 seconds
      alive = self._alive
| apache-2.0 |
jcftang/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_shell.py | 37 | 6595 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, 2016 Ritesh Khadgaray <khadgaray () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmware_vm_shell
short_description: Execute a process in VM
description:
- Start a program in a VM without the need for network connection
version_added: 2.1
author: "Ritesh Khadgaray (@ritzk)"
notes:
- Tested on vSphere 5.5
- Only the first match against vm_id is used, even if there are multiple matches
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
- The datacenter hosting the VM
- Will help speed up search
required: False
default: None
cluster:
description:
- The cluster hosting the VM
- Will help speed up search
required: False
default: None
vm_id:
description:
- The identification for the VM
required: True
vm_id_type:
description:
- The identification tag for the VM
default: vm_name
choices:
- 'uuid'
- 'dns_name'
- 'inventory_path'
- 'vm_name'
required: False
vm_username:
description:
- The user to connect to the VM.
required: False
default: None
vm_password:
description:
- The password used to login to the VM.
required: False
default: None
vm_shell:
description:
- The absolute path to the program to start. On Linux this is executed via bash.
required: True
vm_shell_args:
description:
- The argument to the program.
required: False
default: None
vm_shell_env:
description:
      - Comma-separated list of environment variables, specified in the guest OS notation
required: False
default: None
vm_shell_cwd:
description:
- The current working directory of the application from which it will be run
required: False
default: None
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: shell execution
local_action:
module: vmware_vm_shell
hostname: myVSphere
username: myUsername
password: mySecret
datacenter: myDatacenter
vm_id: NameOfVM
vm_username: root
vm_password: superSecret
vm_shell: /bin/echo
vm_shell_args: " $var >> myFile "
vm_shell_env:
- "PATH=/bin"
- "VAR=test"
vm_shell_cwd: "/tmp"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None):
    """Start a program inside a VM's guest OS via vSphere guest operations.

    :param content: vSphere ServiceInstance content (provides guestOperationsManager).
    :param vm: target virtual machine object.
    :param vm_username: guest OS user to authenticate as.
    :param vm_password: password for the guest OS user.
    :param program_path: absolute path of the program to start in the guest.
    :param args: argument string passed to the program.
    :param env: list of environment variables in guest OS notation, or None.
    :param cwd: working directory for the process in the guest, or None.
    :return: guest-side PID of the started process.
    """
    creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
    cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, envVariables=env, programPath=program_path, workingDirectory=cwd)
    # StartProgramInGuest returns immediately with the new process id; it does
    # not wait for the program to complete.
    cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec)
    return cmdpid
def main():
    """Module entry point: locate the VM and start the requested program in it."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter=dict(default=None, type='str'),
                              cluster=dict(default=None, type='str'),
                              vm_id=dict(required=True, type='str'),
                              vm_id_type=dict(default='vm_name', type='str', choices=['inventory_path', 'uuid', 'dns_name', 'vm_name']),
                              vm_username=dict(required=False, type='str'),
                              vm_password=dict(required=False, type='str', no_log=True),
                              vm_shell=dict(required=True, type='str'),
                              vm_shell_args=dict(default=" ", type='str'),
                              vm_shell_env=dict(default=None, type='list'),
                              vm_shell_cwd=dict(default=None, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(changed=False, msg='pyvmomi is required for this module')
    try:
        p = module.params
        datacenter_name = p['datacenter']
        cluster_name = p['cluster']
        content = connect_to_api(module)
        # Datacenter and cluster are optional; when supplied they narrow the
        # VM search and fail fast if they do not exist.
        datacenter = None
        if datacenter_name:
            datacenter = find_datacenter_by_name(content, datacenter_name)
            if not datacenter:
                module.fail_json(changed=False, msg="datacenter not found")
        cluster = None
        if cluster_name:
            cluster = find_cluster_by_name(content, cluster_name, datacenter)
            if not cluster:
                module.fail_json(changed=False, msg="cluster not found")
        # Only the first match against vm_id is used (see module notes).
        vm = find_vm_by_id(content, p['vm_id'], p['vm_id_type'], datacenter, cluster)
        if not vm:
            module.fail_json(msg='VM not found')
        # execute_command returns the guest-side PID of the started process.
        msg = execute_command(content, vm, p['vm_username'], p['vm_password'],
                              p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd'])
        module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=msg)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(changed=False, msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(changed=False, msg=method_fault.msg)
    except Exception as e:
        module.fail_json(changed=False, msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
lukeiwanski/tensorflow-opencl | tensorflow/python/ops/losses/losses_impl.py | 32 | 27543 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Loss operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util
def _scale_losses(losses, weights):
  """Sums `losses` after element-wise scaling by `weights`.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dN]`, broadcast against `losses` during the
      multiplication.

  Returns:
    A scalar tf.float32 `Tensor` holding the sum of the weighted `losses`.
  """
  return math_ops.reduce_sum(math_ops.multiply(losses, weights))
def _safe_div(numerator, denominator, name="value"):
  """Divides element-wise, returning 0 wherever the denominator is zero.

  The denominator is first patched (zeros replaced by ones) so the division
  itself never produces NaN/Inf; the quotient is then masked back to zero at
  those positions. This keeps NaNs out of the gradient computation when the
  loss is zero.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  safe_denominator = array_ops.where(
      math_ops.equal(denominator, 0),
      array_ops.ones_like(denominator), denominator)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, safe_denominator),
      array_ops.zeros_like(numerator),
      name=name)
def _safe_mean(losses, num_present):
  """Computes the mean of `losses`, or zero when `num_present` is zero.

  Args:
    losses: `Tensor` whose elements contain individual loss measurements.
    num_present: The number of measurable elements in `losses`.

  Returns:
    A scalar `Tensor` equal to `sum(losses) / num_present`, or zero if
    `num_present` is zero.
  """
  return _safe_div(math_ops.reduce_sum(losses), num_present)
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.
  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero weights.
  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.
  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is `True`, the value is returned as a tensor of size
    `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.to_float(weights)
    # 0/1 indicator of which weights are non-zero.
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    # Tile the indicator out to the full shape of `losses` before counting.
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      # Sum over all non-batch dimensions; keep_dims yields a
      # [batch_size, 1, ..., 1] shape that broadcasts against per-batch values.
      return math_ops.reduce_sum(
          present, axis=math_ops.range(1, array_ops.rank(present)),
          keep_dims=True, name=scope)
    return math_ops.reduce_sum(present, name=scope)
def compute_weighted_loss(
    losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES):
  """Computes the weighted loss.
  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `losses`, and must be broadcastable to `losses` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: the loss will be added to these collections.
  Returns:
    A scalar `Tensor` that returns the weighted loss.
  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.
  """
  with ops.name_scope(scope, "weighted_loss", (losses, weights)):
    # Validate broadcastability before any arithmetic happens.
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, losses),)):
      losses = ops.convert_to_tensor(losses)
      input_dtype = losses.dtype
      # Compute in float32 regardless of input dtype.
      losses = math_ops.to_float(losses)
      weights = math_ops.to_float(weights)
      total_loss = _scale_losses(losses, weights)
      # Normalize by the count of non-zero weights rather than tensor size,
      # so masked-out elements do not dilute the mean.
      num_present = _num_present(losses, weights)
      mean_loss = _safe_mean(total_loss, num_present)
      # Convert the result back to the input type.
      mean_loss = math_ops.cast(mean_loss, input_dtype)
      util.add_loss(mean_loss, loss_collection)
      return mean_loss
def absolute_difference(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds an Absolute Difference (L1) loss to the training procedure.

  `weights` acts as a coefficient for the loss: a scalar scales the whole
  loss, a `[batch_size]` tensor rescales each sample's loss, and a tensor
  matching the shape of `predictions` rescales every measurable element.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      (predictions, labels, weights)) as scope:
    labels = math_ops.to_float(labels)
    predictions = math_ops.to_float(predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    abs_errors = math_ops.abs(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(abs_errors, weights, scope, loss_collection)
def cosine_distance(
    labels, predictions, dim=None, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds a cosine-distance loss to the training procedure.
  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.
  Args:
    labels: `Tensor` whose shape matches 'predictions'
    predictions: An arbitrary matrix.
    dim: The dimension along which the cosine distance is computed. Required;
      it is keyword-optional only for call-signature uniformity.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
  Returns:
    A scalar `Tensor` representing the loss value.
  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # For unit vectors, cosine similarity is the sum of elementwise products
    # along `dim`; the distance is 1 minus that similarity.
    radial_diffs = math_ops.multiply(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(dim,), keep_dims=True)
    return compute_weighted_loss(losses, weights, scope, loss_collection)
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES):
  """Adds a hinge loss to the training procedure.
  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` of the loss value.
  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  # Pass `weights` in the name_scope values tuple, consistent with every other
  # loss in this module, so it is captured/converted by the scope as well.
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    # hinge = max(0, 1 - y * logits) with y in {-1, 1}.
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
             loss_collection=ops.GraphKeys.LOSSES):
  """Adds a Log Loss term to the training procedure.
  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.
  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` representing the loss value.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Binary cross-entropy: -y*log(p) - (1-y)*log(1-p); `epsilon` keeps the
    # log arguments strictly positive.
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
def mean_pairwise_squared_error(labels, predictions, weights=1.0, scope=None,
                                loss_collection=ops.GraphKeys.LOSSES):
  """Adds a pairwise-errors-squared loss to the training procedure.
  Unlike `mean_squared_error`, which is a measure of the differences between
  corresponding elements of `predictions` and `labels`,
  `mean_pairwise_squared_error` is a measure of the differences between pairs of
  corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], the three
  pairs of differences are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
  Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
  is drawn from each image, but not across images.
  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector.
  Args:
    labels: The ground truth output tensor, whose shape must match the shape of
      `predictions`.
    predictions: The predicted outputs, a tensor of size
      `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
      `predictions`.
    weights: Coefficients for the loss a scalar, a tensor of shape
      `[batch_size]` or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` representing the loss value.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_pairwise_squared_error",
                      (predictions, labels, weights)) as scope:
    weights = math_ops.to_float(weights)
    labels = math_ops.to_float(labels)
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, labels),)):
      predictions = math_ops.to_float(predictions)
      predictions.get_shape().assert_is_compatible_with(labels.get_shape())
      diffs = math_ops.subtract(predictions, labels)
      # All non-batch dimensions; pairwise statistics are computed per sample.
      reduction_indices = math_ops.range(1, array_ops.rank(diffs))
      sum_squares_diff_per_batch = math_ops.reduce_sum(
          math_ops.square(diffs), reduction_indices=reduction_indices,
          keep_dims=True)
      num_present_per_batch = _num_present(diffs, weights, per_batch=True)
      # NOTE(review): term1 - term2 expands the pairwise sum via
      # "sum of squares minus square of sums"; both terms normalize by
      # num_present (resp. its square) — confirm this matches the intended
      # 1/n (vs 1/(n*(n-1))) estimator for the documented formula.
      term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
                              num_present_per_batch)
      sum_diff = math_ops.reduce_sum(
          diffs, reduction_indices=reduction_indices, keep_dims=True)
      term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
                              math_ops.square(num_present_per_batch))
      loss = _scale_losses(term1 - term2, weights)
      # Return exactly zero (not NaN) when no elements are present.
      mean_loss = array_ops.where(
          math_ops.reduce_sum(num_present_per_batch) > 0,
          loss,
          array_ops.zeros_like(loss),
          name="value")
      util.add_loss(mean_loss, loss_collection)
      return mean_loss
def mean_squared_error(labels, predictions, weights=1.0, scope=None,
                       loss_collection=ops.GraphKeys.LOSSES):
  """Adds a Sum-of-Squares (L2) loss to the training procedure.

  `weights` acts as a coefficient for the loss: a scalar scales the whole
  loss, a `[batch_size]` tensor rescales each sample's loss, and a tensor
  matching the shape of `predictions` rescales every measurable element.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    labels = math_ops.to_float(labels)
    predictions = math_ops.to_float(predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    squared_errors = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(squared_errors, weights, scope,
                                 loss_collection)
def sigmoid_cross_entropy(
    multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.
  If `label_smoothing` is nonzero, smooth the labels towards 1/2:
      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing
  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `(0, 1)`.
    logits: `[batch_size, num_classes]` logits outputs of the network.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` representing the loss value.
  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.
  """
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      (logits, multi_class_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
    # Pull the hard 0/1 targets toward 0.5 by `label_smoothing`.
    if label_smoothing > 0:
      multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                            0.5 * label_smoothing)
    losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
                                                  logits=logits,
                                                  name="xentropy")
    return compute_weighted_loss(losses, weights, scope, loss_collection)
def softmax_cross_entropy(
    onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.
  If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
      new_onehot_labels = onehot_labels * (1 - label_smoothing)
                          + label_smoothing / num_classes
  Args:
    onehot_labels: `[batch_size, num_classes]` target one-hot-encoded labels.
    logits: [batch_size, num_classes] logits outputs of the network .
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `onehot_labels`, and must be broadcastable to `onehot_labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `losses`
      dimension).
    label_smoothing: If greater than 0 then smooth the labels.
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` representing the mean loss value.
  Raises:
    ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
      or if the shape of `weights` is invalid or if `weights` is None.
  """
  with ops.name_scope(scope, "softmax_cross_entropy_loss",
                      (logits, onehot_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
    # Pull the one-hot targets toward the uniform distribution 1/num_classes.
    if label_smoothing > 0:
      num_classes = math_ops.cast(
          array_ops.shape(onehot_labels)[1], logits.dtype)
      smooth_positives = 1.0 - label_smoothing
      smooth_negatives = label_smoothing / num_classes
      onehot_labels = onehot_labels * smooth_positives + smooth_negatives
    losses = nn.softmax_cross_entropy_with_logits(labels=onehot_labels,
                                                  logits=logits,
                                                  name="xentropy")
    return compute_weighted_loss(losses, weights, scope, loss_collection)
# TODO(ptucker): Merge this with similar method in metrics_impl.
def _remove_squeezable_dimensions(
    labels, predictions, weights=None, expected_rank_diff=0):
  """Internal version of _remove_squeezable_dimensions which handles weights.
  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`.
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
      and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions, expected_rank_diff=expected_rank_diff)
  if weights is not None:
    weights = ops.convert_to_tensor(weights)
    labels_rank = labels.get_shape().ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims
    if (labels_rank is not None) and (weights_rank is not None):
      # Use static rank: the decision can be made at graph-construction time.
      rank_diff = weights_rank - labels_rank
      if rank_diff == 1:
        weights = array_ops.squeeze(weights, [-1])
      return labels, predictions, weights
    # Use dynamic rank: at least one rank is unknown until runtime, so the
    # squeeze must be decided inside the graph with a cond.
    rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
    if (weights_rank is None) or (
        weights_shape.dims[-1].is_compatible_with(1)):
      weights = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)
  return labels, predictions, weights
def sparse_softmax_cross_entropy(labels, logits, weights=1.0, scope=None,
                                 loss_collection=ops.GraphKeys.LOSSES):
  """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape [`batch_size`], then the loss weights apply to each
  corresponding sample.
  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` or `float64`.
    weights: Coefficients for the loss. This must be scalar or of same rank as
      `labels`
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` representing the mean loss value.
  Raises:
    ValueError: If the shapes of logits, labels, and weight are incompatible, or
      if `weights` is None.
  """
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # As documented above in Args, labels contain class IDs and logits contains
    # 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1;
    # therefore, expected_rank_diff=1.
    labels, logits, weights = _remove_squeezable_dimensions(
        labels, logits, weights, expected_rank_diff=1)
    losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                         logits=logits,
                                                         name="xentropy")
    return compute_weighted_loss(losses, weights, scope, loss_collection)
| apache-2.0 |
ofekd/servo | tests/wpt/web-platform-tests/tools/manifest/sourcefile.py | 7 | 12111 | import imp
import os
from six.moves.urllib.parse import urljoin
from fnmatch import fnmatch
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
here = os.path.dirname(__file__)
localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, "localpaths.py")))
import html5lib
from . import vcs
from .item import Stub, ManualTest, WebdriverSpecTest, RefTest, TestharnessTest
from .utils import rel_path_to_url, is_blacklisted, ContextManagerBytesIO, cached_property
wd_pattern = "*.py"
def replace_end(s, old, new):
    """
    Given a string `s` that ends with `old`, replace that occurrence of `old`
    with `new`.

    The slice bound is computed from the left (``len(s) - len(old)``) rather
    than as ``-len(old)`` so that an empty `old` appends `new` instead of
    discarding `s` entirely (``s[:-0]`` is the empty string).
    """
    assert s.endswith(old)
    return s[:len(s) - len(old)] + new
class SourceFile(object):
    """A file in the test source tree, plus the logic for deciding which
    kind(s) of test (if any) it contains."""

    # Parsers that build an ElementTree per markup type; html needs html5lib
    # because test files are not required to be well-formed XML.
    parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"),
               "xhtml":ElementTree.parse,
               "svg":ElementTree.parse}

    def __init__(self, tests_root, rel_path, url_base, use_committed=False,
                 contents=None):
        """Object representing a file in a source tree.

        :param tests_root: Path to the root of the source tree
        :param rel_path: File path relative to tests_root
        :param url_base: Base URL used when converting file paths to urls
        :param use_committed: Work with the last committed version of the file
                              rather than the on-disk version.
        :param contents: Byte array of the contents of the file or ``None``.
        """
        assert not (use_committed and contents is not None)

        self.tests_root = tests_root
        self.rel_path = rel_path
        self.url_base = url_base
        self.use_committed = use_committed
        self.contents = contents

        self.url = rel_path_to_url(rel_path, url_base)
        self.path = os.path.join(tests_root, rel_path)

        self.dir_path, self.filename = os.path.split(self.path)
        self.name, self.ext = os.path.splitext(self.filename)

        # e.g. "foo-manual" -> type_flag "manual"; "bar-ref" -> "ref".
        self.type_flag = None
        if "-" in self.name:
            self.type_flag = self.name.rsplit("-", 1)[1]

        # Dot-separated flags in the stem, e.g. name "foo.any" -> ["any"].
        self.meta_flags = self.name.split(".")[1:]

    def __getstate__(self):
        # Remove computed properties if we pickle this class
        rv = self.__dict__.copy()

        if "__cached_properties__" in rv:
            cached_properties = rv["__cached_properties__"]
            # Iterate over a snapshot of the keys: deleting entries while
            # iterating a live dict view raises RuntimeError on Python 3.
            for key in list(rv.keys()):
                if key in cached_properties:
                    del rv[key]
            del rv["__cached_properties__"]
        return rv

    def name_prefix(self, prefix):
        """Check if the filename starts with a given prefix

        :param prefix: The prefix to check"""
        return self.name.startswith(prefix)

    def is_dir(self):
        """Return whether this file represents a directory."""
        if self.contents is not None:
            return False

        # NOTE(review): this checks rel_path, which is only correct when the
        # working directory is tests_root; self.path looks like the intended
        # value. Left unchanged pending confirmation from callers.
        return os.path.isdir(self.rel_path)

    def open(self):
        """
        Return either
        * the contents specified in the constructor, if any;
        * the contents of the file when last committed, if use_committed is true; or
        * a File object opened for reading the file contents.
        """
        if self.contents is not None:
            file_obj = ContextManagerBytesIO(self.contents)
        elif self.use_committed:
            git = vcs.get_git_func(os.path.dirname(__file__))
            blob = git("show", "HEAD:%s" % self.rel_path)
            file_obj = ContextManagerBytesIO(blob)
        else:
            file_obj = open(self.path, 'rb')
        return file_obj

    @property
    def name_is_non_test(self):
        """Check if the file name matches the conditions for the file to
        be a non-test file"""
        return (self.is_dir() or
                self.name_prefix("MANIFEST") or
                self.filename.startswith(".") or
                is_blacklisted(self.url))

    @property
    def name_is_stub(self):
        """Check if the file name matches the conditions for the file to
        be a stub file"""
        return self.name_prefix("stub-")

    @property
    def name_is_manual(self):
        """Check if the file name matches the conditions for the file to
        be a manual test file"""
        return self.type_flag == "manual"

    @property
    def name_is_multi_global(self):
        """Check if the file name matches the conditions for the file to
        be a multi-global js test file"""
        return "any" in self.meta_flags and self.ext == ".js"

    @property
    def name_is_worker(self):
        """Check if the file name matches the conditions for the file to
        be a worker js test file"""
        return "worker" in self.meta_flags and self.ext == ".js"

    @property
    def name_is_webdriver(self):
        """Check if the file name matches the conditions for the file to
        be a webdriver spec test file"""
        # wdspec tests are in subdirectories of /webdriver excluding __init__.py
        # files.
        rel_dir_tree = self.rel_path.split(os.path.sep)
        return (rel_dir_tree[0] == "webdriver" and
                len(rel_dir_tree) > 1 and
                self.filename != "__init__.py" and
                fnmatch(self.filename, wd_pattern))

    @property
    def name_is_reference(self):
        """Check if the file name matches the conditions for the file to
        be a reference file (not a reftest)"""
        return self.type_flag in ("ref", "notref")

    @property
    def markup_type(self):
        """Return the type of markup contained in a file, based on its extension,
        or None if it doesn't contain markup"""
        ext = self.ext

        if not ext:
            return None
        if ext[0] == ".":
            ext = ext[1:]
        if ext in ["html", "htm"]:
            return "html"
        if ext in ["xhtml", "xht", "xml"]:
            return "xhtml"
        if ext == "svg":
            return "svg"
        return None

    @cached_property
    def root(self):
        """Return an ElementTree Element for the root node of the file if it contains
        markup, or None if it does not"""
        if not self.markup_type:
            return None

        parser = self.parsers[self.markup_type]

        with self.open() as f:
            try:
                tree = parser(f)
            except Exception:
                # Unparseable markup is treated the same as no markup.
                return None

        if hasattr(tree, "getroot"):
            root = tree.getroot()
        else:
            root = tree

        return root

    @cached_property
    def timeout_nodes(self):
        """List of ElementTree Elements corresponding to nodes in a test that
        specify timeouts"""
        return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']")

    @cached_property
    def timeout(self):
        """The timeout of a test or reference file. "long" if the file has an extended timeout
        or None otherwise"""
        if not self.root:
            return

        if self.timeout_nodes:
            timeout_str = self.timeout_nodes[0].attrib.get("content", None)
            if timeout_str and timeout_str.lower() == "long":
                return timeout_str

    @cached_property
    def viewport_nodes(self):
        """List of ElementTree Elements corresponding to nodes in a test that
        specify viewport sizes"""
        return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']")

    @cached_property
    def viewport_size(self):
        """The viewport size of a test or reference file"""
        if not self.root:
            return None

        if not self.viewport_nodes:
            return None

        return self.viewport_nodes[0].attrib.get("content", None)

    @cached_property
    def dpi_nodes(self):
        """List of ElementTree Elements corresponding to nodes in a test that
        specify device pixel ratios"""
        return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']")

    @cached_property
    def dpi(self):
        """The device pixel ratio of a test or reference file"""
        if not self.root:
            return None

        if not self.dpi_nodes:
            return None

        return self.dpi_nodes[0].attrib.get("content", None)

    @cached_property
    def testharness_nodes(self):
        """List of ElementTree Elements corresponding to nodes representing a
        testharness.js script"""
        return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']")

    @cached_property
    def content_is_testharness(self):
        """Boolean indicating whether the file content represents a
        testharness.js test"""
        if not self.root:
            return None
        return bool(self.testharness_nodes)

    @cached_property
    def variant_nodes(self):
        """List of ElementTree Elements corresponding to nodes representing a
        test variant"""
        return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']")

    @cached_property
    def test_variants(self):
        """List of variant suffixes (query or fragment strings) declared by the
        test; [""] when no variants are declared."""
        rv = []
        for element in self.variant_nodes:
            if "content" in element.attrib:
                variant = element.attrib["content"]
                assert variant == "" or variant[0] in ["#", "?"]
                rv.append(variant)

        if not rv:
            rv = [""]

        return rv

    @cached_property
    def reftest_nodes(self):
        """List of ElementTree Elements corresponding to nodes representing a
        to a reftest <link>"""
        if not self.root:
            return []

        match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']")
        mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']")
        return match_links + mismatch_links

    @cached_property
    def references(self):
        """List of (ref_url, relation) tuples for any reftest references specified in
        the file"""
        rv = []
        rel_map = {"match": "==", "mismatch": "!="}
        for item in self.reftest_nodes:
            if "href" in item.attrib:
                ref_url = urljoin(self.url, item.attrib["href"])
                ref_type = rel_map[item.attrib["rel"]]
                rv.append((ref_url, ref_type))
        return rv

    @cached_property
    def content_is_ref_node(self):
        """Boolean indicating whether the file is a non-leaf node in a reftest
        graph (i.e. if it contains any <link rel=[mis]match>"""
        return bool(self.references)

    def manifest_items(self):
        """List of manifest items corresponding to the file. There is typically one
        per test, but in the case of reftests a node may have corresponding manifest
        items without being a test itself."""
        if self.name_is_non_test:
            rv = []

        elif self.name_is_stub:
            rv = [Stub(self, self.url)]

        elif self.name_is_manual:
            rv = [ManualTest(self, self.url)]

        elif self.name_is_multi_global:
            rv = [
                TestharnessTest(self, replace_end(self.url, ".any.js", ".any.html")),
                TestharnessTest(self, replace_end(self.url, ".any.js", ".any.worker")),
            ]

        elif self.name_is_worker:
            rv = [TestharnessTest(self, replace_end(self.url, ".worker.js", ".worker"))]

        elif self.name_is_webdriver:
            rv = [WebdriverSpecTest(self, self.url)]

        elif self.content_is_testharness:
            rv = []
            for variant in self.test_variants:
                url = self.url + variant
                rv.append(TestharnessTest(self, url, timeout=self.timeout))

        elif self.content_is_ref_node:
            rv = [RefTest(self, self.url, self.references, timeout=self.timeout,
                          viewport_size=self.viewport_size, dpi=self.dpi)]

        else:
            # If nothing else it's a helper file, which we don't have a specific type for
            rv = []

        return rv
| mpl-2.0 |
simudream/django-rest-framework | tests/test_middleware.py | 79 | 1134 |
from django.conf.urls import url
from django.contrib.auth.models import User
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from rest_framework.views import APIView
# Route every request to a bare APIView protected by TokenAuthentication so
# the middleware under test observes an authenticated `request.user`.
urlpatterns = [
    url(r'^$', APIView.as_view(authentication_classes=(TokenAuthentication,))),
]
class MyMiddleware(object):
    """Middleware asserting that the user set by DRF's authentication layer
    is still attached to the request during response processing."""

    def process_response(self, request, response):
        # Runs as the response bubbles back up through the middleware stack,
        # i.e. after the view (and therefore authentication) has executed.
        assert hasattr(request, 'user'), '`user` is not set on request'
        assert request.user.is_authenticated(), '`user` is not authenticated'
        return response
class TestMiddleware(APITestCase):
    # Use this module's urlpatterns for the duration of these tests.
    urls = 'tests.test_middleware'

    def test_middleware_can_access_user_when_processing_response(self):
        # Create a user with a known token so TokenAuthentication succeeds.
        user = User.objects.create_user('john', 'john@example.com', 'password')
        key = 'abcd1234'
        Token.objects.create(key=key, user=user)

        # Install MyMiddleware; its process_response performs the actual
        # assertions about request.user.
        with self.settings(
            MIDDLEWARE_CLASSES=('tests.test_middleware.MyMiddleware',)
        ):
            auth = 'Token ' + key
            self.client.get('/', HTTP_AUTHORIZATION=auth)
| bsd-2-clause |
h2oai/h2o-2 | py/testdir_hosts/test_GBM_mnist_restart_many.py | 9 | 2499 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs as h2j
DO_DELETE_KEYS_AND_CAUSE_PROBLEM = False
class Basic(unittest.TestCase):
    """Repeatedly launches a GBM job on the MNIST training set, cancels it
    shortly after start, and verifies the cloud stays healthy."""

    def tearDown(self):
        # Fail the test if any node logged errors during the trial.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Two-node cloud, 7 GB of Java heap per node.
        h2o.init(node_count=2,java_heap_GB=7)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_GBM_mnist_restart_many(self):
        importFolderPath = "mnist"
        csvFilename = "train.csv.gz"
        timeoutSecs=1800
        trialStart = time.time()
        for trial in range(10):
            # PARSE train****************************************
            # Re-parse into a fresh key each trial so earlier trials' keys
            # are left in place (unless deletion is enabled below).
            trainKey = csvFilename + "_" + str(trial) + ".hex"
            start = time.time()
            parseResult = h2i.import_parse(bucket='smalldata', path=importFolderPath + "/" + csvFilename, schema='put',
                hex_key=trainKey, timeoutSecs=timeoutSecs)
            elapsed = time.time() - start
            print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
            print "parse result:", parseResult['destination_key']
            # GBM (train)****************************************
            params = {
                'destination_key': "GBMKEY",
                'learn_rate': .1,
                'ntrees': 10,
                'max_depth': 8,
                'min_rows': 1,
                'response': 784, # this dataset has the response in the last col (0-9 to check)
                # 'ignored_cols_by_name': range(200,784) # only use the first 200 for speed?
                }
            kwargs = params.copy()
            timeoutSecs = 1800
            #noPoll -> False when GBM finished
            GBMResult = h2o_cmd.runGBM(parseResult=parseResult, noPoll=True, **kwargs)
            # if it fails, should happen within 8 secs
            time.sleep(8)
            h2j.cancelAllJobs()
            h2o.check_sandbox_for_errors()
            print "Trial %s: GBM start didn't have any errors after 8 seconds. cancelled. Will delete all keys now." % trial
            if DO_DELETE_KEYS_AND_CAUSE_PROBLEM:
                h2i.delete_keys_at_all_nodes()
                # FIX! does the delete really complete fully before we get a response?
                # time.sleep(5)
if __name__ == '__main__':
    # Standard h2o test entry point: builds the cloud and runs the TestCase.
    h2o.unit_main()
| apache-2.0 |
xyder/kivy-tasks-client | config.py | 1 | 1043 | from views.tasks_list_view import convert_time
# When True the UI layout is loaded from a .kv file instead of being built in
# Python code.
use_kv_file = False

# Field descriptors for the task form: the label shown to the user, the key
# into the task dict, an optional value formatter, and optional widget args.
form_input_formats = [
    {
        'label': 'Title',
        'key': 'title',
        'formatter': None
    },
    {
        'label': 'Description',
        'key': 'body',
        'formatter': None,
        # Taller, multi-line input widget for the description field.
        'args': {
            'height': 105,
            'input_height': 70,
            'multiline': True
        }
    },
    {
        'label': 'Created Date',
        'key': 'created',
        # Timestamps are rendered through convert_time for display.
        'formatter': convert_time
    },
    {
        'label': 'Due Date',
        'key': 'due',
        'formatter': convert_time
    },
    {
        'label': 'Status',
        'key': 'state',
        'formatter': None
    }
]

# api urls
api_version = '1'
api_url_root = '/api/v%s' % api_version
api_url_start = '%s/tasks' % api_url_root
# Item URL template; interpolate the task id, e.g. api_url_item % task_id.
api_url_item = api_url_start + '/%s'
server_address = 'http://localhost:5000'
| mit |
iutrs/networkmap | explorer/network_objects.py | 1 | 4355 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Auteur : Marc-Antoine Fortier
Date : Mars 2015
"""
import json
# Substrings searched for in the LLDP system description to classify a
# device's vendor family.
HP_DEVICES = ("HP", "Hewlett-Packard", "ProCurve")
JUNIPER_DEVICES = ("Juniper", "JUNOS")
LINUX_DEVICES = ("Linux", "Debian", "Ubuntu")

SUPPORTED_DEVICES = HP_DEVICES + JUNIPER_DEVICES + LINUX_DEVICES
# Capability keywords a device must advertise to be considered explorable.
SUPPORTED_TYPES = ("bridge", "Bridge")
class VlanMode():
    # How a VLAN is carried on a port: tagged (trunk) or untagged (access).
    TRUNK = "Tagged"
    ACCESS = "Untagged"
class VlanStatus():
    # Operational state of a VLAN on a port.
    ACTIVE = "Up"
    INACTIVE = "Down"
class NetworkObject(object):
    """Base class giving every network entity pretty-printed JSON output."""

    def to_JSON(self):
        """Serialize this object (and nested attributes) to indented JSON."""
        return json.dumps(self,
                          default=lambda obj: obj.__dict__,
                          sort_keys=False,
                          indent=4)
class Vlan(NetworkObject):
    """A VLAN as observed on a device interface."""
    def __init__(
            self,
            identifier=None,
            name=None,
            mode=None,
            status=None):
        # VLAN id as reported by the device.
        self.identifier = identifier
        # Human-readable VLAN name.
        self.name = name
        # Presumably one of the VlanMode constants ("Tagged"/"Untagged") —
        # assigned by the explorer; confirm at the call site.
        self.mode = mode
        # Presumably one of the VlanStatus constants ("Up"/"Down").
        self.status = status
class VirtualMachine(NetworkObject):
    """A virtual machine reported by a hypervisor host."""

    def __init__(self, identifier=None, name=None, state=None):
        self.identifier = identifier
        self.name = name
        self.state = state

    def is_valid(self):
        """Return True when every field is set and the id is meaningful
        (neither "-" nor the empty string)."""
        fields = (self.identifier, self.name, self.state)
        if any(field is None for field in fields):
            return False
        return self.identifier not in ("-", "")
class Trunk(NetworkObject):
    """A link-aggregation (trunk) group on a device."""
    def __init__(
            self,
            name=None,
            type=None,
            group=None,
            ports=None):
        self.name = name
        # NOTE: the parameter shadows the `type` builtin; kept for API
        # compatibility with existing callers.
        self.type = type
        self.group = group
        # Member ports; `or []` gives each instance its own fresh list.
        self.ports = ports or []
class Interface(NetworkObject):
    """One local switch port together with what LLDP reports is attached."""

    def __init__(
            self,
            local_port=None,
            remote_port=None,
            remote_mac_address=None,
            remote_system_name=None,
            vlans=None):
        self.local_port = local_port
        self.remote_port = remote_port
        self.remote_mac_address = remote_mac_address
        self.remote_system_name = remote_system_name
        # Map of vlan identifier -> Vlan; `or {}` gives each instance its
        # own fresh dict.
        self.vlans = vlans or {}

    def add_vlan(self, vlan):
        """Register `vlan` on this interface, keeping the first Vlan seen
        for any given identifier."""
        # `x not in y` is the idiomatic form of `not x in y`.
        if vlan.identifier not in self.vlans:
            self.vlans[vlan.identifier] = vlan

    def is_valid_lldp_interface(self):
        """Return True when LLDP reported a non-empty remote system name."""
        return \
            (
                self.remote_system_name is not None and
                self.remote_system_name != ""
            )
class DeviceStatus():
    # Outcome of the attempt to contact/authenticate against a device.
    NO_AUTH_REQUESTED = "No authentication requested"
    AUTH_FAILED = "Authentication failed"
    UNREACHABLE = "Unreachable"
class Device(NetworkObject):
    """A network device (switch, router or server) discovered via LLDP."""

    def __init__(
            self,
            mac_address=None,
            ip_address=None,
            ip_address_type=None,
            system_name=None,
            system_description=None,
            supported_capabilities=None,
            enabled_capabilities=None,
            interfaces=None,
            trunks=None,
            virtual_machines=None,
            status=None):
        self.mac_address = mac_address
        self.ip_address = ip_address
        self.ip_address_type = ip_address_type
        self.system_name = system_name
        self.system_description = system_description
        self.supported_capabilities = supported_capabilities or ""
        self.enabled_capabilities = enabled_capabilities or ""
        # Mutable containers default to fresh per-instance objects.
        self.interfaces = interfaces or {}
        self.trunks = trunks or {}
        self.virtual_machines = virtual_machines or []
        self.status = status

    def is_valid_lldp_device(self):
        """Return True when the device advertises a supported capability
        and a recognized vendor string in its system description."""
        return \
            (
                self.enabled_capabilities is not None and
                any(t in self.enabled_capabilities for t in SUPPORTED_TYPES)
                and
                self.system_description is not None and
                any(d in self.system_description for d in SUPPORTED_DEVICES)
            )

    @property
    def type(self):
        """Vendor family code ('hp', 'juniper' or 'linux'), or None when the
        system description is missing or unrecognized."""
        if self.system_description is None:
            return None

        type_map = {
            HP_DEVICES: "hp",
            JUNIPER_DEVICES: "juniper",
            LINUX_DEVICES: "linux"}

        # dict.items() works on both Python 2 and 3; iteritems() is py2-only.
        for patterns, code in type_map.items():
            if any(string in self.system_description for string in patterns):
                return code
        return None
| gpl-3.0 |
TEAM-Gummy/platform_external_chromium_org | chrome/common/extensions/docs/server2/features_bundle.py | 23 | 4255 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiled_file_system import Unicode
from extensions_paths import (
API_FEATURES, JSON_TEMPLATES, MANIFEST_FEATURES, PERMISSION_FEATURES)
import features_utility
from future import Gettable, Future
from third_party.json_schema_compiler.json_parse import Parse
def _AddPlatformsFromDependencies(feature,
                                  api_features,
                                  manifest_features,
                                  permission_features):
  # Derives the platforms a feature is available on from the platforms of its
  # dependencies ('api:...', 'manifest:...' or 'permission:...') and stores
  # them in feature['platforms'].
  features_map = {
    'api': api_features,
    'manifest': manifest_features,
    'permission': permission_features,
  }
  dependencies = feature.get('dependencies')
  if dependencies is None:
    # NOTE(review): this return value is discarded by the caller, so
    # feature['platforms'] is never set on this path. It looks like it was
    # meant to be `feature['platforms'] = ['apps', 'extensions']` — confirm
    # against upstream before changing.
    return ['apps', 'extensions']
  platforms = set()
  for dependency in dependencies:
    dep_type, dep_name = dependency.split(':')
    dependency_features = features_map[dep_type]
    dependency_feature = dependency_features.get(dep_name)
    # If the dependency can't be resolved, it is inaccessible and therefore
    # so is this feature.
    if dependency_feature is None:
      # NOTE(review): also ignored by the caller; the feature is left with
      # no 'platforms' key rather than an empty list.
      return []
    platforms = platforms.union(dependency_feature['platforms'])
  feature['platforms'] = list(platforms)
class _FeaturesCache(object):
  '''Lazily parses a *_features.json file through the compiled file system,
  merging any additional JSON files on top of the primary one.
  '''
  def __init__(self, file_system, compiled_fs_factory, *json_paths):
    self._cache = compiled_fs_factory.Create(
        file_system, self._CreateCache, type(self))
    self._text_cache = compiled_fs_factory.ForUnicode(file_system)
    # First path is the primary features file; the rest are merged into it.
    self._json_path = json_paths[0]
    self._extra_paths = json_paths[1:]

  @Unicode
  def _CreateCache(self, _, features_json):
    # Kick off reads of the extra files before parsing the primary one so
    # the fetches overlap.
    extra_path_futures = [self._text_cache.GetFromFile(path)
                          for path in self._extra_paths]
    features = features_utility.Parse(Parse(features_json))
    for path_future in extra_path_futures:
      extra_json = path_future.Get()
      features = features_utility.MergedWith(
          features_utility.Parse(Parse(extra_json)), features)
    return features

  def GetFeatures(self):
    '''Returns a Future resolving to the parsed features dict (empty when no
    primary json path was configured).
    '''
    if self._json_path is None:
      return Future(value={})
    return self._cache.GetFromFile(self._json_path)
class FeaturesBundle(object):
  '''Provides access to properties of API, Manifest, and Permission features.
  '''
  def __init__(self, file_system, compiled_fs_factory, object_store_creator):
    self._api_cache = _FeaturesCache(
        file_system,
        compiled_fs_factory,
        API_FEATURES)
    self._manifest_cache = _FeaturesCache(
        file_system,
        compiled_fs_factory,
        MANIFEST_FEATURES,
        '%s/manifest.json' % JSON_TEMPLATES)
    self._permission_cache = _FeaturesCache(
        file_system,
        compiled_fs_factory,
        PERMISSION_FEATURES,
        '%s/permissions.json' % JSON_TEMPLATES)
    self._object_store = object_store_creator.Create(_FeaturesCache, 'features')

  def GetPermissionFeatures(self):
    '''Returns a Future resolving to the permission features dict.'''
    return self._permission_cache.GetFeatures()

  def GetManifestFeatures(self):
    '''Returns a Future resolving to the manifest features dict.'''
    return self._manifest_cache.GetFeatures()

  def GetAPIFeatures(self):
    '''Returns a Future resolving to the API features dict, with each
    feature's 'platforms' annotated from its dependencies. The annotated
    result is memoized in the object store.
    '''
    api_features = self._object_store.Get('api_features').Get()
    if api_features is not None:
      return Future(value=api_features)

    api_features_future = self._api_cache.GetFeatures()
    manifest_features_future = self._manifest_cache.GetFeatures()
    permission_features_future = self._permission_cache.GetFeatures()
    def resolve():
      api_features = api_features_future.Get()
      manifest_features = manifest_features_future.Get()
      permission_features = permission_features_future.Get()
      # TODO(rockot): Handle inter-API dependencies more gracefully.
      # Not yet a problem because there is only one such case (windows -> tabs).
      # If we don't store this value before annotating platforms, inter-API
      # dependencies will lead to infinite recursion.
      for feature in api_features.itervalues():
        _AddPlatformsFromDependencies(
            feature, api_features, manifest_features, permission_features)
      self._object_store.Set('api_features', api_features)
      return api_features
    return Future(delegate=Gettable(resolve))
| bsd-3-clause |
royalharsh/grpc | src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
sandeepsingh3/reveal_experimentation | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # The rules section accumulates one ['CustomBuildRule', attrs] entry per
    # call to AddCustomBuildRule.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a custom build rule to the tool file.

    Args:
      name: Name of the rule.
      cmd: Command line of the rule.
      description: Description of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file, but only when its contents have changed."""
    content = [
        'VisualStudioToolFile',
        {'Version': '8.00',
         'Name': self.name
        },
        self.rules_section,
    ]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| mit |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/referrer-policy/generic/tools/generate.py | 15 | 7360 | #!/usr/bin/env python
from __future__ import print_function
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_test_expansion_pattern(spec_test_expansion, test_expansion_schema):
    """Expand a spec test-expansion pattern into explicit value lists.

    For each artifact: '*' expands to every value the schema allows, a list
    is kept as-is, and a scalar becomes a single-element list.
    """
    expanded = {}
    for artifact, value in spec_test_expansion.items():
        if value == '*':
            expanded[artifact] = test_expansion_schema[artifact]
        elif isinstance(value, list):
            expanded[artifact] = value
        else:
            expanded[artifact] = [value]
    return expanded
def permute_expansion(expansion, selection=None, artifact_index=0):
    """Yield every artifact selection reachable from `expansion`.

    Artifacts are permuted in a fixed canonical order so generated test paths
    are deterministic. NOTE: the same `selection` dict object is mutated and
    re-yielded on each step; callers must copy it to retain a permutation
    beyond the current iteration.

    Args:
        expansion: dict mapping artifact name -> list of candidate values.
        selection: internal accumulator (do not pass).
        artifact_index: internal recursion depth (do not pass).
    """
    # Use a None sentinel instead of a mutable default argument: a default
    # `selection={}` would be shared (and mutated) across top-level calls.
    if selection is None:
        selection = {}
    artifact_order = ['delivery_method', 'redirection', 'origin',
                      'source_protocol', 'target_protocol', 'subresource',
                      'referrer_url', 'name']

    if artifact_index >= len(artifact_order):
        yield selection
        return

    artifact_key = artifact_order[artifact_index]

    for artifact_value in expansion[artifact_key]:
        selection[artifact_key] = artifact_value
        for next_selection in permute_expansion(expansion,
                                                selection,
                                                artifact_index + 1):
            yield next_selection
def generate_selection(selection, spec, subresource_path,
                       test_html_template_basename):
    """Render one test HTML file (and optional .headers file) on disk.

    Args:
        selection: artifact dict produced by permute_expansion; augmented in
            place with spec metadata and rendered template fragments.
        spec: the specification entry this selection was expanded from.
        subresource_path: server path of the subresource used by the test.
        test_html_template_basename: debug or release HTML template name.
    """
    selection['spec_name'] = spec['name']
    selection['spec_title'] = spec['title']
    selection['spec_description'] = spec['description']
    selection['spec_specification_url'] = spec['specification_url']
    selection['subresource_path'] = subresource_path
    # Oddball: it can be None, so in JS it's null.
    selection['referrer_policy_json'] = json.dumps(spec['referrer_policy'])

    test_filename = test_file_path_pattern % selection
    test_directory = os.path.dirname(test_filename)
    full_path = os.path.join(spec_directory, test_directory)

    test_html_template = get_template(test_html_template_basename)
    test_js_template = get_template("test.js.template")
    disclaimer_template = get_template('disclaimer.template')
    test_description_template = get_template("test_description.template")

    html_template_filename = os.path.join(template_directory,
                                          test_html_template_basename)
    generated_disclaimer = disclaimer_template \
        % {'generating_script_filename': os.path.relpath(__file__,
                                                         test_root_directory),
           'html_template_filename': os.path.relpath(html_template_filename,
                                                     test_root_directory)}

    # Adjust the template for the test invoking JS. Indent it to look nice.
    selection['generated_disclaimer'] = generated_disclaimer.rstrip()
    test_description_template = \
        test_description_template.rstrip().replace("\n", "\n" + " " * 33)
    selection['test_description'] = test_description_template % selection

    # Adjust the template for the test invoking JS. Indent it to look nice.
    indent = "\n" + " " * 6
    test_js_template = indent + test_js_template.replace("\n", indent)
    selection['test_js'] = test_js_template % selection

    # Directory for the test files. Only swallow OS-level failures (the
    # directory typically already exists) instead of a bare `except:` that
    # would also hide KeyboardInterrupt and programming errors.
    try:
        os.makedirs(full_path)
    except OSError:
        pass

    selection['meta_delivery_method'] = ''

    if spec['referrer_policy'] is not None:
        if selection['delivery_method'] == 'meta-referrer':
            selection['meta_delivery_method'] = \
                '<meta name="referrer" content="%(referrer_policy)s">' % spec
        elif selection['delivery_method'] == 'http-rp':
            selection['meta_delivery_method'] = \
                "<!-- No meta: Referrer policy delivered via HTTP headers. -->"
            test_headers_filename = test_filename + ".headers"
            with open(test_headers_filename, "w") as f:
                f.write('Referrer-Policy: ' + \
                        '%(referrer_policy)s\n' % spec)
                # TODO(kristijanburnik): Limit to WPT origins.
                f.write('Access-Control-Allow-Origin: *\n')
        elif selection['delivery_method'] == 'attr-referrer':
            # attr-referrer is supported by the JS test wrapper.
            pass
        elif selection['delivery_method'] == 'rel-noreferrer':
            # rel=noreferrer is supported by the JS test wrapper.
            pass
        else:
            raise ValueError('Not implemented delivery_method: ' \
                             + selection['delivery_method'])

    # Obey the lint and pretty format.
    if len(selection['meta_delivery_method']) > 0:
        selection['meta_delivery_method'] = "\n    " + \
            selection['meta_delivery_method']

    with open(test_filename, 'w') as f:
        f.write(test_html_template % selection)
def generate_test_source_files(spec_json, target):
    """Expand every specification entry into concrete test files.

    Also regenerates the spec_json.js consumed by the JS harness. Any
    selection whose path matches a pattern in spec_json['excluded_tests']
    is skipped.
    """
    test_expansion_schema = spec_json['test_expansion_schema']
    specification = spec_json['specification']

    spec_json_js_template = get_template('spec_json.js.template')
    with open(generated_spec_json_filename, 'w') as f:
        f.write(spec_json_js_template
                % {'spec_json': json.dumps(spec_json)})

    # Choose a debug/release template depending on the target.
    html_template = "test.%s.html.template" % target

    # Create list of excluded tests.
    exclusion_dict = {}
    for excluded_pattern in spec_json['excluded_tests']:
        excluded_expansion = \
            expand_test_expansion_pattern(excluded_pattern,
                                          test_expansion_schema)
        for excluded_selection in permute_expansion(excluded_expansion):
            excluded_selection_path = selection_pattern % excluded_selection
            exclusion_dict[excluded_selection_path] = True

    for spec in specification:
        for spec_test_expansion in spec['test_expansion']:
            expansion = expand_test_expansion_pattern(spec_test_expansion,
                                                      test_expansion_schema)
            for selection in permute_expansion(expansion):
                selection_path = selection_pattern % selection
                if not selection_path in exclusion_dict:
                    subresource_path = \
                        spec_json["subresource_path"][selection["subresource"]]
                    generate_selection(selection,
                                       spec,
                                       subresource_path,
                                       html_template)
                else:
                    print('Excluding selection:', selection_path)
def main(target):
    """Validate the spec JSON, then generate all test files for `target`."""
    spec_json = load_spec_json()
    spec_validator.assert_valid_spec_json(spec_json)
    generate_test_source_files(spec_json, target)
if __name__ == '__main__':
    # Command-line entry point: pick the debug or release template set.
    parser = argparse.ArgumentParser(description='Test suite generator utility')
    parser.add_argument('-t', '--target', type = str,
                        choices = ("release", "debug"), default = "release",
                        help = 'Sets the appropriate template for generating tests')
    # TODO(kristijanburnik): Add option for the spec_json file.
    args = parser.parse_args()
    main(args.target)
| mpl-2.0 |
aitoehigie/gidimagic | venv/lib/python2.7/site-packages/requests/packages/urllib3/util/request.py | 780 | 2128 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for
        'proxy-authorization: basic ...' auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # A list is joined with commas, a string passes through unchanged,
        # and any other truthy value (e.g. True) selects the default.
        if isinstance(accept_encoding, list):
            headers['accept-encoding'] = ','.join(accept_encoding)
        elif isinstance(accept_encoding, str):
            headers['accept-encoding'] = accept_encoding
        else:
            headers['accept-encoding'] = ACCEPT_ENCODING

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        encoded_auth = b64encode(b(basic_auth)).decode('utf-8')
        headers['authorization'] = 'Basic ' + encoded_auth

    if proxy_basic_auth:
        encoded_proxy_auth = b64encode(b(proxy_basic_auth)).decode('utf-8')
        headers['proxy-authorization'] = 'Basic ' + encoded_proxy_auth

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
| mit |
showerst/openstates | openstates/hi/__init__.py | 1 | 3432 | from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from .bills import HIBillScraper
from .legislators import HILegislatorScraper
from .events import HIEventScraper
settings = dict(SCRAPELIB_TIMEOUT=300)
metadata = dict(
name='Hawaii',
abbreviation='hi',
capitol_timezone='Pacific/Honolulu',
legislature_name='Hawaii State Legislature',
legislature_url='http://www.capitol.hawaii.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms = [
{
'name': '2011-2012',
'sessions': [
'2011 Regular Session',
],
'start_year' : 2011,
'end_year' : 2012
},
{
'name': '2013-2014',
'sessions': [
'2013 Regular Session',
'2014 Regular Session',
],
'start_year' : 2013,
'end_year' : 2014
},
{
'name': '2015-2016',
'sessions': [
'2015 Regular Session',
'2016 Regular Session',
],
'start_year' : 2015,
'end_year' : 2016
},
],
session_details={
'2016 Regular Session' : {
'display_name' : '2016 Regular Session',
'_scraped_name' : '2016'
},
'2015 Regular Session' : {
'display_name' : '2015 Regular Session',
'_scraped_name' : '2015'
},
'2014 Regular Session' : {
'display_name' : '2014 Regular Session',
'_scraped_name' : '2014'
},
'2013 Regular Session' : {
'display_name' : '2013 Regular Session',
'_scraped_name' : '2013'
},
'2011 Regular Session' : {
'display_name' : '2011-2012 Regular Session',
# was 2011, now 2012 to make scraper keep working for 2011-2012
'_scraped_name' : '2012'
},
},
feature_flags=['subjects', 'events', 'capitol_maps', 'influenceexplorer'],
capitol_maps=[
{"name": "Chamber Floor",
"url": 'http://static.openstates.org/capmaps/hi/floorchamber.pdf'
},
{"name": "Floor 2",
"url": 'http://static.openstates.org/capmaps/hi/floor2.pdf'
},
{"name": "Floor 3",
"url": 'http://static.openstates.org/capmaps/hi/floor3.pdf'
},
{"name": "Floor 4",
"url": 'http://static.openstates.org/capmaps/hi/floor4.pdf'
},
{"name": "Floor 5",
"url": 'http://static.openstates.org/capmaps/hi/floor5.pdf'
},
],
_ignored_scraped_sessions = [
# ignore odd years after they're over..
'2011',
'2010', '2009', '2008', '2007', '2006',
'2005', '2004', '2003', '2002', '2001',
'2000', '1999'
]
)
def session_list():
    """Scrape the list of session names from the Hawaii capitol archives page.

    NOTE(review): performs a live HTTP request via billy's url_xpath helper;
    the comment below indicates the current session is not included in the
    archives page and the scraper needs adjusting for it.
    """
    # doesn't include current session, we need to change it
    from billy.scrape.utils import url_xpath
    sessions = url_xpath('http://www.capitol.hawaii.gov/archives/main.aspx',
        "//div[@class='roundedrect gradientgray shadow']/a/text()"
    )
    # "Archives Main" is a navigation link on that page, not a session name.
    sessions.remove("Archives Main")
    return sessions
def extract_text(doc, data):
    """Return the plain text of a PDF bill document, or None otherwise.

    :param doc: mapping with a 'mimetype' key describing the document.
    :param data: raw document bytes, converted via pdfdata_to_text when
        the mimetype is 'application/pdf'.
    """
    if doc['mimetype'] != 'application/pdf':
        return None
    return text_after_line_numbers(pdfdata_to_text(data))
| gpl-3.0 |
dinhkhanh/trac | trac/ticket/tests/wikisyntax.py | 3 | 14175 | # -*- coding: utf-8 -*-
import unittest
from trac.ticket.model import Ticket
from trac.ticket.roadmap import Milestone
from trac.wiki.tests import formatter
TICKET_TEST_CASES = u"""
============================== ticket: link resolver
ticket:1
ticket:12
ticket:abc
------------------------------
<p>
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">ticket:1</a>
<a class="missing ticket">ticket:12</a>
<a class="missing ticket">ticket:abc</a>
</p>
------------------------------
============================== ticket: link resolver + arguments
ticket:1?format=csv
ticket:1#comment:3
------------------------------
<p>
<a class="new ticket" href="/ticket/1?format=csv" title="This is the summary (new)">ticket:1?format=csv</a>
<a class="new ticket" href="/ticket/1#comment:3" title="This is the summary (new)">ticket:1#comment:3</a>
</p>
------------------------------
============================== ticket: link resolver with ranges
ticket:12-14,33
ticket:12,33?order=created
------------------------------
<p>
<a href="/query?id=12-14%2C33" title="Tickets 12-14, 33">ticket:12-14,33</a>
<a href="/query?id=12%2C33&order=created" title="Tickets 12, 33">ticket:12,33?order=created</a>
</p>
------------------------------
============================== ticket link shorthand form
#1, #2
#12, #abc
------------------------------
<p>
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>, <a class="missing ticket">#2</a>
<a class="missing ticket">#12</a>, #abc
</p>
------------------------------
============================== ticket link shorthand form with ranges
#1-5,42
#1,3,5,7
------------------------------
<p>
<a href="/query?id=1-5%2C42" title="Tickets 1-5, 42">#1-5,42</a>
<a href="/query?id=1%2C3%2C5%2C7" title="Tickets 1, 3, 5, 7">#1,3,5,7</a>
</p>
------------------------------
============================== ticket link shorthand form with long ranges (#10111 regression)
#1-123456789012345678901234
------------------------------
<p>
<a href="/query?id=1-123456789012345678901234" title="Tickets 1-123456789012345678901234">#1-123456789012345678901234</a>
</p>
------------------------------
============================== escaping the above
!#1
------------------------------
<p>
#1
</p>
------------------------------
#1
============================== InterTrac for tickets
trac:ticket:2041
[trac:ticket:2041 Trac #2041]
#T2041
#trac2041
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>trac:ticket:2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>Trac #2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>#T2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>#trac2041</a>
</p>
------------------------------
============================== Ticket InterTrac shorthands
T:#2041
trac:#2041
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/%232041" title="#2041 in Trac's Trac"><span class="icon"></span>T:#2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/%232041" title="#2041 in Trac's Trac"><span class="icon"></span>trac:#2041</a>
</p>
------------------------------
============================== ticket syntax with unicode digits
#⁴²
#1-⁵,42
#1,³,5,7
#T²⁰⁴¹
#trac²⁰⁴¹
------------------------------
<p>
#⁴²
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>-⁵,42
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>,³,5,7
#T²⁰⁴¹
#trac²⁰⁴¹
</p>
------------------------------
""" # "
def ticket_setup(tc):
    """Create the single 'new' ticket fixture referenced by the test cases.

    The wiki formatter tests above resolve links against this ticket
    (summary 'This is the summary', status 'new').
    """
    ticket = Ticket(tc.env)
    ticket.values.update({'reporter': 'santa',
                          'summary': 'This is the summary',
                          'status': 'new'})
    ticket.insert()
def ticket_teardown(tc):
    """Reset the test environment's database after the ticket tests run."""
    tc.env.reset_db()
REPORT_TEST_CASES = u"""
============================== report link shorthand form
{1}, {2}
{12}, {abc}
------------------------------
<p>
<a class="report" href="/report/1">{1}</a>, <a class="report" href="/report/2">{2}</a>
<a class="report" href="/report/12">{12}</a>, {abc}
</p>
------------------------------
============================== escaping the above
!{1}
------------------------------
<p>
{1}
</p>
------------------------------
{1}
============================== ticket shorthands, not numerical HTML entities
 
------------------------------
<p>
&#1; &#23;
</p>
------------------------------
&#1; &#23;
============================== InterTrac for reports
trac:report:1
[trac:report:1 Trac r1]
{T1}
{trac1}
{trac 1}
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>trac:report:1</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>Trac r1</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{T1}</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{trac1}</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{trac 1}</a>
</p>
------------------------------
============================== report syntax with unicode digits
{⁴²} !{⁴²}
{T⁴²}
{trac⁴²}
------------------------------
<p>
{⁴²} !{⁴²}
{T⁴²}
{trac⁴²}
</p>
------------------------------
""" # '
def report_setup(tc):
    """Placeholder fixture for the report link tests; no setup needed yet."""
    pass # TBD
MILESTONE_TEST_CASES = u"""
============================== milestone: link resolver
milestone:foo
[milestone:boo Milestone Boo]
[milestone:roo Milestone Roo]
------------------------------
<p>
<a class="missing milestone" href="/milestone/foo" rel="nofollow">milestone:foo</a>
<a class="milestone" href="/milestone/boo">Milestone Boo</a>
<a class="closed milestone" href="/milestone/roo">Milestone Roo</a>
</p>
------------------------------
============================== milestone: link resolver + arguments
milestone:?action=new
[milestone:boo#KnownIssues Known Issues for 1.0]
------------------------------
<p>
<a class="missing milestone" href="/milestone/?action=new" rel="nofollow">milestone:?action=new</a>
<a class="milestone" href="/milestone/boo#KnownIssues">Known Issues for 1.0</a>
</p>
------------------------------
""" #"
def milestone_setup(tc):
    """Create two milestone fixtures: 'boo' (open) and 'roo' (completed)."""
    from datetime import datetime
    from trac.util.datefmt import utc
    boo = Milestone(tc.env)
    boo.name = 'boo'
    boo.completed = boo.due = None
    boo.insert()
    roo = Milestone(tc.env)
    roo.name = 'roo'
    # A completed date marks this milestone as closed, which the test cases
    # expect to render with the "closed milestone" CSS class.
    roo.completed = datetime.now(utc)
    roo.due = None
    roo.insert()
def milestone_teardown(tc):
    """Reset the test environment's database after the milestone tests run."""
    tc.env.reset_db()
QUERY_TEST_CASES = u"""
============================== query: link resolver
query:?order=priority
query:?order=priority&owner=me
query:?type=résumé
query:status=new|reopened
query:reporter!=
query:reporter=joe|jack&owner=me
query:group=owner
query:verbose=1
query:summary=résumé
------------------------------
<p>
<a class="query" href="/query?order=priority">query:?order=priority</a>
</p>
<p>
<a class="query" href="/query?order=priority&owner=me">query:?order=priority&owner=me</a>
</p>
<p>
<a class="query" href="/query?type=r%C3%A9sum%C3%A9">query:?type=résumé</a>
</p>
<p>
<a class="query" href="/query?status=new&status=reopened&order=priority">query:status=new|reopened</a>
</p>
<p>
<a class="query" href="/query?reporter=!&order=priority">query:reporter!=</a>
</p>
<p>
<a class="query" href="/query?owner=me&reporter=joe&reporter=jack&order=priority">query:reporter=joe|jack&owner=me</a>
</p>
<p>
<a class="query" href="/query?group=owner&order=priority">query:group=owner</a>
</p>
<p>
<a class="query" href="/query?order=priority&row=description">query:verbose=1</a>
</p>
<p>
<a class="query" href="/query?summary=r%C3%A9sum%C3%A9&order=priority">query:summary=résumé</a>
</p>
------------------------------
============================== TicketQuery macro: no results, list form
Reopened tickets: [[TicketQuery(status=reopened)]]
------------------------------
<p>
Reopened tickets: <span class="query_no_results">No results</span>
</p>
------------------------------
============================== TicketQuery macro: no results, count 0
Reopened tickets: [[TicketQuery(status=reopened, format=count)]]
------------------------------
<p>
Reopened tickets: <span class="query_count" title="0 tickets for which status=reopened&max=0&order=id">0</span>
</p>
------------------------------
============================== TicketQuery macro: no results, compact form
Reopened tickets: [[TicketQuery(status=reopened, format=compact)]]
------------------------------
<p>
Reopened tickets: <span class="query_no_results">No results</span>
</p>
------------------------------
============================== TicketQuery macro: one result, list form
New tickets: [[TicketQuery(status=new)]]
------------------------------
<p>
New tickets: </p><div><dl class="wiki compact"><dt><a class="new" href="/ticket/1" title="This is the summary">#1</a></dt><dd>This is the summary</dd></dl></div><p>
</p>
------------------------------
============================== TicketQuery macro: one result, count 1
New tickets: [[TicketQuery(status=new, format=count)]]
------------------------------
<p>
New tickets: <span class="query_count" title="1 tickets for which status=new&max=0&order=id">1</span>
</p>
------------------------------
============================== TicketQuery macro: one result, compact form
New tickets: [[TicketQuery(status=new, format=compact)]]
------------------------------
<p>
New tickets: <span><a class="new" href="/ticket/1" title="This is the summary">#1</a></span>
</p>
------------------------------
"""
QUERY2_TEST_CASES = u"""
============================== TicketQuery macro: two results, list form
New tickets: [[TicketQuery(status=new, order=reporter)]]
------------------------------
<p>
New tickets: </p><div><dl class="wiki compact"><dt><a class="new" href="/ticket/2" title="This is another summary">#2</a></dt><dd>This is another summary</dd><dt><a class="new" href="/ticket/1" title="This is the summary">#1</a></dt><dd>This is the summary</dd></dl></div><p>
</p>
------------------------------
============================== TicketQuery macro: two results, count 2
New tickets: [[TicketQuery(status=new, order=reporter, format=count)]]
------------------------------
<p>
New tickets: <span class="query_count" title="2 tickets for which status=new&max=0&order=reporter">2</span>
</p>
------------------------------
============================== TicketQuery macro: two results, compact form
New tickets: [[TicketQuery(status=new, order=reporter, format=compact)]]
------------------------------
<p>
New tickets: <span><a class="new" href="/ticket/2" title="This is another summary">#2</a>, <a class="new" href="/ticket/1" title="This is the summary">#1</a></span>
</p>
------------------------------
"""
def query2_setup(tc):
    """Create two 'new' tickets with distinct reporters for TicketQuery tests.

    The tests order results by reporter, so 'claus' (#2) sorts before
    'santa' (#1).
    """
    ticket = Ticket(tc.env)
    ticket.values.update({'reporter': 'santa',
                          'summary': 'This is the summary',
                          'status': 'new'})
    ticket.insert()
    ticket = Ticket(tc.env)
    ticket.values.update({'reporter': 'claus',
                          'summary': 'This is another summary',
                          'status': 'new'})
    ticket.insert()
def query2_teardown(tc):
    """Reset the test environment's database after the two-ticket tests."""
    tc.env.reset_db()
COMMENT_TEST_CASES = u"""
============================== comment: link resolver (deprecated)
comment:ticket:123:2 (deprecated)
[comment:ticket:123:2 see above] (deprecated)
[comment:ticket:123:description see descr] (deprecated)
------------------------------
<p>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">comment:ticket:123:2</a> (deprecated)
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">see above</a> (deprecated)
<a href="/ticket/123#comment:description" title="Comment description for Ticket #123">see descr</a> (deprecated)
</p>
------------------------------
============================== comment: link resolver
comment:2:ticket:123
[comment:2:ticket:123 see above]
[comment:description:ticket:123 see descr]
------------------------------
<p>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">comment:2:ticket:123</a>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">see above</a>
<a href="/ticket/123#comment:description" title="Comment description for Ticket #123">see descr</a>
</p>
------------------------------
""" # "
# NOTE: the following test cases:
#
# comment:2
# [comment:2 see above]
#
# would trigger an error in the workaround code ../api.py, line 235
# As it's a problem with a temp workaround, I think there's no need
# to fix it for now.
def suite():
    """Build the combined unittest suite for all wiki-syntax test case sets.

    Each call to formatter.suite pairs a test-case string with its setup
    (and optional teardown) fixture defined above.
    """
    suite = unittest.TestSuite()
    suite.addTest(formatter.suite(TICKET_TEST_CASES, ticket_setup, __file__,
                                  ticket_teardown))
    suite.addTest(formatter.suite(REPORT_TEST_CASES, report_setup, __file__))
    suite.addTest(formatter.suite(MILESTONE_TEST_CASES, milestone_setup,
                                  __file__, milestone_teardown))
    suite.addTest(formatter.suite(QUERY_TEST_CASES, ticket_setup, __file__,
                                  ticket_teardown))
    suite.addTest(formatter.suite(QUERY2_TEST_CASES, query2_setup, __file__,
                                  query2_teardown))
    suite.addTest(formatter.suite(COMMENT_TEST_CASES, file=__file__))
    return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/tests/modeltests/m2m_intermediary/models.py | 92 | 1086 | """
9. Many-to-many relationships via an intermediary table
For many-to-many relationships that need extra fields on the intermediary
table, use an intermediary model.
In this example, an ``Article`` can have multiple ``Reporter`` objects, and
each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position``
field, which specifies the ``Reporter``'s position for the given article
(e.g. "Staff writer").
"""
from django.db import models
class Reporter(models.Model):
    """A journalist; related to Article through the Writer intermediary."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    def __unicode__(self):
        """Return the reporter's full name."""
        return u"%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
    """An article written by one or more reporters (via Writer)."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    def __unicode__(self):
        """Return the article's headline."""
        return self.headline
class Writer(models.Model):
    """Intermediary model linking Reporter and Article with extra data.

    'position' records the reporter's role for that article
    (e.g. "Staff writer"), per the module docstring above.
    """
    reporter = models.ForeignKey(Reporter)
    article = models.ForeignKey(Article)
    position = models.CharField(max_length=100)
    def __unicode__(self):
        """Return '<reporter> (<position>)'."""
        return u'%s (%s)' % (self.reporter, self.position)
| apache-2.0 |
iftekeriba/softlayer-python | SoftLayer/CLI/template.py | 5 | 2069 | """
SoftLayer.CLI.template
~~~~~~~~~~~~~~~~~~~~~~
Provides functions for loading/parsing and writing template files. Template
files are used for storing CLI arguments in the form of a file to be used
later with the --template option.
:license: MIT, see LICENSE for more details.
"""
import os.path
from SoftLayer import utils
class TemplateCallback(object):
    """Callback to use to populate click arguments with a template.

    :param list_args: option names whose template values are
        comma-separated lists and should be split into Python lists.
    """
    def __init__(self, list_args=None):
        self.list_args = list_args or []
    def __call__(self, ctx, param, value):
        """Load the template file at `value` into ctx.default_map.

        :param ctx: click context whose ``default_map`` receives the values
        :param param: click parameter (unused, required by the callback API)
        :param value: path to the template file, or None when not provided
        """
        if value is None:
            return
        config = utils.configparser.ConfigParser()
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(os.path.expanduser(value), 'r') as template_file:
            ini_str = '[settings]\n' + template_file.read()
        ini_fp = utils.StringIO(ini_str)
        config.readfp(ini_fp)
        # Merge template options with the options passed in; keys that
        # already have a truthy value in args keep their existing value.
        args = {}
        for key, value in config.items('settings'):
            if key in self.list_args:
                value = value.split(',')
            if not args.get(key):
                args[key] = value
        if ctx.default_map is None:
            ctx.default_map = {}
        ctx.default_map.update(args)
def export_to_template(filename, args, exclude=None):
    """Exports given options to the given filename in INI format.

    :param filename: Filename to save options to
    :param dict args: Arguments to export
    :param list exclude (optional): Exclusion list for options that should not
        be exported
    """
    # Work on a copy so the caller's exclusion list is not mutated, then add
    # the options that should never be persisted to a template file.
    exclude = list(exclude or [])
    exclude.extend(['config', 'really', 'format', 'debug'])
    with open(filename, "w") as template_file:
        for k, val in args.items():
            # Falsy values (None, '', empty list) are skipped entirely.
            if val and k not in exclude:
                # Both tuples and lists are written comma-separated.
                if isinstance(val, (tuple, list)):
                    val = ','.join(val)
                template_file.write('%s=%s\n' % (k, val))
| mit |
da1z/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_referrers.py | 1 | 8753 | import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
import traceback
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
    """Write a one-line, human-readable description of a <var> XML node.

    The node's URL-quoted 'name', 'value' and 'type' attributes are decoded
    with unquote_plus and written to ``stream``; 'found_as' is appended only
    when it is non-empty.
    """
    get = xml_node.getAttribute
    for label, raw in (('Name: ', get('name')),
                       (', Value: ', get('value')),
                       (', Type: ', get('type'))):
        stream.write(label)
        stream.write(unquote_plus(raw))
    found_as = get('found_as')
    if found_as:
        stream.write(', Found as: %s' % (unquote_plus(found_as),))
    stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
    """Print a human-readable report of the referrers of `obj` to `stream`.

    Delegates to get_referrer_info() for the actual search, parses the XML
    string it returns, and pretty-prints each node via print_var_node.

    :param obj: the object whose referrers should be reported.
    :param stream: writable stream; defaults to sys.stdout.
    :return: the raw XML string produced by get_referrer_info().
    """
    if stream is None:
        stream = sys.stdout
    result = get_referrer_info(obj)
    from xml.dom.minidom import parseString
    dom = parseString(result)
    xml = dom.getElementsByTagName('xml')[0]
    for node in xml.childNodes:
        if node.nodeType == node.TEXT_NODE:
            continue
        if node.localName == 'for':
            # The <for> element describes the object that was searched.
            stream.write('Searching references for: ')
            for child in node.childNodes:
                if child.nodeType == node.TEXT_NODE:
                    continue
                print_var_node(child, stream)
        elif node.localName == 'var':
            # Each top-level <var> element is one referrer that was found.
            stream.write('Referrer found: ')
            print_var_node(node, stream)
        else:
            sys.stderr.write('Unhandled node: %s\n' % (node,))
    return result
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
    """Return an XML string describing the objects that refer to searched_obj.

    The result contains a <for> element describing the searched object and
    one <var> element per referrer found by gc.get_referrers(). For frame,
    dict, tuple and list referrers, the attribute/key/index under which the
    object was found is recorded in a found_as attribute. Errors are
    reported inside the returned XML rather than raised.
    """
    DEBUG = 0
    if DEBUG:
        sys.stderr.write('Getting referrers info.\n')
    try:
        try:
            # None has countless referrers; searching them is pointless.
            if searched_obj is None:
                ret = ['<xml>\n']
                ret.append('<for>\n')
                ret.append(pydevd_xml.var_to_xml(
                    searched_obj,
                    'Skipping getting referrers for None',
                    additionalInXml=' id="%s"' % (id(searched_obj),)))
                ret.append('</for>\n')
                ret.append('</xml>')
                ret = ''.join(ret)
                return ret
            obj_id = id(searched_obj)
            try:
                if DEBUG:
                    sys.stderr.write('Getting referrers...\n')
                import gc
                referrers = gc.get_referrers(searched_obj)
            except:
                traceback.print_exc()
                ret = ['<xml>\n']
                ret.append('<for>\n')
                ret.append(pydevd_xml.var_to_xml(
                    searched_obj,
                    'Exception raised while trying to get_referrers.',
                    additionalInXml=' id="%s"' % (id(searched_obj),)))
                ret.append('</for>\n')
                ret.append('</xml>')
                ret = ''.join(ret)
                return ret
            if DEBUG:
                sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
            curr_frame = sys._getframe()
            frame_type = type(curr_frame)
            #Ignore this frame and any caller frame of this frame
            ignore_frames = {} #Should be a set, but it's not available on all python versions.
            while curr_frame is not None:
                if basename(curr_frame.f_code.co_filename).startswith('pydev'):
                    ignore_frames[curr_frame] = 1
                curr_frame = curr_frame.f_back
            ret = ['<xml>\n']
            ret.append('<for>\n')
            if DEBUG:
                sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
            ret.append(pydevd_xml.var_to_xml(
                searched_obj,
                'Referrers of obj with id="%s"' % (obj_id,)))
            ret.append('</for>\n')
            all_objects = None
            for r in referrers:
                try:
                    if r in ignore_frames:
                        continue #Skip the references we may add ourselves
                except:
                    pass #Ok: unhashable type checked...
                # The referrers list itself holds a reference to the object.
                if r is referrers:
                    continue
                r_type = type(r)
                r_id = str(id(r))
                representation = str(r_type)
                found_as = ''
                if r_type == frame_type:
                    # Frame referrer: find which local variable holds the object.
                    if DEBUG:
                        sys.stderr.write('Found frame referrer: %r\n' % (r,))
                    for key, val in r.f_locals.items():
                        if val is searched_obj:
                            found_as = key
                            break
                elif r_type == dict:
                    if DEBUG:
                        sys.stderr.write('Found dict referrer: %r\n' % (r,))
                    # Try to check if it's a value in the dict (and under which key it was found)
                    for key, val in r.items():
                        if val is searched_obj:
                            found_as = key
                            if DEBUG:
                                sys.stderr.write(' Found as %r in dict\n' % (found_as,))
                            break
                    #Ok, there's one annoying thing: many times we find it in a dict from an instance,
                    #but with this we don't directly have the class, only the dict, so, to workaround that
                    #we iterate over all reachable objects and check if one of those has the given dict.
                    if all_objects is None:
                        all_objects = gc.get_objects()
                    for x in all_objects:
                        try:
                            if getattr(x, '__dict__', None) is r:
                                # Report the owning instance instead of its __dict__.
                                r = x
                                r_type = type(x)
                                r_id = str(id(r))
                                representation = str(r_type)
                                break
                        except:
                            pass #Just ignore any error here (i.e.: ReferenceError, etc.)
                elif r_type in (tuple, list):
                    if DEBUG:
                        sys.stderr.write('Found tuple referrer: %r\n' % (r,))
                    #Don't use enumerate() because not all Python versions have it.
                    i = 0
                    for x in r:
                        if x is searched_obj:
                            found_as = '%s[%s]' % (r_type.__name__, i)
                            if DEBUG:
                                sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
                            break
                        i += 1
                if found_as:
                    if not isinstance(found_as, str):
                        found_as = str(found_as)
                    found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
                ret.append(pydevd_xml.var_to_xml(
                    r,
                    representation,
                    additionalInXml=' id="%s"%s' % (r_id, found_as)))
        finally:
            if DEBUG:
                sys.stderr.write('Done searching for references.\n')
            #If we have any exceptions, don't keep dangling references from this frame to any of our objects.
            all_objects = None
            referrers = None
            searched_obj = None
            r = None
            x = None
            key = None
            val = None
            curr_frame = None
            ignore_frames = None
    except:
        traceback.print_exc()
        ret = ['<xml>\n']
        ret.append('<for>\n')
        ret.append(pydevd_xml.var_to_xml(
            searched_obj,
            'Error getting referrers for:',
            additionalInXml=' id="%s"' % (id(searched_obj),)))
        ret.append('</for>\n')
        ret.append('</xml>')
        ret = ''.join(ret)
        return ret
    ret.append('</xml>')
    ret = ''.join(ret)
    return ret
| apache-2.0 |
ares/robottelo | tests/foreman/ui/test_setting.py | 2 | 45901 | # -*- encoding: utf-8 -*-
"""Test class for Setting Parameter values
:Requirement: Setting
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_email, gen_string, gen_url
from random import choice, randint
from robottelo.datafactory import filtered_datapoint, valid_data_list
from robottelo.decorators import run_only_on, tier1, stubbed, tier4
from robottelo.test import UITestCase
from robottelo.ui.base import UINoSuchElementError
from robottelo.ui.factory import edit_param
from robottelo.ui.locators import common_locators, tab_locators
from robottelo.ui.session import Session
@filtered_datapoint
def valid_boolean_values():
    """Returns a list of valid boolean values"""
    # Boolean settings are rendered as Yes/No choices in the UI.
    return [
        'Yes',
        'No',
    ]
@filtered_datapoint
def valid_settings_values():
    """Returns a list of valid settings values"""
    # Random e-mail addresses with alpha, alphanumeric and numeric local parts.
    return [
        gen_email(gen_string('alpha')),
        gen_email(gen_string('alphanumeric')),
        gen_email(gen_string('numeric')),
    ]
@filtered_datapoint
def invalid_foreman_urls():
    """Returns a list of invalid foreman urls"""
    # Hostnames containing characters invalid in URLs (backslash, utf8,
    # latin1, html markup), plus a whitespace-only value.
    return[
        'http://\\' + gen_string('alpha') + '.dom.com',
        'http://' + gen_string('utf8') + '.dom.com',
        'http://' + gen_string('latin1') + '.dom.com',
        'http://' + gen_string('html') + '.dom.com',
        ' '
    ]
@filtered_datapoint
def invalid_settings_values():
    """Returns a list of invalid settings values"""
    # Values that are not positive integers: blank, negative, text, zero.
    return [' ', '-1', 'text', '0']
@filtered_datapoint
def valid_maxtrend_timeout_values():
    """Returns a list of valid maxtrend, timeout values"""
    # A small (two-digit) and a large (five-digit) random integer, as strings.
    return [
        str(randint(10, 99)),
        str(randint(10000, 99999)),
    ]
@filtered_datapoint
def valid_urls():
    """Returns a list of valid urls"""
    # Random http/https URLs with alpha, alphanumeric and numeric subdomains.
    return [
        gen_url(
            scheme=choice(('http', 'https')),
            subdomain=gen_string('alpha'),
        ),
        gen_url(
            scheme=choice(('http', 'https')),
            subdomain=gen_string('alphanumeric'),
        ),
        gen_url(
            scheme=choice(('http', 'https')),
            subdomain=gen_string('numeric'),
        ),
    ]
@filtered_datapoint
def valid_login_delegation_values():
    """Returns a list of valid delegation values"""
    # Random strings across several character sets.
    return [
        gen_string('latin1'),
        gen_string('utf8'),
        gen_string('alpha'),
        gen_string('alphanumeric'),
        gen_string('numeric')
    ]
@filtered_datapoint
def invalid_oauth_active_values():
    """Returns a list of invalid oauth_active values"""
    # OAuth-related setting names used as (invalid) values.
    return [
        'oauth_active',
        'oauth_consumer_key',
        'oauth_consumer_secret',
        'oauth_map_users',
    ]
@filtered_datapoint
def valid_trusted_puppetmaster_hosts():
    """Returns a list of valid trusted puppetmaster hosts"""
    # Bracketed single-element host lists with random names.
    return [
        '[ ' + gen_string('alpha') + ' ]',
        '[ ' + gen_string('alphanumeric') + ' ]',
        '[ ' + gen_string('numeric') + ' ]'
    ]
@filtered_datapoint
def valid_token_duration():
    """Returns a list of valid token durations"""
    # A positive duration and zero (as strings).
    return ['90', '0']
@filtered_datapoint
def invalid_token_duration():
    """Returns a list of invalid token durations"""
    # Blank, negative and non-numeric values.
    return [' ', '-1', 'text']
class SettingTestCase(UITestCase):
"""Implements Boundary tests for Settings menu"""
def setUp(self):
"""Initialize test variables"""
super(SettingTestCase, self).setUp()
self.original_value = None
self.param_name = None
self.saved_element = None
self.tab_locator = None
self.value_type = None
def tearDown(self):
"""Revert the setting to its default value"""
if self.original_value: # do nothing for skipped test
if self.saved_element != self.original_value:
with Session(self) as session:
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
value_type=self.value_type,
param_value=self.original_value,
)
super(SettingTestCase, self).tearDown()
@tier1
def test_positive_update_authorize_login_delegation_param(self):
"""Updates parameter "authorize_login_delegation" under Auth tab
:id: 0b752f6a-5987-483a-9cef-2d02fa42fe73
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'authorize_login_delegation'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_administrator_param(self):
"""Updates parameter "administrator" under General tab
:id: ecab6d51-ad29-4904-bc04-e62673ab1028
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'administrator'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_settings_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
value_type=self.value_type,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_authorize_login_delegation_api_param(self):
"""Updates parameter "authorize_login_delegation_api" under Auth tab
:id: 1dc39d96-a0e3-4d2e-aeb8-14aedab2ebe3
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'authorize_login_delegation_api'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_negative_update_entries_per_page_param(self):
"""Updates parameter "entries_per_page" under General tab with
invalid values
:id: b6bb39e2-797e-43e4-9629-d319c62992a4
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'entries_per_page'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in invalid_settings_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertNotEqual(param_value, self.saved_element)
@tier1
def test_positive_update_entries_per_page_param(self):
"""Updates parameter "entries_per_page" under General tab
:id: e41933c8-d835-4126-a356-a186c8e9013f
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
param_value = str(randint(30, 1000))
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'entries_per_page'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
    @tier1
    def test_positive_update_email_reply_address_param(self):
        """Updates parameter "email_reply_address" under Email tab

        :id: 274eaa6d-a6ba-4dbe-a843-c3717fbd70ae

        :expectedresults: Parameter is updated successfully

        :CaseImportance: Critical
        """
        self.tab_locator = tab_locators['settings.tab_email']
        self.param_name = 'email_reply_address'
        with Session(self) as session:
            # NOTE(review): original value appears to be saved for a restore
            # outside this method (teardown) -- confirm.
            self.original_value = self.settings.get_saved_value(
                self.tab_locator, self.param_name)
            for param_value in valid_settings_values():
                with self.subTest(param_value):
                    edit_param(
                        session,
                        tab_locator=self.tab_locator,
                        param_name=self.param_name,
                        param_value=param_value,
                    )
                    # Re-read the setting from the UI and verify the edit
                    # actually persisted.
                    self.saved_element = self.settings.get_saved_value(
                        self.tab_locator, self.param_name)
                    self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_fix_db_cache_param(self):
"""Updates parameter "fix_db_cache" under General tab
:id: b7f8df0e-9ac8-4075-8955-c895267e424c
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'fix_db_cache'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_use_gravatar_param(self):
"""Updates parameter "use_gravatar" under General tab
:id: 6ea676c1-acb9-495f-9ee7-0a2c14f34ea1
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'use_gravatar'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_negative_update_max_trend_param(self):
"""Updates parameter "max_trend" under General tab with invalid
values
:id: bcc2848d-734a-4b13-80fa-9fd34545cbe7
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'max_trend'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in invalid_settings_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertNotEqual(param_value, self.saved_element)
@tier1
def test_positive_update_max_trend_param(self):
"""Updates parameter "max_trend" under General tab
:id: 6e08bb3b-de48-45b4-b982-7180dbb65ed2
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'max_trend'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_maxtrend_timeout_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
    @tier1
    def test_negative_update_idle_timeout_param(self):
        """Updates parameter "idle_timeout" under Auth tab with
        invalid values

        :id: 0c46ec21-7402-4241-8b22-5f8afa1f5316

        :expectedresults: Parameter is not updated

        :CaseImportance: Critical
        """
        self.tab_locator = tab_locators['settings.tab_auth']
        self.param_name = 'idle_timeout'
        with Session(self) as session:
            # NOTE(review): original value appears to be saved for a restore
            # outside this method (teardown) -- confirm.
            self.original_value = self.settings.get_saved_value(
                self.tab_locator, self.param_name)
            for param_value in invalid_settings_values():
                with self.subTest(param_value):
                    edit_param(
                        session,
                        tab_locator=self.tab_locator,
                        param_name=self.param_name,
                        param_value=param_value,
                    )
                    # The UI must surface a validation error ...
                    self.assertIsNotNone(session.nav.wait_until_element(
                        common_locators['haserror']))
                    # ... and the stored value must remain unchanged.
                    self.saved_element = self.settings.get_saved_value(
                        self.tab_locator, self.param_name)
                    self.assertNotEqual(param_value, self.saved_element)
@tier1
def test_positive_update_idle_timeout_param(self):
"""Updates parameter "idle_timeout" under Auth tab
:id: fd5b2fe0-7124-444b-9f00-fca2b38c52f4
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'idle_timeout'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_maxtrend_timeout_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_foreman_url_param(self):
"""Updates parameter "foreman_url" under General tab
:id: e09e95e9-510a-48b6-a59f-5adc0a383ddc
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'foreman_url'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_urls():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_negative_update_foreman_url_param(self):
"""Updates parameter "foreman_url" under General tab
:id: ee450e0a-d02e-40c4-a67e-5508a29dc9c8
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'foreman_url'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in invalid_foreman_urls():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertNotEqual(param_value, self.saved_element)
@tier1
def test_positive_update_login_page_footer_text(self):
"""Updates parameter "Login_page_footer_text" under General tab
:id: 56a983c4-925f-4cbe-8fdb-ce344219d739
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_general']
self.param_name = 'login_text'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_data_list():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@stubbed
@tier1
def test_negative_update_login_page_footer_text(self):
"""Attempt to update parameter "Login_page_footer_text"
with invalid value(long length) under General tab
:id: c76d91e8-a207-43c6-904c-7ca2dae7cd16
:steps:
1. Navigate to Administer -> settings
2. Click on general tab
3. Input invalid data into login page footer field
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
@tier1
def test_positive_update_dynflow_enable_console_param(self):
"""Updates parameter "dynflow_enable_console" under ForemanTasks
tab
:id: 11a710f1-d5fc-48c7-9f31-a92dbbaebc40
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_foremantasks']
self.param_name = 'dynflow_enable_console'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_auth_source_user_autocreate_param(self):
"""Updates parameter
"authorize_login_delegation_auth_source_user_autocreate" under Auth tab
:id: 82137c0c-1cf5-445d-87fe-1ff80a12df3c
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = ('authorize_login_delegation_auth_source_user'
'_autocreate')
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_login_delegation_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_login_delegation_logout_url_param(self):
"""Updates parameter "login_delegation_logout_url" under Auth
tab
:id: 67b32c5f-7e8e-4ba7-ab29-9af2ac3660a9
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'login_delegation_logout_url'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_urls():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
    @tier1
    def test_negative_update_oauth_active_param(self):
        """Read-only param "oauth_active" under Auth tab shouldn't be
        updated

        :id: e69d791a-e5c4-4f42-b5dd-c9d3bca49673

        :expectedresults: Parameter is not editable

        :CaseImportance: Critical
        """
        self.tab_locator = tab_locators['settings.tab_auth']
        with Session(self) as session:
            for param_name in invalid_oauth_active_values():
                with self.subTest(param_name):
                    # A read-only parameter renders no edit button, so
                    # edit_param must fail with UINoSuchElementError.
                    with self.assertRaises(UINoSuchElementError) as context:
                        edit_param(
                            session,
                            tab_locator=self.tab_locator,
                            param_name=param_name,
                        )
                    # Pin the exact error text so an unrelated missing
                    # element does not pass the test by accident.
                    self.assertEqual(
                        context.exception.message,
                        'Could not find edit button to update selected param'
                    )
@tier1
def test_positive_update_require_ssl_smart_proxies_param(self):
"""Updates parameter "require_ssl_smart_proxies" under Auth tab
:id: 79d5bb5f-6bec-4c1c-b68e-6727aeb04614
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'require_ssl_smart_proxies'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_restrict_registered_smart_proxies_param(self):
"""Updates parameter "restrict_registered_smart_proxies" under
Auth tab
:id: 7dbcf471-3cee-4718-a316-18da6c4c1ef0
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'restrict_registered_smart_proxies'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_positive_update_trusted_puppetmaster_hosts_param(self):
"""Updates parameter "trusted_puppetmaster_hosts" under Auth tab
:id: 18596dbc-7e2a-426c-bd1a-338a31ba6e97
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'trusted_puppetmaster_hosts'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_trusted_puppetmaster_hosts():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@tier1
def test_negative_update_trusted_puppetmaster_hosts_param(self):
"""Updates parameter "trusted_puppetmaster_hosts" under Auth tab
:id: 23af2612-1291-41a1-8002-87263e39bdbe
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_auth']
self.param_name = 'trusted_puppetmaster_hosts'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in invalid_settings_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertNotEqual(param_value, self.saved_element)
@tier1
def test_positive_update_ignore_puppet_facts_for_provisioning_param(self):
"""Updates parameter "ignore_puppet_facts_for_provisioning" under
Provisioning tab
:id: 71cb4779-7982-43b6-ab65-7198ec193941
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'ignore_puppet_facts_for_provisioning'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@run_only_on('sat')
@tier1
def test_positive_update_manage_puppetca_param(self):
"""Updates parameter "manage_puppetca" under Provisioning tab
:id: 2f652441-6beb-40c0-9fb3-f0b835d06ca7
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'manage_puppetca'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@run_only_on('sat')
@tier1
def test_positive_update_query_local_nameservers_param(self):
"""Updates parameter "query_local_nameservers" under
Provisioning tab
:id: 643960f4-121c-44f3-a5e8-00b9cf66ff99
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'query_local_nameservers'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@run_only_on('sat')
@tier1
def test_positive_update_safemode_render_param(self):
"""Updates parameter "safemode_render" under Provisioning tab
:id: 4762a89a-2ebe-4834-b44f-f74888e609bb
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'safemode_render'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_boolean_values():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@run_only_on('sat')
@tier1
def test_negative_update_token_duration_param(self):
"""Updates parameter "token_duration" under Provisioning tab
with invalid values
:id: a1d18ba3-a14f-47ab-82fb-1249abc7b076
:expectedresults: Parameter is not updated
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'token_duration'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in invalid_token_duration():
with self.subTest(param_value):
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertNotEqual(param_value, self.saved_element)
@run_only_on('sat')
@tier1
def test_positive_update_token_duration_param(self):
"""Updates param "token_duration" under Provisioning tab
:id: a200b578-4463-444b-bed1-82e540a77529
:expectedresults: Parameter is updated successfully
:CaseImportance: Critical
"""
self.tab_locator = tab_locators['settings.tab_provisioning']
self.param_name = 'token_duration'
with Session(self) as session:
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
for param_value in valid_token_duration():
with self.subTest(param_value):
self.original_value = self.settings.get_saved_value(
self.tab_locator, self.param_name)
edit_param(
session,
tab_locator=self.tab_locator,
param_name=self.param_name,
param_value=param_value,
)
self.saved_element = self.settings.get_saved_value(
self.tab_locator, self.param_name)
self.assertEqual(param_value, self.saved_element)
@stubbed
@tier1
def test_negative_settings_access_to_non_admin(self):
"""Check non admin users can't access Administer -> Settings tab
:id: cefb64ba-5209-4901-b09a-84a433e5e344
:steps:
1. Login with non admin user
2. Check "Administer" tab is not present
3. Navigate to /settings
4. Check message permission denied is present
:expectedresults: Administer -> Settings tab should not be available to
non admin users
:CaseImportance: Critical
"""
@stubbed
@tier1
def test_positive_update_email_delivery_method_smtp(self):
"""Updating SMTP params on Email tab
:id: 3668e81a-6bb0-4399-b192-cad191f6a167
:steps:
1. Navigate to Administer > Settings > Email tab
2. Update delivery method select interface to SMTP
3. SMTP params configuration:
3.1. SMTP address
3.2. SMTP authentication
3.3. SMTP HELO/EHLO domain
3.4. SMTP StartTLS AUTO
3.5. SMTP OpenSSL verify mode
3.6. SMTP password
3.7. SMTP port
3.8. SMTP username
4. Update "Email reply address" and "Email subject prefix"
5. Click "Test Email" button
6. Check success msg "Email was sent successfully" is shown
7. Check sent email has updated values on sender and subject
accordingly
:expectedresults: Email is sent through SMTP
:CaseImportance: Critical
"""
@stubbed
@tier1
def test_negative_update_email_delivery_method_smtp(self):
"""Updating SMTP params on Email tab fail
:id: d2a40c36-4a7e-45cb-a1b6-750ed49f222b
:steps:
1. Navigate to Administer > Settings > Email tab
2. Update delivery method select interface to SMTP
3. Update SMTP params with invalid configuration:
3.1. SMTP address
3.2. SMTP authentication
3.3. SMTP HELO/EHLO domain
3.4. SMTP password
3.5. SMTP port
3.6. SMTP username
4. Click "Test Email" button
5. Check error msg "Unable to send email, check server log for more
information" is shown
6. Check /var/log/foreman/production.log has error msg related
to email
:expectedresults: Email is not sent through SMTP
:CaseImportance: Critical
"""
@stubbed
@tier1
def test_positive_update_email_delivery_method_sendmail(self):
"""Updating Sendmail params on Email tab
:id: 1651e820-7fc2-4295-acef-68737da8e1e2
:steps:
1. Navigate to Administer > Settings > Email tab
2. Update delivery method select interface to Sendmail
3. Sendmail params configuration:
3.1. Sendmail arguments
3.2. Sendmail location
3.3. Send welcome email
4. Update "Email reply address" and "Email subject prefix"
5. Click "Test Email" button
6. Check success msg "Email was sent successfully" is shown
7. Check sent email has updated values on sender and subject
accordingly
:expectedresults: Email is sent through Sendmail
:CaseImportance: Critical
"""
@stubbed
@tier1
def test_negative_update_email_delivery_method_sendmail(self):
"""Updating Sendmail params on Email tab fail
:id: 72391f12-68dd-4fce-b289-d3876564ce8a
:steps:
1. Navigate to Administer > Settings > Email tab
2. Update delivery method select interface to Sendmail
3. update Sendmail params with invalid configuration:
3.1. Sendmail arguments
3.2. Sendmail location
3.3. Send welcome email
4. Click "Test Email" button
5. Check error msg "Unable to send email, check server log for more
information" is shown
6. Check /var/log/foreman/production.log has error msg related
to email
:expectedresults: Email is not sent through Sendmail
:CaseImportance: Critical
"""
@stubbed
@tier4
def test_positive_email_yaml_config_precedence(self):
"""Check configuration file /etc/foreman/email.yaml takes precedence
over UI. This behaviour will be default until Foreman 1.16. This
behavior can also be changed through --foreman-email-config-method
installer parameter
:id: 17b9fcd9-9b4b-41c9-b9f3-d3bd956c3939
:steps:
1. create a /etc/foreman/email.yaml file with smtp configuration
2. Restart katello service
3. Check only a few parameters are editable:
3.1 : Email reply address
3.2 : Email subject prefix
3.3 : Send welcome email
4. Delete or move email.yaml file
5. Restart katello service
6. Check all parameters on Administer/Settings/Email tab are
editable.
:expectedresults: File configuration takes precedence over ui
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_negative_update_hostname_with_empty_fact(self):
"""Update the Hostname_facts settings without any string(empty values)
:id: 13dbb5d2-f052-42fe-8cbc-9b58fa6553e7
:Steps:
1. Goto settings ->Discovered tab -> Hostname_facts
2. Set empty hostname_facts (without any value)
:expectedresults: Error should be raised on setting empty value for
hostname_facts setting
:caseautomation: notautomated
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_positive_remove_hostname_default_prefix(self):
"""Remove the set(default) prefix from hostname_prefix setting
:id: c609dd46-6104-4710-8171-966d7195674f
:Steps:
1. Goto settings -> Discovered tab -> Hostname_prefix
2. Click on 'X' sign to remove the text and save
:expectedresults: Default set prefix should be removed
:caseautomation: notautomated
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_positive_remove_hostname_default_facts(self):
"""Remove the set(default) fact from hostname_facts setting and
update w/ new fact
:id: 250b8bf4-eca1-44d4-8154-7ec94a5fb16a
:Steps:
1. Goto settings ->Discovered tab -> Hostname_facts
2. Click on 'X' sign to remove the text and update w/ fact UUID
:expectedresults: Default set fact should be removed and a new fact
should be set
:caseautomation: notautomated
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_negative_discover_host_with_invalid_prefix(self):
"""Update the hostname_prefix with invalid string like
-mac, 1mac or ^%$
:id: 46c1c383-8ee7-4152-a6ca-0a049f3f70b0
:Steps:
1. Goto settings -> Discovered tab -> Hostname_prefix
2. Update hostname_prefix starting with '-' or ^&$
:expectedresults: Validation error should be raised on updating
hostname_prefix with invalid string, should start w/ letter
:caseautomation: notautomated
"""
| gpl-3.0 |
jacknjzhou/neutron | neutron/tests/unit/agent/linux/test_ovsdb_monitor.py | 14 | 3944 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import ovsdb_monitor
from neutron.tests import base
class TestOvsdbMonitor(base.BaseTestCase):
    """Unit tests for OvsdbMonitor's stdout/stderr reading helpers."""

    def setUp(self):
        super(TestOvsdbMonitor, self).setUp()
        self.monitor = ovsdb_monitor.OvsdbMonitor('Interface')

    def read_output_queues_and_returns_result(self, output_type, output):
        """Feed one simulated line to the monitor and return what the
        matching _read_<output_type> helper produced.
        """
        # Later patches target attributes of the earlier mocks, so the
        # bindings must be made left-to-right within a single `with`.
        with mock.patch.object(self.monitor, '_process') as process, \
                mock.patch.object(process, output_type) as stream, \
                mock.patch.object(stream, 'readline') as readline:
            readline.return_value = output
            reader = getattr(self.monitor,
                             '_read_%s' % output_type,
                             None)
            return reader()

    def test__read_stdout_returns_none_for_empty_read(self):
        self.assertIsNone(
            self.read_output_queues_and_returns_result('stdout', ''))

    def test__read_stdout_queues_normal_output_to_stdout_queue(self):
        line = 'foo'
        received = self.read_output_queues_and_returns_result('stdout', line)
        self.assertEqual(received, line)
        # The line must also have been queued for consumers.
        self.assertEqual(self.monitor._stdout_lines.get_nowait(), line)

    def test__read_stderr_returns_none(self):
        self.assertIsNone(
            self.read_output_queues_and_returns_result('stderr', ''))
class TestSimpleInterfaceMonitor(base.BaseTestCase):
    """Unit tests for SimpleInterfaceMonitor's update/event bookkeeping."""

    def setUp(self):
        super(TestSimpleInterfaceMonitor, self).setUp()
        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()

    def test_has_updates_is_false_if_active_with_no_output(self):
        # An active monitor that has produced no output must not report
        # pending updates.
        target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
                  '.is_active')
        with mock.patch(target, return_value=True):
            self.assertFalse(self.monitor.has_updates)

    def test__kill_sets_data_received_to_false(self):
        # Killing the monitor must reset the data_received flag, regardless
        # of what the parent class _kill does (patched out here).
        self.monitor.data_received = True
        with mock.patch(
            'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._kill'):
            self.monitor._kill()
        self.assertFalse(self.monitor.data_received)

    def test__read_stdout_sets_data_received_and_returns_output(self):
        # Non-empty output flips data_received and is passed through.
        output = 'foo'
        with mock.patch(
            'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
            return_value=output):
            result = self.monitor._read_stdout()
        self.assertTrue(self.monitor.data_received)
        self.assertEqual(result, output)

    def test__read_stdout_does_not_set_data_received_for_empty_ouput(self):
        # Empty (None) output must leave data_received untouched.
        output = None
        with mock.patch(
            'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
            return_value=output):
            self.monitor._read_stdout()
        self.assertFalse(self.monitor.data_received)

    def test_has_updates_after_calling_get_events_is_false(self):
        # Consuming events via get_events() must clear the pending-updates
        # indicator and delegate to process_events.
        with mock.patch.object(
            self.monitor, 'process_events') as process_events:
            self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1']}
            self.assertTrue(self.monitor.has_updates)
            self.monitor.get_events()
            self.assertTrue(process_events.called)
            self.assertFalse(self.monitor.has_updates)
| apache-2.0 |
MontpellierRessourcesImagerie/openmicroscopy | components/tools/OmeroWeb/omeroweb/wsgi.py | 12 | 1739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WSGI config for the OMERO.web project.
Copyright 2014 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
"""
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys

# OMERO.web is set up with the "omeroweb" package also on the PYTHONPATH
# (makes "omeroweb.settings" importable regardless of the server's cwd).
sys.path.append(os.path.dirname(__file__))

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "omeroweb.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "omeroweb.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-2.0 |
bigswitch/nova | nova/api/openstack/compute/schemas/floating_ips_bulk.py | 93 | 1537 | # Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON-schema fragment shared by the create and delete request bodies.
# The pattern is deliberately loose: it accepts the characters used by
# IPv4 ranges/CIDRs ("0-9", ".", "/") and IPv6-style hex digits, but it
# does not verify the string is a well-formed address range (it even
# matches the empty string).
ip_range = {
    # TODO(eliqiao) need to find a better pattern
    'type': 'string',
    'pattern': '^[0-9./a-fA-F]*$',
}
# Request-body schema for bulk floating-IP creation; only ip_range is
# mandatory, pool and interface are optional non-empty strings.
create = {
    'type': 'object',
    'properties': {
        'floating_ips_bulk_create': {
            'type': 'object',
            'properties': {
                'ip_range': ip_range,
                'pool': {
                    'type': 'string', 'minLength': 1, 'maxLength': 255,
                },
                'interface': {
                    'type': 'string', 'minLength': 1, 'maxLength': 255,
                },
            },
            'required': ['ip_range'],
            'additionalProperties': False,
        },
    },
    'required': ['floating_ips_bulk_create'],
    'additionalProperties': False,
}
# Request-body schema for bulk floating-IP deletion: a bare ip_range.
delete = {
    'type': 'object',
    'properties': {
        'ip_range': ip_range,
    },
    'required': ['ip_range'],
    'additionalProperties': False,
}
| apache-2.0 |
HybridF5/jacket | jacket/tests/compute/unit/virt/test_hardware.py | 1 | 117764 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import uuid
import mock
from oslo_serialization import jsonutils
import six
from jacket import context
from jacket.compute import exception
from jacket.objects import compute
from jacket.objects.compute import base as base_obj
from jacket.objects.compute import fields
from jacket.compute.pci import stats
from jacket.compute import test
from jacket.compute.virt import hardware as hw
class InstanceInfoTests(test.NoDBTestCase):
    """Tests for the hw.InstanceInfo value object (defaults, init, equality)."""

    def test_instance_info_default(self):
        """A bare InstanceInfo has None state/id and zeroed counters."""
        info = hw.InstanceInfo()
        self.assertIsNone(info.state)
        self.assertIsNone(info.id)
        for counter in ('max_mem_kb', 'mem_kb', 'num_cpu', 'cpu_time_ns'):
            self.assertEqual(0, getattr(info, counter))

    def test_instance_info(self):
        """Constructor keyword arguments are stored verbatim."""
        info = hw.InstanceInfo(state='fake-state',
                               max_mem_kb=1,
                               mem_kb=2,
                               num_cpu=3,
                               cpu_time_ns=4,
                               id='fake-id')
        self.assertEqual('fake-state', info.state)
        self.assertEqual('fake-id', info.id)
        self.assertEqual(1, info.max_mem_kb)
        self.assertEqual(2, info.mem_kb)
        self.assertEqual(3, info.num_cpu)
        self.assertEqual(4, info.cpu_time_ns)

    def test_instance_infoi_equals(self):
        """Equality is field-wise: identical kwargs compare equal.

        NOTE: the "infoi" typo in the name is kept on purpose - renaming
        would change the test id.
        """
        same_kwargs = dict(state='fake-state', max_mem_kb=1, mem_kb=2,
                           num_cpu=3, cpu_time_ns=4, id='fake-id')
        other_kwargs = dict(state='fake-estat', max_mem_kb=11, mem_kb=22,
                            num_cpu=33, cpu_time_ns=44, id='fake-di')
        self.assertEqual(hw.InstanceInfo(**same_kwargs),
                         hw.InstanceInfo(**same_kwargs))
        self.assertNotEqual(hw.InstanceInfo(**same_kwargs),
                            hw.InstanceInfo(**other_kwargs))
class CpuSetTestCase(test.NoDBTestCase):
    """Tests for CPU-spec parsing (parse_cpu_spec) and formatting helpers."""

    def test_get_vcpu_pin_set(self):
        """The vcpu_pin_set option is parsed into a set of CPU ids."""
        self.flags(vcpu_pin_set="1-3,5,^2")
        self.assertEqual(set([1, 3, 5]), hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_none_returns_none(self):
        """An unset vcpu_pin_set yields None rather than an empty set."""
        self.flags(vcpu_pin_set=None)
        self.assertIsNone(hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_valid_syntax_works(self):
        """Ranges, exclusions and stray whitespace/commas all parse."""
        # (spec string, expected CPU-id set) pairs.
        valid_specs = [
            ("1", set([1])),
            ("1,2", set([1, 2])),
            (", , 1 , ,, 2, ,", set([1, 2])),
            ("1-1", set([1])),
            (" 1 - 1, 1 - 2 , 1 -3", set([1, 2, 3])),
            ("1,^2", set([1])),
            ("1-2, ^1", set([2])),
            ("1-3,5,^2", set([1, 3, 5])),
            (" 1 - 3 , ^2, 5", set([1, 3, 5])),
            (" 1,1, ^1", set([])),
            ("^0-1", set([])),
            ("0-3,^1-2", set([0, 3])),
        ]
        for spec, expected in valid_specs:
            self.assertEqual(expected, hw.parse_cpu_spec(spec))

    def test_parse_cpu_spec_invalid_syntax_raises(self):
        """Malformed ranges and exclusions raise exception.Invalid."""
        bad_specs = [
            " -1-3,5,^2",
            "1-3-,5,^2",
            "-3,5,^2",
            "1-,5,^2",
            "1-3,5,^2^",
            "1-3,5,^2-",
            "--13,^^5,^2",
            "a-3,5,^2",
            "1-a,5,^2",
            "1-3,b,^2",
            "1-3,5,^c",
            "3 - 1, 5 , ^ 2 ",
        ]
        for spec in bad_specs:
            self.assertRaises(exception.Invalid, hw.parse_cpu_spec, spec)

    def test_format_cpu_spec(self):
        """CPU-id collections are rendered as range strings.

        Both sets and lists are accepted; allow_ranges=False forces a
        flat comma-separated rendering.
        """
        big = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
        # Default behaviour: consecutive ids collapse to "a-b" ranges.
        ranged_cases = [
            (set([]), ""),
            ([], ""),
            (set([1, 3]), "1,3"),
            ([1, 3], "1,3"),
            (set([1, 2, 4, 6]), "1-2,4,6"),
            ([1, 2, 4, 6], "1-2,4,6"),
            (set(big), "10-11,13-16,19-20,40,42,48"),
            (list(big), "10-11,13-16,19-20,40,42,48"),
        ]
        for cpus, expected in ranged_cases:
            self.assertEqual(expected, hw.format_cpu_spec(cpus))
        # With ranges disabled every id is listed individually.
        unranged_cases = [
            (set([1, 2, 4, 6]), "1,2,4,6"),
            ([1, 2, 4, 6], "1,2,4,6"),
            (set(big), "10,11,13,14,15,16,19,20,40,42,48"),
            (list(big), "10,11,13,14,15,16,19,20,40,42,48"),
        ]
        for cpus, expected in unranged_cases:
            self.assertEqual(expected,
                             hw.format_cpu_spec(cpus, allow_ranges=False))
class VCPUTopologyTest(test.NoDBTestCase):
    """Tests for deriving guest vCPU topologies from flavor/image settings."""

    def test_validate_config(self):
        """Merging of flavor and image CPU-topology constraints.

        Each entry pairs a flavor and image with either the expected
        six-tuple (preferred sockets/cores/threads followed by maximum
        sockets/cores/threads) or the exception class raised when the
        image conflicts with the flavor.  Judging from the expectations
        below, 65536 acts as the "effectively unlimited" maximum default
        and -1 as "no preference".
        """
        testdata = [
            { # Flavor sets preferred topology only
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1",
                }),
                "image": {
                    "properties": {}
                },
                "expect": (
                    8, 2, 1, 65536, 65536, 65536
                )
            },
            { # Image topology overrides flavor
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1",
                    "hw:cpu_max_threads": "2",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": (
                    4, 2, 2, 65536, 65536, 2,
                )
            },
            { # Partial image topology overrides flavor
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "2",
                    }
                },
                "expect": (
                    2, -1, -1, 65536, 65536, 65536,
                )
            },
            { # Restrict use of threads
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_threads": "2",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_threads": "1",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 65536, 1,
                )
            },
            { # Force use of at least two sockets
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {}
                },
                "expect": (
                    -1, -1, -1, 65536, 8, 1
                )
            },
            { # Image limits reduce flavor
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "4",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 4, 1
                )
            },
            { # Image limits kill flavor preferred
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "2",
                    "hw:cpu_cores": "8",
                    "hw:cpu_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "4",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 4, 65536
                )
            },
            { # Image limits cannot exceed flavor
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "16",
                    }
                },
                "expect": exception.ImageVCPULimitsRangeExceeded,
            },
            { # Image preferred cannot exceed flavor
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_cores": "16",
                    }
                },
                "expect": exception.ImageVCPUTopologyRangeExceeded,
            },
        ]
        # A tuple means success and encodes (preferred, maximum) topology
        # triples; anything else is the expected exception class.
        for topo_test in testdata:
            image_meta = compute.ImageMeta.from_dict(topo_test["image"])
            if type(topo_test["expect"]) == tuple:
                (preferred,
                 maximum) = hw._get_cpu_topology_constraints(
                     topo_test["flavor"], image_meta)
                self.assertEqual(topo_test["expect"][0], preferred.sockets)
                self.assertEqual(topo_test["expect"][1], preferred.cores)
                self.assertEqual(topo_test["expect"][2], preferred.threads)
                self.assertEqual(topo_test["expect"][3], maximum.sockets)
                self.assertEqual(topo_test["expect"][4], maximum.cores)
                self.assertEqual(topo_test["expect"][5], maximum.threads)
            else:
                self.assertRaises(topo_test["expect"],
                                  hw._get_cpu_topology_constraints,
                                  topo_test["flavor"],
                                  image_meta)

    def test_possible_topologies(self):
        """Enumeration of all (sockets, cores, threads) factorisations.

        Each candidate must multiply out to the vCPU count and respect
        the per-dimension maximums; impossible limits raise
        ImageVCPULimitsRangeImpossible.
        """
        testdata = [
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                    [4, 1, 2],
                    [2, 2, 2],
                    [1, 4, 2],
                ]
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1024,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                    [4, 1, 2],
                    [2, 2, 2],
                    [1, 4, 2],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 1, 2],
                ]
            },
            {
                # 7 is prime, so only the two trivial factorisations exist.
                "allow_threads": True,
                "vcpus": 7,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [7, 1, 1],
                    [1, 7, 1],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 2,
                "maxcores": 1,
                "maxthreads": 1,
                "expect": exception.ImageVCPULimitsRangeImpossible,
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 2,
                "maxcores": 1,
                "maxthreads": 4,
                "expect": exception.ImageVCPULimitsRangeImpossible,
            },
        ]
        for topo_test in testdata:
            if type(topo_test["expect"]) == list:
                actual = []
                for topology in hw._get_possible_cpu_topologies(
                        topo_test["vcpus"],
                        compute.VirtCPUTopology(
                            sockets=topo_test["maxsockets"],
                            cores=topo_test["maxcores"],
                            threads=topo_test["maxthreads"]),
                        topo_test["allow_threads"]):
                    actual.append([topology.sockets,
                                   topology.cores,
                                   topology.threads])
                self.assertEqual(topo_test["expect"], actual)
            else:
                self.assertRaises(topo_test["expect"],
                                  hw._get_possible_cpu_topologies,
                                  topo_test["vcpus"],
                                  compute.VirtCPUTopology(
                                      sockets=topo_test["maxsockets"],
                                      cores=topo_test["maxcores"],
                                      threads=topo_test["maxthreads"]),
                                  topo_test["allow_threads"])

    def test_sorting_topologies(self):
        """Candidate topologies are ordered by match score.

        A topology scores one point per dimension that equals the
        preferred sockets/cores/threads value (-1 means "don't care");
        higher scores sort first, as the inline score comments show.
        """
        testdata = [
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "sockets": 4,
                "cores": 2,
                "threads": 1,
                "expect": [
                    [4, 2, 1],  # score = 2
                    [8, 1, 1],  # score = 1
                    [2, 4, 1],  # score = 1
                    [1, 8, 1],  # score = 1
                    [4, 1, 2],  # score = 1
                    [2, 2, 2],  # score = 1
                    [1, 4, 2],  # score = 1
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1024,
                "maxthreads": 2,
                "sockets": -1,
                "cores": 4,
                "threads": -1,
                "expect": [
                    [2, 4, 1],  # score = 1
                    [1, 4, 2],  # score = 1
                    [8, 1, 1],  # score = 0
                    [4, 2, 1],  # score = 0
                    [1, 8, 1],  # score = 0
                    [4, 1, 2],  # score = 0
                    [2, 2, 2],  # score = 0
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "sockets": -1,
                "cores": -1,
                "threads": 2,
                "expect": [
                    [4, 1, 2],  # score = 1
                    [8, 1, 1],  # score = 0
                ]
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "sockets": -1,
                "cores": -1,
                "threads": 2,
                "expect": [
                    [8, 1, 1],  # score = 0
                ]
            },
        ]
        for topo_test in testdata:
            actual = []
            possible = hw._get_possible_cpu_topologies(
                topo_test["vcpus"],
                compute.VirtCPUTopology(sockets=topo_test["maxsockets"],
                                        cores=topo_test["maxcores"],
                                        threads=topo_test["maxthreads"]),
                topo_test["allow_threads"])
            tops = hw._sort_possible_cpu_topologies(
                possible,
                compute.VirtCPUTopology(sockets=topo_test["sockets"],
                                        cores=topo_test["cores"],
                                        threads=topo_test["threads"]))
            for topology in tops:
                actual.append([topology.sockets,
                               topology.cores,
                               topology.threads])
            self.assertEqual(topo_test["expect"], actual)

    def test_best_config(self):
        """End-to-end selection of the single best guest topology.

        Combines flavor/image constraints with an optional NUMA topology
        (which can force a specific thread count) and checks the first
        topology returned by _get_desirable_cpu_topologies().
        """
        testdata = [
            { # Flavor sets preferred topology only
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1"
                }),
                "image": {
                    "properties": {}
                },
                "expect": [8, 2, 1],
            },
            { # Image topology overrides flavor
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1",
                    # NOTE(review): key differs from "hw:cpu_max_threads"
                    # used elsewhere in this file - presumably a typo that
                    # is silently ignored; confirm before relying on it.
                    "hw:cpu_maxthreads": "2",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [4, 2, 2],
            },
            { # Image topology overrides flavor
                "allow_threads": False,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1",
                    # NOTE(review): same suspect "hw:cpu_maxthreads" key as
                    # above - confirm it is intentionally non-canonical.
                    "hw:cpu_maxthreads": "2",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [8, 2, 1],
            },
            { # Partial image topology overrides flavor
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "8",
                    "hw:cpu_cores": "2",
                    "hw:cpu_threads": "1"
                }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "2"
                    }
                },
                "expect": [2, 8, 1],
            },
            { # Restrict use of threads
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_threads": "1"
                }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            { # Force use of at least two sockets
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            { # Image limits reduce flavor
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_sockets": "8",
                    "hw:cpu_max_cores": "8",
                    "hw:cpu_max_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_sockets": 4,
                    }
                },
                "expect": [4, 4, 1]
            },
            { # Image limits kill flavor preferred
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=16, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_sockets": "2",
                    "hw:cpu_cores": "8",
                    "hw:cpu_threads": "1",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 4,
                    }
                },
                "expect": [16, 1, 1]
            },
            { # NUMA needs threads, only cores requested by flavor
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_cores": "2",
                }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 2,
                    }
                },
                "numa_topology": compute.InstanceNUMATopology(
                    cells=[
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=1, threads=2)),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024)]),
                "expect": [1, 2, 2]
            },
            { # NUMA needs threads, but more than requested by flavor - the
              # least amount of threads wins
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_threads": "2",
                }),
                "image": {
                    "properties": {}
                },
                "numa_topology": compute.InstanceNUMATopology(
                    cells=[
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            { # NUMA needs threads, but more than limit in flavor - the
              # least amount of threads which divides into the vcpu
              # count wins. So with desired 4, max of 3, and
              # vcpu count of 4, we should get 2 threads.
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_max_sockets": "5",
                    "hw:cpu_max_cores": "2",
                    "hw:cpu_max_threads": "3",
                }),
                "image": {
                    "properties": {}
                },
                "numa_topology": compute.InstanceNUMATopology(
                    cells=[
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            { # NUMA needs threads, but thread count does not
              # divide into flavor vcpu count, so we must
              # reduce thread count to closest divisor
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=6, memory_mb=2048,
                                     extra_specs={
                }),
                "image": {
                    "properties": {}
                },
                "numa_topology": compute.InstanceNUMATopology(
                    cells=[
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 3]
            },
            { # NUMA needs different number of threads per cell - the least
              # amount of threads wins
                "allow_threads": True,
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={}),
                "image": {
                    "properties": {}
                },
                "numa_topology": compute.InstanceNUMATopology(
                    cells=[
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=2, threads=2)),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([4, 5, 6, 7]), memory=1024,
                            cpu_topology=compute.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [4, 1, 2]
            },
        ]
        for topo_test in testdata:
            image_meta = compute.ImageMeta.from_dict(topo_test["image"])
            # Only the first (best) topology is checked.
            topology = hw._get_desirable_cpu_topologies(
                topo_test["flavor"],
                image_meta,
                topo_test["allow_threads"],
                topo_test.get("numa_topology"))[0]
            self.assertEqual(topo_test["expect"][0], topology.sockets)
            self.assertEqual(topo_test["expect"][1], topology.cores)
            self.assertEqual(topo_test["expect"][2], topology.threads)
class NUMATopologyTest(test.NoDBTestCase):
    def test_topology_constraints(self):
        """numa_get_constraints() built from flavor/image settings.

        Each entry pairs a flavor and image with one of: None (no NUMA
        topology requested), an expected InstanceNUMATopology, or the
        exception class raised for inconsistent/forbidden settings.
        """
        testdata = [
            {
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                }),
                "image": {
                },
                "expect": None,
            },
            {
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2
                }),
                "image": {
                },
                # vCPUs and memory are split evenly across the two nodes.
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([4, 5, 6, 7]), memory=1024),
                    ]),
            },
            {
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:mem_page_size": 2048
                }),
                "image": {
                },
                # A page size request implies a single-cell NUMA topology.
                "expect": compute.InstanceNUMATopology(cells=[
                    compute.InstanceNUMACell(
                        id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                        memory=2048, pagesize=2048)
                    ]),
            },
            {
                # vcpus is not a multiple of nodes, so it
                # is an error to not provide cpu/mem mapping
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 3
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyAsymmetric,
            },
            {
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 3,
                    "hw:numa_cpus.0": "0-3",
                    "hw:numa_mem.0": "1024",
                    "hw:numa_cpus.1": "4,6",
                    "hw:numa_mem.1": "512",
                    "hw:numa_cpus.2": "5,7",
                    "hw:numa_mem.2": "512",
                }),
                "image": {
                },
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([4, 6]), memory=512),
                        compute.InstanceNUMACell(
                            id=2, cpuset=set([5, 7]), memory=512)
                    ]),
            },
            {
                # Same asymmetric layout as above, but specified through
                # image properties instead of flavor extra specs.
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                }),
                "image": {
                    "properties": {
                        "hw_numa_nodes": 3,
                        "hw_numa_cpus.0": "0-3",
                        "hw_numa_mem.0": "1024",
                        "hw_numa_cpus.1": "4,6",
                        "hw_numa_mem.1": "512",
                        "hw_numa_cpus.2": "5,7",
                        "hw_numa_mem.2": "512",
                    },
                },
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([4, 6]), memory=512),
                        compute.InstanceNUMACell(
                            id=2, cpuset=set([5, 7]), memory=512)
                    ]),
            },
            {
                # Request a CPU that is out of range
                # wrt vCPU count
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 1,
                    "hw:numa_cpus.0": "0-16",
                    "hw:numa_mem.0": "2048",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUOutOfRange,
            },
            {
                # Request the same CPU in two nodes
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_cpus.0": "0-7",
                    "hw:numa_mem.0": "1024",
                    "hw:numa_cpus.1": "0-7",
                    "hw:numa_mem.1": "1024",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUDuplicates,
            },
            {
                # Request with some CPUs not assigned
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_cpus.0": "0-2",
                    "hw:numa_mem.0": "1024",
                    "hw:numa_cpus.1": "3-4",
                    "hw:numa_mem.1": "1024",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUsUnassigned,
            },
            {
                # Request too little memory vs flavor total
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_cpus.0": "0-3",
                    "hw:numa_mem.0": "512",
                    "hw:numa_cpus.1": "4-7",
                    "hw:numa_mem.1": "512",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyMemoryOutOfRange,
            },
            {
                # Request too much memory vs flavor total
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_cpus.0": "0-3",
                    "hw:numa_mem.0": "1576",
                    "hw:numa_cpus.1": "4-7",
                    "hw:numa_mem.1": "1576",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyMemoryOutOfRange,
            },
            {
                # Request missing mem.0
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_cpus.0": "0-3",
                    "hw:numa_mem.1": "1576",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyIncomplete,
            },
            {
                # Request missing cpu.0
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:numa_mem.0": "1576",
                    "hw:numa_cpus.1": "4-7",
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyIncomplete,
            },
            {
                # Image attempts to override flavor
                "flavor": compute.Flavor(vcpus=8, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                }),
                "image": {
                    "properties": {
                        "hw_numa_nodes": 4}
                },
                "expect": exception.ImageNUMATopologyForbidden,
            },
            {
                # NUMA + CPU pinning requested in the flavor
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {
                },
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the flavor
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {
                },
                # Pinning alone still forces a single-cell NUMA topology.
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # NUMA + CPU pinning requested in the image
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                    }},
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                        compute.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the image
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={}),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                    }},
                "expect": compute.InstanceNUMATopology(cells=
                    [
                        compute.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # Invalid CPU pinning override
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED}
                },
                "expect": exception.ImageCPUPinningForbidden,
            },
            {
                # Invalid CPU pinning policy with realtime
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_realtime": "yes",
                }),
                "image": {
                    "properties": {}
                },
                "expect": exception.RealtimeConfigurationInvalid,
            },
            {
                # Invalid CPU thread pinning override
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                        "hw_cpu_thread_policy":
                            fields.CPUThreadAllocationPolicy.REQUIRE,
                    }
                },
                "expect": exception.ImageCPUThreadPolicyForbidden,
            },
            {
                # Invalid CPU pinning policy with CPU thread pinning
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {
                    "properties": {}
                },
                "expect": exception.CPUThreadPolicyConfigurationInvalid,
            },
            {
                # Invalid vCPUs mask with realtime
                "flavor": compute.Flavor(vcpus=4, memory_mb=2048,
                                     extra_specs={
                    "hw:cpu_policy": "dedicated",
                    "hw:cpu_realtime": "yes",
                }),
                "image": {
                    "properties": {}
                },
                "expect": exception.RealtimeMaskNotFoundOrInvalid,
            },
        ]
        # None => no topology; a class object => the expected exception;
        # otherwise the expected InstanceNUMATopology is compared cell
        # by cell (id, cpuset, memory, pagesize, cpu_pinning).
        for testitem in testdata:
            image_meta = compute.ImageMeta.from_dict(testitem["image"])
            if testitem["expect"] is None:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNone(topology)
            elif type(testitem["expect"]) == type:
                self.assertRaises(testitem["expect"],
                                  hw.numa_get_constraints,
                                  testitem["flavor"],
                                  image_meta)
            else:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNotNone(topology)
                self.assertEqual(len(testitem["expect"].cells),
                                 len(topology.cells))
                for i in range(len(topology.cells)):
                    self.assertEqual(testitem["expect"].cells[i].id,
                                     topology.cells[i].id)
                    self.assertEqual(testitem["expect"].cells[i].cpuset,
                                     topology.cells[i].cpuset)
                    self.assertEqual(testitem["expect"].cells[i].memory,
                                     topology.cells[i].memory)
                    self.assertEqual(testitem["expect"].cells[i].pagesize,
                                     topology.cells[i].pagesize)
                    self.assertEqual(testitem["expect"].cells[i].cpu_pinning,
                                     topology.cells[i].cpu_pinning)
    def test_host_usage_contiguous(self):
        """numa_usage_from_instances() accumulates per-cell CPU/memory usage.

        Host cells use contiguous ids 0..2; the two instances consume
        cells 0 and 1 only, so cell 2 must stay untouched.  The mempages
        objects must be passed through unchanged - usage accounting does
        not mutate page counts.
        """
        hpages0_4K = compute.NUMAPagesTopology(size_kb=4, total=256, used=0)
        hpages0_2M = compute.NUMAPagesTopology(size_kb=2048, total=0, used=1)
        hpages1_4K = compute.NUMAPagesTopology(size_kb=4, total=128, used=2)
        hpages1_2M = compute.NUMAPagesTopology(size_kb=2048, total=0, used=3)
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 hpages0_4K, hpages0_2M],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 hpages1_4K, hpages1_2M],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=2, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            compute.InstanceNUMACell(id=1, cpuset=set([4]), memory=256),
        ])
        # NOTE: usage is keyed by cell id only - instance2's second cell
        # claims cpuset {5, 7}, which is not a subset of host cell 1's
        # cpuset {4, 6}, yet it still counts against cell 1 below.
        instance2 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            compute.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        # Cell 0: 3 + 2 vCPUs and 256 + 256 MB from the two instances.
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[0].mempages, [
            hpages0_4K, hpages0_2M])
        # Cell 1: 1 + 2 vCPUs and 256 + 256 MB.
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 3)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].mempages, [
            hpages1_4K, hpages1_2M])
        # Page counts are exactly as constructed - not touched by usage.
        self.assertEqual(256, hpages0_4K.total)
        self.assertEqual(0, hpages0_4K.used)
        self.assertEqual(0, hpages0_2M.total)
        self.assertEqual(1, hpages0_2M.used)
        # Cell 2: no instance placed here, so usage stays zero.
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)
        self.assertEqual(128, hpages1_4K.total)
        self.assertEqual(2, hpages1_4K.used)
        self.assertEqual(0, hpages1_2M.total)
        self.assertEqual(3, hpages1_2M.used)
    def test_host_usage_sparse(self):
        """Usage accounting matches instance cells to host cells by id.

        The host deliberately uses non-contiguous cell ids (0, 5, 6) to
        prove that matching is by id rather than by list position.
        """
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=5, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=6, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            compute.InstanceNUMACell(id=6, cpuset=set([4]), memory=256),
        ])
        instance2 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256,
                                     cpu_usage=0, memory_usage=0, mempages=[]),
            compute.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256,
                                     cpu_usage=0, memory_usage=0, mempages=[]),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        # Host cell 0: instance1 (3 vCPUs) + instance2 (2 vCPUs).
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hosttopo.cells[0].id,
                         hostusage.cells[0].id)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        # Host cell id 5: only instance2's second cell (2 vCPUs, 256 MB).
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hosttopo.cells[1].id,
                         hostusage.cells[1].id)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 256)
        # Host cell id 6: only instance1's second cell (1 vCPU, 256 MB).
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)
    def test_host_usage_culmulative_with_free(self):
        """Verify instance usage is added on top of pre-existing host cell
        usage, and that numa_usage_from_instances(..., free=True) subtracts
        the same usage back out again.

        NOTE(review): "culmulative" is a typo for "cumulative"; the name is
        kept since renaming a test method changes its public identifier.
        """
        # Host cells 0 and 1 start with non-zero usage, so the expected
        # totals asserted below are that baseline plus the instance's usage.
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=2, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=1, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=2, cpuset=set([5, 7]), memory=256,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512),
            compute.InstanceNUMACell(id=1, cpuset=set([3]), memory=256),
            compute.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1])
        self.assertIsInstance(hostusage.cells[0], compute.NUMACell)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 1024)
        self.assertIsInstance(hostusage.cells[1], compute.NUMACell)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 768)
        self.assertIsInstance(hostusage.cells[2], compute.NUMACell)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)
        # Test freeing of resources
        hostusage = hw.numa_usage_from_instances(
            hostusage, [instance1], free=True)
        self.assertEqual(hostusage.cells[0].cpu_usage, 2)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].cpu_usage, 1)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)
    def test_topo_usage_none(self):
        """A None host topology yields None; an empty or None instance list
        leaves host usage untouched (all zeroes here).
        """
        hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = compute.InstanceNUMATopology(cells=[
            compute.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            compute.InstanceNUMACell(id=2, cpuset=set([2]), memory=256),
        ])
        # host=None short-circuits to None regardless of instances.
        hostusage = hw.numa_usage_from_instances(
            None, [instance1])
        self.assertIsNone(hostusage)
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [])
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)
        hostusage = hw.numa_usage_from_instances(
            hosttopo, None)
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)
def assertNUMACellMatches(self, expected_cell, got_cell):
attrs = ('cpuset', 'memory', 'id')
if isinstance(expected_cell, compute.NUMATopology):
attrs += ('cpu_usage', 'memory_usage')
for attr in attrs:
self.assertEqual(getattr(expected_cell, attr),
getattr(got_cell, attr))
def test_json(self):
expected = compute.NUMATopology(
cells=[
compute.NUMACell(id=1, cpuset=set([1, 2]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
compute.NUMACell(id=2, cpuset=set([3, 4]), memory=1024,
cpu_usage=0, memory_usage=0,
mempages=[], siblings=[],
pinned_cpus=set([]))])
got = compute.NUMATopology.obj_from_db_obj(expected._to_json())
for exp_cell, got_cell in zip(expected.cells, got.cells):
self.assertNUMACellMatches(exp_cell, got_cell)
class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase):
    """Tests for hw._numa_fit_instance_cell: fitting a single instance NUMA
    cell onto a single host NUMA cell, with and without overcommit limits.
    """
    def test_fit_instance_cell_success_no_limit(self):
        """An instance cell that fits exactly is placed on the host cell."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0, memory_usage=0, mempages=[],
                                     siblings=[], pinned_cpus=set([]))
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell)
        self.assertIsInstance(fitted_cell, compute.InstanceNUMACell)
        # The fitted cell is re-homed onto the host cell's id.
        self.assertEqual(host_cell.id, fitted_cell.id)
    def test_fit_instance_cell_success_w_limit(self):
        """With 2x overcommit limits a fully-used host cell still fits."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsInstance(fitted_cell, compute.InstanceNUMACell)
        self.assertEqual(host_cell.id, fitted_cell.id)
    def test_fit_instance_cell_self_overcommit(self):
        """An instance bigger than the host cell fails even within limits."""
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0, memory_usage=0, mempages=[],
                                     siblings=[], pinned_cpus=set([]))
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3]), memory=4096)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)
    def test_fit_instance_cell_fail_w_limit(self):
        """Requests exceeding the overcommit limit fail: first on memory,
        then on CPU count.
        """
        host_cell = compute.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=4096)
        limit_cell = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)
        instance_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)
class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
    """Tests for hw.numa_fit_instance_to_host against a two-cell host:
    limit-free fitting, overcommit limits, cumulative placement, and
    PCI-request-aware placement.
    """
    def setUp(self):
        super(VirtNUMAHostTopologyTestCase, self).setUp()
        # Host with two cells of 2 CPUs / 2048 MB, each already fully used
        # (usage == capacity), so further placement needs overcommit.
        self.host = compute.NUMATopology(
            cells=[
                compute.NUMACell(id=1, cpuset=set([1, 2]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([])),
                compute.NUMACell(id=2, cpuset=set([3, 4]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([]))])
        # 2x CPU and RAM overcommit.
        self.limits = compute.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        # instance1 fills one whole cell; instance2 is too wide for any
        # single cell; instance3 fits in half a cell.
        self.instance1 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=2048)])
        self.instance2 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2, 3, 4]), memory=1024)])
        self.instance3 = compute.InstanceNUMATopology(
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=1024)])
    def test_get_fitting_success_no_limits(self):
        """Without limits both instances fit, with usage carried forward."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3)
        self.assertIsInstance(fitted_instance2, compute.InstanceNUMATopology)
    def test_get_fitting_success_limits(self):
        """A small instance fits within the 2x overcommit limits."""
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance.cells[0].id)
    def test_get_fitting_fails_no_limits(self):
        """An instance spanning more CPUs than any one cell cannot fit.

        NOTE(review): despite the name, this test does pass self.limits;
        the failure comes from instance2 being too wide for either cell.
        """
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance)
    def test_get_fitting_culmulative_fails_limits(self):
        """After one cell is filled to its limit, a too-wide instance
        fails.  ("culmulative" typo kept: renaming changes the test name.)
        """
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance2)
    def test_get_fitting_culmulative_success_limits(self):
        """A second, smaller instance is steered to the other host cell."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, compute.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance2, compute.InstanceNUMATopology)
        self.assertEqual(2, fitted_instance2.cells[0].id)
    def test_get_fitting_pci_success(self):
        """Fitting succeeds when PCI stats report the request supported."""
        pci_request = compute.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        # PEP 8 fix: no space after '=' in keyword arguments (original had
        # 'return_value= True').
        with mock.patch.object(stats.PciDeviceStats,
                               'support_requests', return_value=True):
            fitted_instance1 = hw.numa_fit_instance_to_host(
                self.host,
                self.instance1,
                pci_requests=pci_reqs,
                pci_stats=pci_stats)
            self.assertIsInstance(fitted_instance1,
                                  compute.InstanceNUMATopology)
    def test_get_fitting_pci_fail(self):
        """Fitting fails when PCI stats report the request unsupported."""
        pci_request = compute.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        with mock.patch.object(stats.PciDeviceStats,
                               'support_requests', return_value=False):
            fitted_instance1 = hw.numa_fit_instance_to_host(
                self.host,
                self.instance1,
                pci_requests=pci_reqs,
                pci_stats=pci_stats)
            self.assertIsNone(fitted_instance1)
class NumberOfSerialPortsTest(test.NoDBTestCase):
    """Tests for hw.get_number_of_serial_ports.

    The count may come from the flavor extra spec ``hw:serial_port_count``
    or the image property ``hw_serial_port_count``; the image value may
    lower, but never exceed, the flavor value.
    """
    def _flavor(self, ports=None):
        # Helper: build a flavor, optionally with hw:serial_port_count set.
        # (Extracted: the original repeated this construction in every test.)
        extra_specs = {} if ports is None else {"hw:serial_port_count": ports}
        return compute.Flavor(vcpus=8, memory_mb=2048,
                              extra_specs=extra_specs)
    def _image_meta(self, ports=None):
        # Helper: build image metadata, optionally with hw_serial_port_count.
        props = {} if ports is None else {"hw_serial_port_count": ports}
        return compute.ImageMeta.from_dict({"properties": props})
    def test_flavor(self):
        """Flavor extra spec alone sets the count."""
        num_ports = hw.get_number_of_serial_ports(self._flavor(3),
                                                  self._image_meta())
        self.assertEqual(3, num_ports)
    def test_image_meta(self):
        """Image property alone sets the count."""
        num_ports = hw.get_number_of_serial_ports(self._flavor(),
                                                  self._image_meta(2))
        self.assertEqual(2, num_ports)
    def test_flavor_invalid_value(self):
        """A non-numeric flavor value raises ImageSerialPortNumberInvalid."""
        self.assertRaises(exception.ImageSerialPortNumberInvalid,
                          hw.get_number_of_serial_ports,
                          self._flavor('foo'), self._image_meta())
    def test_image_meta_smaller_than_flavor(self):
        """The smaller image value wins over the flavor value."""
        num_ports = hw.get_number_of_serial_ports(self._flavor(3),
                                                  self._image_meta(2))
        self.assertEqual(2, num_ports)
    def test_flavor_smaller_than_image_meta(self):
        """An image value above the flavor limit raises."""
        self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
                          hw.get_number_of_serial_ports,
                          self._flavor(3), self._image_meta(4))
class HelperMethodsTestCase(test.NoDBTestCase):
    """Tests for hw.get_host_numa_usage_from_instance's flexible inputs:
    host and instance may each be an object, a dict, or carry their NUMA
    topology as a JSON string; the result mirrors the host representation
    unless never_serialize_result is set.
    """
    def setUp(self):
        super(HelperMethodsTestCase, self).setUp()
        # Two empty host cells; the instance below uses 2 CPUs / 256 MB on
        # cell 0 and 1 CPU / 256 MB on cell 1 (asserted by _check_usage).
        self.hosttopo = compute.NUMATopology(cells=[
            compute.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            compute.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        self.instancetopo = compute.InstanceNUMATopology(
            instance_uuid='fake-uuid',
            cells=[
                compute.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]), memory=256, pagesize=2048,
                    cpu_pinning={0: 0, 1: 1}),
                compute.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=256, pagesize=2048,
                    cpu_pinning={2: 3}),
            ])
        self.context = context.RequestContext('fake-user',
                                              'fake-project')
    def _check_usage(self, host_usage):
        # Shared expectation: usage matches self.instancetopo's demands.
        self.assertEqual(2, host_usage.cells[0].cpu_usage)
        self.assertEqual(256, host_usage.cells[0].memory_usage)
        self.assertEqual(1, host_usage.cells[1].cpu_usage)
        self.assertEqual(256, host_usage.cells[1].memory_usage)
    def test_dicts_json(self):
        """Both topologies as JSON strings in dicts -> JSON string result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_dicts_instance_json(self):
        """Object host + JSON instance -> object result."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)
    def test_dicts_instance_json_old(self):
        """Object host + legacy dict-serialized instance -> object result."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology':
                jsonutils.dumps(self.instancetopo._to_dict())}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)
    def test_dicts_host_json(self):
        """JSON host + object instance -> JSON string result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_dicts_host_json_old(self):
        """Legacy dict-serialized host -> JSON string result."""
        host = {'numa_topology': jsonutils.dumps(
                self.hosttopo._to_dict())}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_object_host_instance_json(self):
        """ComputeNode host + JSON instance -> JSON string result."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_object_host_instance(self):
        """ComputeNode host + object instance -> JSON string result."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_instance_with_fetch(self):
        """An instance dict with only a uuid triggers a topology fetch."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = {'uuid': fake_uuid}
        with mock.patch.object(compute.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)
    def test_object_instance_with_load(self):
        """An Instance object without the field loads it lazily."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, uuid=fake_uuid)
        with mock.patch.object(compute.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)
    def test_instance_serialized_by_build_request_spec(self):
        """A fully primitive-serialized instance is still handled."""
        host = compute.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
        # We can remove this test once we no longer use that method.
        instance_raw = jsonutils.to_primitive(
            base_obj.obj_to_primitive(instance))
        res = hw.get_host_numa_usage_from_instance(host, instance_raw)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_attr_host(self):
        """Host as an arbitrary object exposing a .numa_topology attr."""
        class Host(object):
            # 'obj' is used instead of 'self' so the closed-over 'self'
            # (the test case) remains visible inside __init__.
            def __init__(obj):
                obj.numa_topology = self.hosttopo._to_json()
        host = Host()
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(compute.NUMATopology.obj_from_db_obj(res))
    def test_never_serialize_result(self):
        """never_serialize_result=True forces an object result even for a
        JSON host.
        """
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance,
                                                   never_serialize_result=True)
        self.assertIsInstance(res, compute.NUMATopology)
        self._check_usage(res)
    def test_dict_numa_topology_to_obj(self):
        """instance_topology_from_instance rebuilds cells from a primitive
        (dict) instance, field for field.
        """
        fake_uuid = str(uuid.uuid4())
        instance = compute.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        instance_dict = base_obj.obj_to_primitive(instance)
        instance_numa_topo = hw.instance_topology_from_instance(instance_dict)
        for expected_cell, actual_cell in zip(self.instancetopo.cells,
                                              instance_numa_topo.cells):
            for k in expected_cell.fields:
                self.assertEqual(getattr(expected_cell, k),
                                 getattr(actual_cell, k))
class VirtMemoryPagesTestCase(test.NoDBTestCase):
    """Tests for memory page size handling: pagesize on instance cells,
    page usage accounting, flavor/image pagesize constraint resolution,
    and whether a host cell can satisfy a pagesize request.
    """
    def test_cell_instance_pagesize(self):
        """An InstanceNUMACell stores the pagesize it was created with."""
        cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        self.assertEqual(0, cell.id)
        self.assertEqual(set([0]), cell.cpuset)
        self.assertEqual(1024, cell.memory)
        self.assertEqual(2048, cell.pagesize)
    def test_numa_pagesize_usage_from_cell(self):
        """512 MB at 2048 KB pages consumes 256 of the host's 512 pages."""
        instcell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=512, pagesize=2048)
        hostcell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[compute.NUMAPagesTopology(
                size_kb=2048,
                total=512,
                used=0)],
            siblings=[], pinned_cpus=set([]))
        # Third argument is the sign multiplier — presumably +1 to consume
        # pages (vs -1 to free); TODO confirm against hw's implementation.
        topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1)
        self.assertEqual(2048, topo[0].size_kb)
        self.assertEqual(512, topo[0].total)
        self.assertEqual(256, topo[0].used)
    def _test_get_requested_mempages_pagesize(self, spec=None, props=None):
        # Helper: resolve the pagesize constraint from flavor extra specs
        # and/or image properties.
        flavor = compute.Flavor(vcpus=16, memory_mb=2048,
                                extra_specs=spec or {})
        image_meta = compute.ImageMeta.from_dict({"properties": props or {}})
        return hw._numa_get_pagesize_constraints(flavor, image_meta)
    def test_get_requested_mempages_pagesize_from_flavor_swipe(self):
        """Symbolic flavor values map to the MEMPAGES_* constants.

        NOTE(review): "swipe" looks like a typo for "sweep"; name kept.
        """
        self.assertEqual(
            hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "small"}))
        self.assertEqual(
            hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"}))
        self.assertEqual(
            hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"}))
    def test_get_requested_mempages_pagesize_from_flavor_specific(self):
        """A bare numeric string is taken as a size in KB."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_flavor_invalid(self):
        """Unparseable or negative sizes raise MemoryPageSizeInvalid."""
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "foo"})
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "-42"})
    def test_get_requested_mempages_pagesizes_from_flavor_suffix_sweep(self):
        """Unit suffixes (KB/MB/GB) are normalized to KB."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048KB"}))
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2MB"}))
        self.assertEqual(
            1048576,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "1GB"}))
    def test_get_requested_mempages_pagesize_from_image_flavor_any(self):
        """Flavor 'any' lets the image pick a specific size."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"},
                props={"hw_mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_image_flavor_large(self):
        """Flavor 'large' lets the image pick a specific large size."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"},
                props={"hw_mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_image_forbidden(self):
        """Image may not request large pages when the flavor says small."""
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "small"},
            {"hw_mem_page_size": "2048"})
    def test_get_requested_mempages_pagesize_from_image_forbidden2(self):
        """Image may not request a pagesize when the flavor sets none."""
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {}, {"hw_mem_page_size": "2048"})
    def test_cell_accepts_request_wipe(self):
        """Host with only 4K pages: satisfies small/any, refuses large.

        NOTE(review): "wipe" looks like a typo for "sweep"; name kept.
        """
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=262144, used=0),
            ],
            siblings=[], pinned_cpus=set([]))
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))
    def test_cell_accepts_request_large_pass(self):
        """MEMPAGES_LARGE resolves to the host's 2048 KB pages."""
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
    def test_cell_accepts_request_custom_pass(self):
        """An explicit 2048 KB request matches the host's 2048 KB pages."""
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
    def test_cell_accepts_request_remainder_memory(self):
        # Test memory can't be divided with no rem by mempage's size_kb
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024 + 1, pagesize=2048)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))
    def test_cell_accepts_request_host_mempages(self):
        # Test pagesize not in host's mempages
        inst_cell = compute.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=4096)
        host_cell = compute.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                compute.NUMAPagesTopology(size_kb=4, total=256, used=0),
                compute.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertRaises(exception.MemoryPageSizeNotSupported,
                          hw._numa_cell_supports_pagesize_request,
                          host_cell, inst_cell)
class _CPUPinningTestCaseBase(object):
    """Mixin providing shared assertion helpers for the CPU pinning test
    cases.  Expects to be combined with a TestCase subclass that supplies
    the assert* primitives.
    """
    def assertEqualTopology(self, expected, got):
        """Assert two CPU topologies agree on sockets, cores and threads."""
        for attr in ('sockets', 'cores', 'threads'):
            self.assertEqual(getattr(expected, attr), getattr(got, attr),
                             "Mismatch on %s" % attr)
    def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
        """Assert the cell was pinned, to an acceptable host cell, with one
        pin per guest CPU.
        """
        default_cell_id = 0
        self.assertIsNotNone(instance_cell)
        if cell_ids is None:
            self.assertEqual(default_cell_id, instance_cell.id)
        else:
            self.assertIn(instance_cell.id, cell_ids)
        self.assertEqual(len(instance_cell.cpuset),
                         len(instance_cell.cpu_pinning))
    def assertPinningPreferThreads(self, instance_cell, host_cell):
        """Make sure we are preferring threads.
        We do this by assessing that at least 2 CPUs went to the same core
        if that was even possible to begin with.
        """
        max_free_siblings = max(map(len, host_cell.free_siblings))
        if len(instance_cell) > 1 and max_free_siblings > 1:
            cpu_to_sib = {}
            for sib in host_cell.free_siblings:
                for cpu in sib:
                    cpu_to_sib[cpu] = tuple(sorted(sib))
            pins_per_sib = collections.defaultdict(int)
            # Only the host-side pin matters here, so iterate over values()
            # rather than items() (the guest CPU id was unused).
            for host_p in instance_cell.cpu_pinning.values():
                pins_per_sib[cpu_to_sib[host_p]] += 1
            self.assertTrue(max(pins_per_sib.values()) > 1,
                            "Seems threads were not preferred by the pinning "
                            "logic.")
class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
def test_get_pinning_inst_too_large_cpu(self):
host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
memory=2048, memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))
inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertIsNone(inst_pin)
    def test_get_pinning_inst_too_large_mem(self):
        """Pinning fails when the host cell's free memory is insufficient
        (2048 requested vs 2048 - 1024 available).
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=1024,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_inst_not_avail(self):
        """Pinning fails when an already-pinned host CPU leaves too few
        free CPUs for the request.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([0]),
                                    siblings=[], mempages=[])
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
def test_get_pinning_no_sibling_fits_empty(self):
host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2]),
memory=2048, memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))
inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
got_pinning = {x: x for x in range(0, 3)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_no_sibling_fits_w_usage(self):
        """Pinning skips the already-pinned host CPU 1, mapping guest CPUs
        onto the remaining free host CPUs.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([1]), mempages=[],
                                    siblings=[])
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_pinning = {0: 0, 1: 2, 2: 3}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_fits(self):
        """A 4-CPU instance pins 1:1 on a 4-CPU threadless host cell with a
        flat 4-core topology.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
        """With two 2-thread host sibling sets, the guest topology becomes
        2 cores x 2 threads.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self):
        """An 8-CPU guest on four 2-thread sibling sets yields a 4 core x
        2 thread topology, pinned 1:1.
        """
        host_pin = compute.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 8)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
        """Pinning avoids the already-pinned CPUs (1, 2, 5, 6), pairing the
        remaining free CPUs of each sibling set.
        """
        host_pin = compute.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([1, 2, 5, 6]),
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
def test_get_pinning_host_siblings_fit_single_core(self):
host_pin = compute.NUMACell(
id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=4096, memory_usage=0,
siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
mempages=[], pinned_cpus=set([]))
inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = compute.VirtCPUTopology(sockets=1, cores=1, threads=4)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
got_pinning = {x: x for x in range(0, 4)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_host_siblings_fit(self):
        """A 4-CPU guest on two 2-thread sibling sets becomes 2 cores x
        2 threads, pinned 1:1.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_require_policy_too_few_siblings(self):
        """The 'require' thread policy fails when usage leaves too few
        complete free sibling pairs.
        """
        host_pin = compute.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1, 2]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_require_policy_fits(self):
        """The 'require' thread policy succeeds when whole sibling pairs
        are free, producing a 2 core x 2 thread topology.
        """
        host_pin = compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_require_policy_fits_w_usage(self):
        """The 'require' policy still fits when enough complete sibling
        pairs remain free despite existing pins.
        """
        host_pin = compute.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = compute.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_instance_odd_fit(self):
    # An odd (5) vCPU count on a paired-sibling host falls back to a
    # single-threaded 5-core topology.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(8)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
        mempages=[],
        pinned_cpus=set([]))
    inst_cell = compute.InstanceNUMACell(cpuset=set(range(5)),
                                         memory=2048)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=5, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_host_siblings_instance_fit_optimize_threads(self):
    # With 4-way sibling groups, six vCPUs are arranged as 3 cores x 2
    # threads rather than 6 single-threaded cores.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(8)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
        mempages=[],
        pinned_cpus=set([]))
    inst_cell = compute.InstanceNUMACell(cpuset=set(range(6)),
                                         memory=2048)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=2)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self):
    # Three pCPUs are pre-pinned; the remaining free CPUs still hold three
    # vCPUs as single-threaded cores.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(8)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
        mempages=[],
        pinned_cpus=set([0, 2, 5]))
    inst_cell = compute.InstanceNUMACell(cpuset=set(range(3)),
                                         memory=2048)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=3, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self):
    # Every sibling pair has one pinned pCPU, leaving only "orphan" CPUs;
    # the four vCPUs are pinned as 4 single-threaded cores.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(8)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
        mempages=[],
        pinned_cpus=set([0, 2, 5, 6]))
    inst_cell = compute.InstanceNUMACell(cpuset=set(range(4)),
                                         memory=2048)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=4, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_host_siblings_large_instance_odd_fit(self):
    # 16 pCPUs in hyperthread pairs (i, i+8); five vCPUs should land on
    # distinct cores (thread-preferring placement is asserted explicitly).
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(16)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 8]), set([1, 9]), set([2, 10]), set([3, 11]),
                  set([4, 12]), set([5, 13]), set([6, 14]), set([7, 15])],
        mempages=[],
        pinned_cpus=set([]))
    inst_cell = compute.InstanceNUMACell(cpuset=set(range(5)),
                                         memory=2048)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    self.assertPinningPreferThreads(fitted, host_cell)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=5, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_isolate_policy_too_few_fully_free_cores(self):
    # The "isolate" policy needs one fully free core per vCPU; with pCPU 1
    # pinned only one core is fully free, so a 2-vCPU guest cannot fit.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(4)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3])],
        mempages=[],
        pinned_cpus=set([1]))
    inst_cell = compute.InstanceNUMACell(
        cpuset=set(range(2)),
        memory=2048,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
        cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertIsNone(fitted)
def test_get_pinning_isolate_policy_no_fully_free_cores(self):
    # Each sibling pair has one pinned pCPU, so no core is fully free and
    # the "isolate" policy cannot place the guest at all.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(4)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3])],
        mempages=[],
        pinned_cpus=set([1, 2]))
    inst_cell = compute.InstanceNUMACell(
        cpuset=set(range(2)),
        memory=2048,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
        cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertIsNone(fitted)
def test_get_pinning_isolate_policy_fits(self):
    # A host without hyperthreading (no sibling info) satisfies "isolate"
    # trivially: two vCPUs map to two single-threaded cores.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(4)),
        memory=4096,
        memory_usage=0,
        siblings=[],
        mempages=[],
        pinned_cpus=set([]))
    inst_cell = compute.InstanceNUMACell(
        cpuset=set(range(2)),
        memory=2048,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
        cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_isolate_policy_fits_ht_host(self):
    # On a hyperthreaded host with both cores fully free, "isolate" pins
    # one vCPU per core and reports a single-threaded guest topology.
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(4)),
        memory=4096,
        memory_usage=0,
        siblings=[set([0, 1]), set([2, 3])],
        mempages=[],
        pinned_cpus=set([]))
    inst_cell = compute.InstanceNUMACell(
        cpuset=set(range(2)),
        memory=2048,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
        cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
def test_get_pinning_isolate_policy_fits_w_usage(self):
    # pCPUs 0 and 1 are used, but two sibling pairs remain fully free, so
    # an "isolate" guest with two vCPUs still fits (one per free core).
    host_cell = compute.NUMACell(
        id=0,
        cpuset=set(range(8)),
        memory=4096,
        memory_usage=0,
        pinned_cpus=set([0, 1]),
        siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
        mempages=[])
    inst_cell = compute.InstanceNUMACell(
        cpuset=set(range(2)),
        memory=2048,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
        cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
    fitted = hw._numa_fit_instance_cell_with_pinning(host_cell, inst_cell)
    self.assertInstanceCellPinned(fitted)
    expected_topo = compute.VirtCPUTopology(sockets=1, cores=2, threads=1)
    self.assertEqualTopology(expected_topo, fitted.cpu_topology)
class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Tests for fitting whole pinned instance NUMA topologies onto host
    topologies, and for accounting pinned-CPU usage from instances."""

    def test_host_numa_fit_instance_to_host_single_cell(self):
        # Both host cells are empty, so the single guest cell may land on
        # either of them.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([])),
                   compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))]
        )
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)

        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 1))

    def test_host_numa_fit_instance_to_host_single_cell_w_usage(self):
        # Cell 0 already has a pinned pCPU, so only cell 1 can take the
        # 2-vCPU guest cell.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1]),
                                    pinned_cpus=set([0]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[]),
                   compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)

        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(1,))

    def test_host_numa_fit_instance_to_host_single_cell_fail(self):
        # Both host cells have usage, so neither can hold two dedicated
        # vCPUs; the fit returns None.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
                                    pinned_cpus=set([0]), memory_usage=0,
                                    siblings=[], mempages=[]),
                   compute.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    pinned_cpus=set([2]), memory_usage=0,
                                    siblings=[], mempages=[])])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_host_numa_fit_instance_to_host_fit(self):
        # Two empty 4-CPU host cells take a two-cell guest topology.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([])),
                   compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   compute.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)

        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 1))

    def test_host_numa_fit_instance_to_host_barely_fit(self):
        # Only cells 0 and 2 retain two free pCPUs each; cell 1 (3 of 4
        # pinned) is skipped.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, pinned_cpus=set([0]),
                                    siblings=[], mempages=[],
                                    memory_usage=0),
                   compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6])),
                   compute.NUMACell(id=2, cpuset=set([8, 9, 10, 11]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([10, 11]))])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   compute.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)

        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 2))

    def test_host_numa_fit_instance_to_host_fail_capacity(self):
        # Neither host cell has two free pCPUs for the second guest cell,
        # so the whole fit fails.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    mempages=[], siblings=[],
                                    pinned_cpus=set([0])),
                   compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6]))])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   compute.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_host_numa_fit_instance_to_host_fail_topology(self):
        # Three guest cells cannot be mapped onto a two-cell host.
        host_topo = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([])),
                   compute.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))])
        inst_topo = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=1024,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   compute.InstanceNUMACell(
                cpuset=set([2, 3]), memory=1024,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   compute.InstanceNUMACell(
                cpuset=set([4, 5]), memory=1024,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_cpu_pinning_usage_from_instances(self):
        # Two 2-vCPU pinned instances together consume all four host pCPUs.
        host_pin = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=0,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))])
        inst_pin_1 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), id=0, memory=2048,
                cpu_pinning={0: 0, 1: 3},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_pin_2 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), id=0, memory=2048,
                cpu_pinning={0: 1, 1: 2},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])

        host_pin = hw.numa_usage_from_instances(
            host_pin, [inst_pin_1, inst_pin_2])
        self.assertEqual(set([0, 1, 2, 3]),
                         host_pin.cells[0].pinned_cpus)

    def test_cpu_pinning_usage_from_instances_free(self):
        # free=True releases the instances' pinnings; all pCPUs end up free.
        host_pin = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=0, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([0, 1, 3]))])
        inst_pin_1 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_pin_2 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=1024, id=0,
                cpu_pinning={0: 0, 1: 3},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        host_pin = hw.numa_usage_from_instances(
            host_pin, [inst_pin_1, inst_pin_2], free=True)
        self.assertEqual(set(), host_pin.cells[0].pinned_cpus)

    def test_host_usage_from_instances_fail(self):
        # Both instances claim pCPU 0; accounting must raise
        # CPUPinningInvalid rather than double-book the CPU.
        host_pin = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=0,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))])
        inst_pin_1 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048, id=0,
                cpu_pinning={0: 0, 1: 3},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_pin_2 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), id=0, memory=2048,
                cpu_pinning={0: 0, 1: 2},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])

        self.assertRaises(exception.CPUPinningInvalid,
                          hw.numa_usage_from_instances, host_pin,
                          [inst_pin_1, inst_pin_2])

    def test_host_usage_from_instances_isolate(self):
        # An "isolate" instance reserves the entire host cell cpuset even
        # though it only pins two vCPUs.
        host_pin = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=0,
                                    memory_usage=0,
                                    siblings=[set([0, 2]), set([1, 3])],
                                    mempages=[], pinned_cpus=set([]))])
        inst_pin_1 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048, id=0,
                cpu_pinning={0: 0, 1: 1},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
            )])

        new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1])
        self.assertEqual(host_pin.cells[0].cpuset,
                         new_cell.cells[0].pinned_cpus)
        self.assertEqual(new_cell.cells[0].cpu_usage, 4)

    def test_host_usage_from_instances_isolate_free(self):
        # Freeing an "isolate" instance releases the whole cell again.
        host_pin = compute.NUMATopology(
            cells=[compute.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=4,
                                    memory_usage=0,
                                    siblings=[set([0, 2]), set([1, 3])],
                                    mempages=[],
                                    pinned_cpus=set([0, 1, 2, 3]))])
        inst_pin_1 = compute.InstanceNUMATopology(
            cells=[compute.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048, id=0,
                cpu_pinning={0: 0, 1: 1},
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
            )])

        new_cell = hw.numa_usage_from_instances(host_pin,
                                                [inst_pin_1],
                                                free=True)
        self.assertEqual(set([]), new_cell.cells[0].pinned_cpus)
        self.assertEqual(new_cell.cells[0].cpu_usage, 0)
class CPURealtimeTestCase(test.NoDBTestCase):
    """Tests splitting vCPUs into realtime vs emulator sets from the
    ``hw:cpu_realtime_mask`` flavor extra spec / ``hw_cpu_realtime_mask``
    image property."""

    def test_success_flavor(self):
        # "^1" excludes vCPU 1 from the realtime set; it becomes emulator.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}}
        image = compute.ImageMeta.from_dict({})
        rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image)
        self.assertEqual(set([0, 2]), rt)
        self.assertEqual(set([1]), em)

    def test_success_image(self):
        # The image property supports ranges: "^0-1" excludes vCPUs 0 and 1.
        flavor = {"extra_specs": {}}
        image = compute.ImageMeta.from_dict(
            {"properties": {"hw_cpu_realtime_mask": "^0-1"}})
        rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image)
        self.assertEqual(set([2]), rt)
        self.assertEqual(set([0, 1]), em)

    def test_no_mask_configured(self):
        # No mask anywhere is an error, not a silent default.
        flavor = {"extra_specs": {}}
        image = compute.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)

    def test_mask_badly_configured(self):
        # Excluding every vCPU ("^0-2" on a 3-vCPU set) leaves no realtime
        # CPUs and must also raise.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}}
        image = compute.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)
| apache-2.0 |
bigfatpaulyj/py-airfoil | scons-local-2.2.0/SCons/Tool/mssdk.py | 14 | 1891 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
    """Add construction variables for an MS SDK to an Environment.

    SCons tool entry point; delegates all work to
    MSCommon.mssdk_setup_env().
    """
    mssdk_setup_env(env)
def exists(env):
    """SCons tool hook: report whether an MS SDK is available.

    Delegates to MSCommon.mssdk_exists(); `env` is unused but required by
    the SCons tool interface.
    """
    return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
keflavich/APEX_CMZ_H2CO | reduction/calibration.py | 1 | 7537 | import numpy as np
from astropy import log
from pyspeckit.spectrum.readers import read_class
from make_apex_cubes import all_apexfiles
import os
import paths
import collections
"""
Calibrators can be IDd with:
for ds in all_apexfiles:
cl = read_class.ClassObject(ds)
spdata = cl.get_spectra(line='CO(2-1)', range=np.array([-1,1,-1,1.])/206265.)
rslt = set([h['OBJECT'] for d,h in spdata if 'SKY' not in h['OBJECT'] and 'COLD' not in h['OBJECT'] and 'TCAL' not in h['OBJECT'] and 'TSYS' not in h['OBJECT'] and 'TREC' not in h['OBJECT']])
print os.path.basename(ds), rslt
H2CO cals:
for ds in all_apexfiles:
cl = read_class.ClassObject(ds)
spdata = cl.get_spectra(linere='shfi219|h2co', range=np.array([-1,1,-1,1.])/206265.)
rslt = set([h['OBJECT'] for d,h in spdata if 'SKY' not in h['OBJECT'] and 'COLD' not in h['OBJECT'] and 'TCAL' not in h['OBJECT'] and 'TSYS' not in h['OBJECT'] and 'TREC' not in h['OBJECT'] and 'MAP' not in h['OBJECT']])
print os.path.basename(ds), rslt
"""
# Per-species calibrator source names (as they appear in the CLASS files).
calibrators = {'CO': ('RAFGL1922', 'NGC6302', 'RT-SCO', 'IRAS15194-51',
                      'RAFGL2135', 'EP-AQR', 'PI1-GRU', 'RAFGL4211', 'CRL2688',
                      'IRAS17150-32'),
               'H2CO': ('SGRB2(M)', 'SGRB2(N)', ),
               }

# Telescope/backend identifiers searched for; joined into a regex below.
all_tels = ['AP-H201-X202', 'AP-H201-X201', 'AP-H201-F102', 'AP-H201-F101']
telescopere = "|".join(all_tels)

# Per-species line-name regexes used to select spectra from the CLASS data.
linere = {'H2CO': 'h2co|shfi219',
          'CO': 'CO\(2-1\)'}

# Fallback LSR velocities (km/s) when the header VOFF is zero; unknown
# sources default to 0.0 km/s.
velocities = collections.defaultdict(lambda: 0.0, {'SGRB2(N)': 65.0,
                                                   'SGRB2(M)': 65.0,
                                                   'RAFGL2135': 49.39,
                                                   })

# Nested accumulators: species -> telescope -> calibrator -> list of
# (observation date, fitted amplitude) / fitted Spectrum objects.
cal_data = {species: {tel: {cal: [] for cal in calibrators[species]}
                      for tel in all_tels}
            for species in calibrators}
cal_spectra = {species: {tel: {cal: [] for cal in calibrators[species]}
                         for tel in all_tels}
               for species in calibrators}

for ds in all_apexfiles:
    cl = read_class.ClassObject(ds)
    sources = [s.strip() for s in cl.sources]
    for species in calibrators:
        for calibrator in calibrators[species]:
            if calibrator in sources:
                try:
                    # range is +/-1 arcsec (converted to radians) around the
                    # source position, i.e. on-source pointings only.
                    spectra = cl.get_pyspeckit_spectra(source=calibrator,
                                                       linere=linere[species],
                                                       telescopere=telescopere,
                                                       range=np.array([-1, 1, -1, 1.]) / 206265.)
                except ValueError:
                    log.info("Skipped {0} {1}".format(calibrator, species))
                    continue
                # Use the header velocity offset; fall back to the table of
                # known source velocities when it is unset.
                vcen = float(spectra[0].header['VOFF'])
                if vcen == 0:
                    vcen = velocities[calibrator]
                vmin, vmax = vcen - 50, vcen + 50
                for sp in spectra:
                    # Reject clearly corrupted spectra (extreme amplitudes).
                    if sp.data.max() > 1e4:
                        #log.info("Skipped {3} {1} {0} {2} - extreme values"
                        #         .format(sp.header['XTEL'], calibrator,
                        #                 sp.header['DOBS'], species))
                        continue
                    if species == 'H2CO':  # use CH3OH because H2CO is absorbed
                        # Pick whichever CH3OH rest frequency falls in band.
                        # NOTE(review): frequencies are in GHz; the choice of
                        # 218.44005 vs 218.90336 depends on the tuning.
                        if 218.39258144781465 > sp.xarr.min() and 218.39258144781465 < sp.xarr.max():
                            sp.xarr.refX = 218.44005  #218.39258144781465 # CH3OH + 65.147 km/s
                        else:
                            sp.xarr.refX = 218.90336
                        sp.xarr.refX_unit = 'GHz'
                    sp.xarr.convert_to_unit('km/s')
                    # Skip spectra whose velocity coverage misses the line.
                    if sp.xarr.min() > 50 or sp.xarr.max() < -50:
                        log.info("Skipped {3} {1} {0} {2} - out of range"
                                 .format(sp.header['XTEL'], calibrator,
                                         sp.header['DOBS'], species))
                        continue
                    # Gaussian-plus-baseline fit around the line velocity.
                    sp.specfit(guesses=[0, 5, vcen, 5], negamp=False,
                               fittype='vheightgaussian', xmin=vmin, xmax=vmax,
                               verbose=False)
                    # Retry zero-amplitude fits (fit failed to converge).
                    # NOTE(review): can loop indefinitely if the fit never
                    # yields a nonzero amplitude.
                    while sp.specfit.parinfo.AMPLITUDE0.value == 0:
                        log.info("Repeating {3} {1} {0} {2} - zeros are unacceptable"
                                 .format(sp.header['XTEL'], calibrator,
                                         sp.header['DOBS'], species))
                        sp.specfit(guesses=[0, 5, vcen, 5], negamp=False,
                                   fittype='vheightgaussian', xmin=vmin, xmax=vmax,
                                   verbose=False)
                    rslt = (sp.header['DOBS'], sp.specfit.parinfo.AMPLITUDE0.value)
                    cal_data[species][sp.header['XTEL']][calibrator].append(rslt)
                    cal_spectra[species][sp.header['XTEL']][calibrator].append(sp)
                    log.info("{5} {1} {0} {2}: A={3}, v={4}"
                             .format(sp.header['XTEL'], calibrator,
                                     sp.header['DOBS'],
                                     sp.specfit.parinfo.AMPLITUDE0.value,
                                     sp.specfit.parinfo.SHIFT0.value,
                                     species))
                    # Warn on fits that landed far from the expected velocity.
                    if abs(sp.specfit.parinfo.SHIFT0.value - vcen) > 5:
                        log.warn("Velocity difference for {3} {0}:{1} was {2}"
                                 .format(sp.header['XTEL'], calibrator,
                                         abs(sp.specfit.parinfo.SHIFT0.value - vcen),
                                         species))

# Ensure the output directory for the diagnostic plots exists.
if not (os.path.exists(paths.fpath("calibration")) and
        os.path.isdir(paths.fpath('calibration'))):
    os.mkdir(paths.fpath('calibration'))

import pylab as pl
from astropy.time import Time
# Epochs (decimal years) bracketing the suspect calibration period:
# date2..date1 is the "bad" window, after date1 is the reference window.
date1 = Time('2014-06-13 00:00:00.000', format='iso').jyear
date2 = Time('2014-04-23 00:00:00.000', format='iso').jyear

for species in cal_data:
    for tel in cal_data[species]:
        for source in cal_data[species][tel]:
            if (cal_data[species][tel][source]) == []:
                continue
            date, data = np.array(cal_data[species][tel][source]).T
            if len(data) > 0:
                # One amplitude-vs-time plot per species/telescope/source.
                pl.clf()
                pl.plot(date[data > 0.5], data[data > 0.5], '.')
                pl.xlabel("Decimal Year")
                pl.ylabel("Amplitude (K)")
                pl.title("{0} {1} {2}".format(species, tel, source))
                pl.savefig(paths.fpath('calibration/{0}_{1}_{2}_calibration.png'
                                       .format(species, tel, source)))
                # Compare mean amplitudes in the suspect window vs the
                # post-June reference window to derive a calibration factor.
                mask = (date > date2) & (date < date1) & (data > 1)
                mask2014 = (date > date1) & (data > 1)
                if mask.sum() and mask2014.sum():
                    bad_mean = data[mask].mean()
                    good_mean = data[mask2014].mean()
                    print("{0:5s} {1:10s} {2:12s}: calfactor={3:5.3f} "
                          "npts(apr-june)={4:3d} npts(>june)={5:3d}".format(species,
                                                                            tel,
                                                                            source,
                                                                            good_mean / bad_mean,
                                                                            mask.sum(),
                                                                            mask2014.sum()))
| bsd-3-clause |
Razorsheep/kleinesonne | node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()

# GUIDs for project types.  These are the well-known Visual Studio type
# GUIDs: 'project' is presumably the C++ (.vcproj) project type and
# 'folder' the solution-folder type -- confirm against MSDN if changed.
ENTRY_TYPE_GUIDS = {
    'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
    'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.

  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-line string calculated from the name and seed.

  This generates something which looks like a GUID, but depends only on the
  name and seed. This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can explicitly
  determine the GUID to refer to explicitly. It also means that the GUID will
  not change when the project for a target is rebuilt.
  """
  # Calculate a MD5 signature for the seed and name.  Encode explicitly:
  # hashlib.md5() requires bytes on Python 3 and would raise TypeError on a
  # text string; on Python 2 encoding an ASCII str is a no-op.
  d = _new_md5((str(seed) + str(name)).encode('utf-8')).hexdigest().upper()
  # Convert most of the signature to GUID form (discard the rest)
  guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
          + '-' + d[20:32] + '}')
  return guid
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
  """Base class for entries (projects and folders) in a solution.

  NOTE: ordering relies on Python 2's __cmp__ protocol and the cmp()
  builtin; under Python 3 this class would need __lt__/__eq__ instead.
  """
  def __cmp__(self, other):
    # Sort by name then guid (so things are in order on vs2008).
    return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""

  def __init__(self, path, name=None, entries=None, guid=None, items=None):
    """Initializes the folder.

    Args:
      path: Full path to the folder.
      name: Name of the folder.
      entries: List of folder entries to nest inside this folder.  May contain
          Folder or Project objects.  May be None, if the folder is empty.
      guid: GUID to use for folder, if not None.
      items: List of solution items to include in the folder project.  May be
          None, if the folder does not directly contain items.
    """
    # Default to the last path component, same idiom as MSVSProject.
    self.name = name or os.path.basename(path)
    self.path = path
    self.guid = guid

    # Copy passed lists (or set to empty lists)
    self.entries = sorted(list(entries or []))
    self.items = list(items or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']

  def get_guid(self):
    if self.guid is None:
      # Use consistent guids for folders (so things don't regenerate).
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
  """Visual Studio project."""

  def __init__(self, path, name = None, dependencies = None, guid = None,
               spec = None, build_file = None, config_platform_overrides = None,
               fixpath_prefix = None):
    """Initializes the project.

    Args:
      path: Absolute path to the project file.
      name: Name of project.  If None, the name will be the same as the base
          name of the project file.
      dependencies: List of other Project objects this project is dependent
          upon, if not None.
      guid: GUID to use for project, if not None.
      spec: Dictionary specifying how to build this project.
      build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
          used in place of the default for this target.
      fixpath_prefix: the path used to adjust the behavior of _fixpath
    """
    self.path = path
    self.guid = guid
    self.spec = spec
    self.build_file = build_file
    # Use project filename if name not specified
    self.name = name or os.path.splitext(os.path.basename(path))[0]

    # Copy passed lists (or set to empty lists)
    self.dependencies = list(dependencies or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['project']

    if config_platform_overrides:
      self.config_platform_overrides = config_platform_overrides
    else:
      self.config_platform_overrides = {}
    self.fixpath_prefix = fixpath_prefix
    # Toolset override; remains None unless set_msbuild_toolset() is called.
    self.msbuild_toolset = None

  def set_dependencies(self, dependencies):
    # Replace (not extend) the dependency list; copies the input list.
    self.dependencies = list(dependencies or [])

  def get_guid(self):
    if self.guid is None:
      # Set GUID from path
      # TODO(rspangler): This is fragile.
      # 1. We can't just use the project filename sans path, since there could
      #    be multiple projects with the same base name (for example,
      #    foo/unittest.vcproj and bar/unittest.vcproj).
      # 2. The path needs to be relative to $SOURCE_ROOT, so that the project
      #    GUID is the same whether it's included from base/base.sln or
      #    foo/bar/baz/baz.sln.
      # 3. The GUID needs to be the same each time this builder is invoked, so
      #    that we don't need to rebuild the solution when the project changes.
      # 4. We should be able to handle pre-built project files by reading the
      #    GUID from the files.
      self.guid = MakeGuid(self.name)
    return self.guid

  def set_msbuild_toolset(self, msbuild_toolset):
    self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
  """Visual Studio solution."""

  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.

    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution.  May contain Folder or Project
          objects.  May be None, if the folder is empty.
      variants: List of build variant strings.  If none, a default list will
          be used.
      websiteProperties: Flag to decide if the website properties section
          is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version

    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])

    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config.  Should we be able to handle variants being a dict,
    # or add a separate variant_map variable?  If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.

    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.
    self.Write()

  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.

    Raises:
      IndexError: An entry appears multiple times.
    """
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)

      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue

      all_entries.add(e)

      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries

    all_entries = sorted(all_entries)

    # Open file and print header.  Lines are emitted with '\r\n' endings
    # since .sln files are Windows-format text.
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())

    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))

      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')

      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')

      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')

      f.write('EndProject\r\n')

    # Global section
    f.write('Global\r\n')

    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')

    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()

    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        # Apply any per-project platform override for this variant.
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))

        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')

    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')

    # Folder mappings
    # Omit this section if there are no folders
    if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
      f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
      for e in all_entries:
        if not isinstance(e, MSVSFolder):
          continue        # Does not apply to projects, only folders
        for subentry in e.entries:
          f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
      f.write('\tEndGlobalSection\r\n')

    f.write('EndGlobal\r\n')

    f.close()
| agpl-3.0 |
ewbankkit/cloud-custodian | c7n/actions/securityhub.py | 3 | 12857 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from datetime import datetime
from dateutil.tz import tzutc
import hashlib
import jmespath
import json
from .core import BaseAction
from c7n.utils import (
type_schema, local_session, chunks, dumps, filter_empty, get_partition)
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources as aws_resources
from c7n.version import version
# Top-level namespaces of the AWS Security Finding Format (ASFF) type
# taxonomy. The first path segment of every user-supplied finding type
# must be one of these (enforced in PostFinding.validate).
FindingTypes = {
    "Software and Configuration Checks",
    "TTPs",
    "Effects",
    "Unusual Behaviors",
    "Sensitive Data Identifications"
}

# Mostly undocumented value size limit
# (maximum length accepted for string values placed in Details.Other).
SECHUB_VALUE_SIZE_LIMIT = 1024
class PostFinding(BaseAction):
    """Report a finding to AWS Security Hub.

    Custodian acts as a finding provider, allowing users to craft
    policies that report to the AWS SecurityHub.

    For resources that are taggable, we will tag the resource with an identifier
    such that further findings generate updates.

    Example generate a finding for accounts that don't have shield enabled.

    :example:

    .. code-block:: yaml

      policies:

       - name: account-shield-enabled
         resource: account
         filters:
           - shield-enabled
         actions:
           - type: post-finding
             description: |
                Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge).
             severity_normalized: 6
             types:
               - "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)"
             recommendation: "Enable shield"
             recommendation_url: "https://www.example.com/policies/AntiDDoS.html"
             confidence: 100
             compliance_status: FAILED

    """ # NOQA

    # ASFF schema version stamped into every finding.
    FindingVersion = "2018-10-08"
    # Product segment used when composing the SecurityHub ProductArn.
    ProductName = "default"

    permissions = ('securityhub:BatchImportFindings',)
    schema_alias = True
    schema = type_schema(
        "post-finding",
        required=["types"],
        title={"type": "string"},
        description={'type': 'string'},
        severity={"type": "number", 'default': 0},
        severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0},
        confidence={"type": "number", "min": 0, "max": 100},
        criticality={"type": "number", "min": 0, "max": 100},
        # Cross region aggregation
        region={'type': 'string', 'description': 'cross-region aggregation target'},
        recommendation={"type": "string"},
        recommendation_url={"type": "string"},
        fields={"type": "object"},
        batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},
        types={
            "type": "array",
            "minItems": 1,
            "items": {"type": "string"},
        },
        compliance_status={
            "type": "string",
            "enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"],
        },
    )

    # Sentinel grouping key for resources not yet tagged with a finding id.
    NEW_FINDING = 'New'

    def validate(self):
        """Reject finding types that are not 'namespace/category/classifier'
        paths rooted in one of the known ASFF namespaces."""
        for finding_type in self.data["types"]:
            if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes:
                raise PolicyValidationError(
                    "Finding types must be in the format 'namespace/category/classifier'."
                    " Found {}. Valid namespace values are: {}.".format(
                        finding_type, " | ".join([ns for ns in FindingTypes])))

    def get_finding_tag(self, resource):
        """Return the 'finding_id:created_at' tag value previously stamped on
        the resource for this policy/title, or None if untagged."""
        finding_tag = None
        tags = resource.get('Tags', [])

        finding_key = '{}:{}'.format('c7n:FindingId',
                                     self.data.get('title', self.manager.ctx.policy.name))

        # Support Tags as dictionary
        if isinstance(tags, dict):
            return tags.get(finding_key)

        # Support Tags as list of {'Key': 'Value'}
        for t in tags:
            key = t['Key']
            value = t['Value']
            if key == finding_key:
                finding_tag = value
        return finding_tag

    def group_resources(self, resources):
        """Bucket resources by their existing finding tag (or NEW_FINDING),
        so updates and new findings are posted separately."""
        grouped_resources = {}
        for r in resources:
            finding_tag = self.get_finding_tag(r) or self.NEW_FINDING
            grouped_resources.setdefault(finding_tag, []).append(r)
        return grouped_resources

    def process(self, resources, event=None):
        """Post (or update) SecurityHub findings for the given resources.

        New findings get a fresh id, and taggable resources are stamped
        with it so subsequent runs update rather than duplicate.
        """
        region_name = self.data.get('region', self.manager.config.region)
        client = local_session(
            self.manager.session_factory).client(
                "securityhub", region_name=region_name)

        now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
        # default batch size to one to work around security hub console issue
        # which only shows a single resource in a finding.
        batch_size = self.data.get('batch_size', 1)
        stats = Counter()
        for key, grouped_resources in self.group_resources(resources).items():
            for resource_set in chunks(grouped_resources, batch_size):
                stats['Finding'] += 1
                if key == self.NEW_FINDING:
                    finding_id = None
                    created_at = now
                    updated_at = now
                else:
                    # Tag value encodes "finding_id:created_at"; keep the
                    # original creation time and only bump updated_at.
                    finding_id, created_at = self.get_finding_tag(
                        resource_set[0]).split(':', 1)
                    updated_at = now

                finding = self.get_finding(
                    resource_set, finding_id, created_at, updated_at)
                import_response = client.batch_import_findings(
                    Findings=[finding])
                if import_response['FailedCount'] > 0:
                    stats['Failed'] += import_response['FailedCount']
                    self.log.error(
                        "import_response=%s" % (import_response))
                if key == self.NEW_FINDING:
                    stats['New'] += len(resource_set)
                    # Tag resources with new finding ids
                    tag_action = self.manager.action_registry.get('tag')
                    if tag_action is None:
                        continue
                    tag_action({
                        'key': '{}:{}'.format(
                            'c7n:FindingId',
                            self.data.get(
                                'title', self.manager.ctx.policy.name)),
                        'value': '{}:{}'.format(
                            finding['Id'], created_at)},
                        self.manager).process(resource_set)
                else:
                    stats['Update'] += len(resource_set)
        self.log.debug(
            "policy:%s securityhub %d findings resources %d new %d updated %d failed",
            self.manager.ctx.policy.name,
            stats['Finding'],
            stats['New'],
            stats['Update'],
            stats['Failed'])

    def get_finding(self, resources, existing_finding_id, created_at, updated_at):
        """Build the ASFF finding dict for a set of resources.

        If no existing id is supplied, a deterministic id is derived from
        region, account, the policy definition hash and the resource-id set
        hash, so identical inputs reproduce the same finding id.
        """
        policy = self.manager.ctx.policy
        model = self.manager.resource_type
        region = self.data.get('region', self.manager.config.region)

        if existing_finding_id:
            finding_id = existing_finding_id
        else:
            finding_id = '{}/{}/{}/{}'.format(
                self.manager.config.region,
                self.manager.config.account_id,
                hashlib.md5(json.dumps(
                    policy.data).encode('utf8')).hexdigest(),
                hashlib.md5(json.dumps(list(sorted(
                    [r[model.id] for r in resources]))).encode(
                        'utf8')).hexdigest())
        finding = {
            "SchemaVersion": self.FindingVersion,
            "ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
                region,
                self.manager.config.account_id,
                self.manager.config.account_id,
                self.ProductName,
            ),
            "AwsAccountId": self.manager.config.account_id,
            # Long search chain for description values, as this was
            # made required long after users had policies deployed, so
            # use explicit description, or policy description, or
            # explicit title, or policy name, in that order.
            "Description": self.data.get(
                "description", policy.data.get(
                    "description",
                    self.data.get('title', policy.name))).strip(),
            "Title": self.data.get("title", policy.name),
            'Id': finding_id,
            "GeneratorId": policy.name,
            'CreatedAt': created_at,
            'UpdatedAt': updated_at,
            "RecordState": "ACTIVE",
        }

        severity = {'Product': 0, 'Normalized': 0}
        if self.data.get("severity") is not None:
            severity["Product"] = self.data["severity"]
        if self.data.get("severity_normalized") is not None:
            severity["Normalized"] = self.data["severity_normalized"]
        if severity:
            finding["Severity"] = severity

        recommendation = {}
        if self.data.get("recommendation"):
            recommendation["Text"] = self.data["recommendation"]
        if self.data.get("recommendation_url"):
            recommendation["Url"] = self.data["recommendation_url"]
        if recommendation:
            finding["Remediation"] = {"Recommendation": recommendation}

        if "confidence" in self.data:
            finding["Confidence"] = self.data["confidence"]
        if "criticality" in self.data:
            finding["Criticality"] = self.data["criticality"]
        if "compliance_status" in self.data:
            finding["Compliance"] = {"Status": self.data["compliance_status"]}

        fields = {
            'resource': policy.resource_type,
            'ProviderName': 'CloudCustodian',
            'ProviderVersion': version
        }
        if "fields" in self.data:
            fields.update(self.data["fields"])
        else:
            # Fall back to the policy's own "k:v" tags as product fields.
            tags = {}
            for t in policy.tags:
                if ":" in t:
                    k, v = t.split(":", 1)
                else:
                    k, v = t, ""
                tags[k] = v
            fields.update(tags)
        if fields:
            finding["ProductFields"] = fields

        finding_resources = []
        for r in resources:
            finding_resources.append(self.format_resource(r))
        finding["Resources"] = finding_resources
        finding["Types"] = list(self.data["types"])

        return filter_empty(finding)

    def format_resource(self, r):
        """Render one resource as an ASFF Resource element; implemented by
        resource-specific subclasses."""
        raise NotImplementedError("subclass responsibility")
class OtherResourcePostFinding(PostFinding):
    """Generic post-finding action for any arn-capable resource type.

    Serializes the resource's scalar attributes, plus any jmespath
    expressions declared in the ``fields`` class attribute, into the ASFF
    resource element's ``Details.Other`` map.
    """

    fields = ()

    def format_resource(self, r):
        """Render resource dict ``r`` as an ASFF Resource of Type 'Other'.

        Returns a dict with Type/Id/Region/Partition/Details keys, plus
        Tags when the resource carries a ``Tags`` key-value list.
        """
        details = {}
        for k in r:
            # Copy only scalar values. The original code checked
            # isinstance(k, (list, dict)) on the *key*, which is always
            # False (lists and dicts are unhashable and can never be dict
            # keys) -- the value is what must be inspected. Nested data is
            # only included when explicitly requested via self.fields.
            if isinstance(r[k], (list, dict)):
                continue
            details[k] = r[k]

        for f in self.fields:
            value = jmespath.search(f['expr'], r)
            if not value:
                continue
            details[f['key']] = value

        # Details.Other values must be strings of bounded size; coerce
        # datetimes/numbers/containers and truncate to the service limit.
        for k, v in details.items():
            if isinstance(v, datetime):
                v = v.isoformat()
            elif isinstance(v, (list, dict)):
                v = dumps(v)
            elif isinstance(v, (int, float, bool)):
                v = str(v)
            else:
                continue
            details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]

        details['c7n:resource-type'] = self.manager.type
        other = {
            'Type': 'Other',
            'Id': self.manager.get_arns([r])[0],
            'Region': self.manager.config.region,
            'Partition': get_partition(self.manager.config.region),
            'Details': {'Other': filter_empty(details)}
        }
        tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
        if tags:
            other['Tags'] = tags
        return other

    @classmethod
    def register_resource(klass, registry, event):
        """Registry hook: install this action as 'post-finding' on every
        arn-capable resource type that does not already provide one."""
        for rtype, resource_manager in registry.items():
            if not resource_manager.has_arn():
                continue
            if 'post-finding' in resource_manager.action_registry:
                continue
            resource_manager.action_registry.register('post-finding', klass)
# Once all AWS resource types have been loaded, register the generic
# post-finding action on each arn-capable resource type.
aws_resources.subscribe(
    aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)
| apache-2.0 |
ar7z1/ansible | lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py | 13 | 35970 | #!/usr/bin/python
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_loadbalancer
version_added: "2.4"
short_description: Manage Azure load balancers.
description:
- Create, update and delete Azure load balancers
options:
resource_group:
description:
- Name of a resource group where the load balancer exists or will be created.
required: true
name:
description:
- Name of the load balancer.
required: true
state:
description:
- Assert the state of the load balancer. Use C(present) to create/update a load balancer, or
C(absent) to delete one.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
sku:
description:
The load balancer SKU.
choices:
- Basic
- Standard
version_added: 2.6
frontend_ip_configurations:
description: List of frontend IPs to be used
suboptions:
name:
description: Name of the frontend ip configuration.
required: True
public_ip_address:
description: Name of an existing public IP address object in the current resource group to associate with the frontend IP configuration.
private_ip_address:
description: The private IP address to assign to the frontend IP configuration.
version_added: 2.6
private_ip_allocation_method:
description: The Private IP allocation method.
choices:
- Static
- Dynamic
version_added: 2.6
subnet:
description:
- The reference of the subnet resource.
- Should be an existing subnet's resource id.
version_added: 2.6
version_added: 2.5
backend_address_pools:
description: List of backend address pools
suboptions:
name:
description: Name of the backend address pool.
required: True
version_added: 2.5
probes:
description: List of probe definitions used to check endpoint health.
suboptions:
name:
description: Name of the probe.
required: True
port:
description: Probe port for communicating the probe. Possible values range from 1 to 65535, inclusive.
required: True
protocol:
description:
- The protocol of the end point to be probed.
- If 'Tcp' is specified, a received ACK is required for the probe to be successful.
- If 'Http' is specified, a 200 OK response from the specified URL is required for the probe to be successful.
choices:
- Tcp
- Http
default: Tcp
interval:
description:
- The interval, in seconds, for how frequently to probe the endpoint for health status.
- Slightly less than half the allocated timeout period, which allows two full probes before taking the instance out of rotation.
- The default value is 15, the minimum value is 5.
default: 15
fail_count:
description:
- The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint.
- This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
default: 3
aliases:
- number_of_probes
request_path:
description:
- The URI used for requesting health status from the VM.
- Path is required if a protocol is set to http. Otherwise, it is not allowed.
version_added: 2.5
inbound_nat_pools:
description:
- Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer.
- Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range.
- Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules.
- Inbound NAT pools are referenced from virtual machine scale sets.
- NICs that are associated with individual virtual machines cannot reference an inbound NAT pool.
- They have to reference individual inbound NAT rules.
suboptions:
name:
description: Name of the inbound NAT pool.
required: True
frontend_ip_configuration_name:
description: A reference to frontend IP addresses.
required: True
protocol:
description: IP protocol for the NAT pool
choices:
- Tcp
- Udp
- All
default: Tcp
frontend_port_range_start:
description:
- The first port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer.
- Acceptable values range between 1 and 65534.
required: True
frontend_port_range_end:
description:
- The last port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer.
- Acceptable values range between 1 and 65535.
required: True
backend_port:
description:
- The port used for internal connections on the endpoint.
- Acceptable values are between 1 and 65535.
version_added: 2.5
load_balancing_rules:
description:
- Object collection representing the load balancing rules Gets the provisioning.
suboptions:
name:
description: name of the load balancing rule.
required: True
frontend_ip_configuration:
description: A reference to frontend IP addresses.
required: True
backend_address_pool:
description: A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
required: True
probe:
description: The name of the load balancer probe this rule should use for health checks.
required: True
protocol:
description: IP protocol for the load balancing rule.
choices:
- Tcp
- Udp
- All
default: Tcp
load_distribution:
description:
- The session persistence policy for this rule; C(Default) is no persistence.
choices:
- Default
- SourceIP
- SourceIPProtocol
default: Default
frontend_port:
description:
- The port for the external endpoint.
- Frontend port numbers must be unique across all rules within the load balancer.
- Acceptable values are between 0 and 65534.
- Note that value 0 enables "Any Port"
backend_port:
description:
- The port used for internal connections on the endpoint.
- Acceptable values are between 0 and 65535.
- Note that value 0 enables "Any Port"
idle_timeout:
description:
- The timeout for the TCP idle connection.
- The value can be set between 4 and 30 minutes.
- The default value is 4 minutes.
- This element is only used when the protocol is set to TCP.
enable_floating_ip:
description:
- Configures SNAT for the VMs in the backend pool to use the publicIP address specified in the frontend of the load balancing rule.
version_added: 2.5
public_ip_address_name:
description:
- (deprecated) Name of an existing public IP address object to associate with the load balancer.
- This option has been deprecated, and will be removed in 2.9. Use I(frontend_ip_configurations) instead.
aliases:
- public_ip_address
- public_ip_name
- public_ip
probe_port:
description:
- (deprecated) The port that the health probe will use.
- This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
probe_protocol:
description:
- (deprecated) The protocol to use for the health probe.
- This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
choices:
- Tcp
- Http
probe_interval:
description:
- (deprecated) Time (in seconds) between endpoint health probes.
- This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
default: 15
probe_fail_count:
description:
- (deprecated) The amount of probe failures for the load balancer to make a health determination.
- This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
default: 3
probe_request_path:
description:
- (deprecated) The URL that an HTTP probe will use (only relevant if probe_protocol is set to Http).
- This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
protocol:
description:
- (deprecated) The protocol (TCP or UDP) that the load balancer will use.
- This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
choices:
- Tcp
- Udp
load_distribution:
description:
- (deprecated) The type of load distribution that the load balancer will employ.
- This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
choices:
- Default
- SourceIP
- SourceIPProtocol
frontend_port:
description:
- (deprecated) Frontend port that will be exposed for the load balancer.
- This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
backend_port:
description:
- (deprecated) Backend port that will be exposed for the load balancer.
- This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
idle_timeout:
description:
- (deprecated) Timeout for TCP idle connection in minutes.
- This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
default: 4
natpool_frontend_port_start:
description:
- (deprecated) Start of the port range for a NAT pool.
- This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
natpool_frontend_port_end:
description:
- (deprecated) End of the port range for a NAT pool.
- This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
natpool_backend_port:
description:
- (deprecated) Backend port used by the NAT pool.
- This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
natpool_protocol:
description:
- (deprecated) The protocol for the NAT pool.
- This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Thomas Stringer (@tstringer)"
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: create load balancer
azure_rm_loadbalancer:
resource_group: testrg
name: testloadbalancer1
frontend_ip_configurations:
- name: frontendipconf0
public_ip_address: testpip
backend_address_pools:
- name: backendaddrpool0
probes:
- name: prob0
port: 80
inbound_nat_pools:
- name: inboundnatpool0
frontend_ip_configuration_name: frontendipconf0
protocol: Tcp
frontend_port_range_start: 80
frontend_port_range_end: 81
backend_port: 8080
load_balancing_rules:
- name: lbrbalancingrule0
frontend_ip_configuration: frontendipconf0
backend_address_pool: backendaddrpool0
frontend_port: 80
backend_port: 80
probe: prob0
'''
RETURN = '''
state:
description: Current state of the load balancer
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import random
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
# Suboption schema for entries of the frontend_ip_configurations list.
frontend_ip_configuration_spec = dict(
    name=dict(
        type='str',
        required=True
    ),
    public_ip_address=dict(
        type='str'
    ),
    private_ip_address=dict(
        type='str'
    ),
    private_ip_allocation_method=dict(
        type='str'
    ),
    subnet=dict(
        type='str'
    )
)

# Suboption schema for entries of the backend_address_pools list.
backend_address_pool_spec = dict(
    name=dict(
        type='str',
        required=True
    )
)

# Suboption schema for entries of the probes (health probe) list.
probes_spec = dict(
    name=dict(
        type='str',
        required=True
    ),
    port=dict(
        type='int',
        required=True
    ),
    protocol=dict(
        type='str',
        choices=['Tcp', 'Http'],
        default='Tcp'
    ),
    interval=dict(
        type='int',
        default=15
    ),
    fail_count=dict(
        type='int',
        default=3,
        aliases=['number_of_probes']
    ),
    request_path=dict(
        type='str'
    )
)

# Suboption schema for entries of the inbound_nat_pools list.
inbound_nat_pool_spec = dict(
    name=dict(
        type='str',
        required=True
    ),
    frontend_ip_configuration_name=dict(
        type='str',
        required=True
    ),
    protocol=dict(
        type='str',
        choices=['Tcp', 'Udp', 'All'],
        default='Tcp'
    ),
    frontend_port_range_start=dict(
        type='int',
        required=True
    ),
    frontend_port_range_end=dict(
        type='int',
        required=True
    ),
    backend_port=dict(
        type='int',
        required=True
    )
)

# Suboption schema for entries of the load_balancing_rules list.
load_balancing_rule_spec = dict(
    name=dict(
        type='str',
        required=True
    ),
    frontend_ip_configuration=dict(
        type='str',
        required=True
    ),
    backend_address_pool=dict(
        type='str',
        required=True
    ),
    probe=dict(
        type='str',
        required=True
    ),
    protocol=dict(
        type='str',
        choices=['Tcp', 'Udp', 'All'],
        default='Tcp'
    ),
    load_distribution=dict(
        type='str',
        choices=['Default', 'SourceIP', 'SourceIPProtocol'],
        default='Default'
    ),
    frontend_port=dict(
        type='int',
        required=True
    ),
    backend_port=dict(
        type='int'
    ),
    idle_timeout=dict(
        type='int',
        default=4
    ),
    enable_floating_ip=dict(
        type='bool'
    )
)
class AzureRMLoadBalancer(AzureRMModuleBase):
    """Configuration class for an Azure RM load balancer resource"""

    def __init__(self):
        # Module argument spec: new-style structured lists (frontend_ip_configurations,
        # backend_address_pools, probes, inbound_nat_pools, load_balancing_rules)
        # plus the flat deprecated options retained for backward compatibility.
        self.module_args = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
            location=dict(
                type='str'
            ),
            sku=dict(
                type='str',
                choices=['Basic', 'Standard']
            ),
            frontend_ip_configurations=dict(
                type='list',
                elements='dict',
                options=frontend_ip_configuration_spec
            ),
            backend_address_pools=dict(
                type='list',
                elements='dict',
                options=backend_address_pool_spec
            ),
            probes=dict(
                type='list',
                elements='dict',
                options=probes_spec
            ),
            inbound_nat_pools=dict(
                type='list',
                elements='dict',
                options=inbound_nat_pool_spec
            ),
            load_balancing_rules=dict(
                type='list',
                elements='dict',
                options=load_balancing_rule_spec
            ),
            public_ip_address_name=dict(
                type='str',
                aliases=['public_ip_address', 'public_ip_name', 'public_ip']
            ),
            probe_port=dict(
                type='int'
            ),
            probe_protocol=dict(
                type='str',
                choices=['Tcp', 'Http']
            ),
            probe_interval=dict(
                type='int',
                default=15
            ),
            probe_fail_count=dict(
                type='int',
                default=3
            ),
            probe_request_path=dict(
                type='str'
            ),
            protocol=dict(
                type='str',
                choices=['Tcp', 'Udp']
            ),
            load_distribution=dict(
                type='str',
                choices=['Default', 'SourceIP', 'SourceIPProtocol']
            ),
            frontend_port=dict(
                type='int'
            ),
            backend_port=dict(
                type='int'
            ),
            idle_timeout=dict(
                type='int',
                default=4
            ),
            natpool_frontend_port_start=dict(
                type='int'
            ),
            natpool_frontend_port_end=dict(
                type='int'
            ),
            natpool_backend_port=dict(
                type='int'
            ),
            natpool_protocol=dict(
                type='str'
            )
        )

        # Instance attributes populated from module args in exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.sku = None
        self.frontend_ip_configurations = None
        self.backend_address_pools = None
        self.probes = None
        self.inbound_nat_pools = None
        self.load_balancing_rules = None
        self.public_ip_address_name = None
        self.state = None
        self.probe_port = None
        self.probe_protocol = None
        self.probe_interval = None
        self.probe_fail_count = None
        self.probe_request_path = None
        self.protocol = None
        self.load_distribution = None
        self.frontend_port = None
        self.backend_port = None
        self.idle_timeout = None
        self.natpool_frontend_port_start = None
        self.natpool_frontend_port_end = None
        self.natpool_backend_port = None
        self.natpool_protocol = None
        self.tags = None

        self.results = dict(changed=False, state=dict())

        super(AzureRMLoadBalancer, self).__init__(
            derived_arg_spec=self.module_args,
            supports_check_mode=True
        )

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy all declared args (plus 'tags') onto self.
        for key in list(self.module_args.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        changed = False

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location
        load_balancer = self.get_load_balancer()

        if self.state == 'present':
            # compatible parameters: when none of the structured lists are
            # given, synthesize them from the deprecated flat options.
            if not self.frontend_ip_configurations and not self.backend_address_pools and not self.probes and not self.inbound_nat_pools:
                self.deprecate('Discrete load balancer config settings are deprecated and will be removed.'
                               ' Use frontend_ip_configurations, backend_address_pools, probes, inbound_nat_pools lists instead.', version='2.9')
                frontend_ip_name = 'frontendip0'
                backend_address_pool_name = 'backendaddrp0'
                prob_name = 'prob0'
                inbound_nat_pool_name = 'inboundnatp0'
                lb_rule_name = 'lbr'
                self.frontend_ip_configurations = [dict(
                    name=frontend_ip_name,
                    public_ip_address=self.public_ip_address_name
                )]
                self.backend_address_pools = [dict(
                    name=backend_address_pool_name
                )]
                self.probes = [dict(
                    name=prob_name,
                    port=self.probe_port,
                    protocol=self.probe_protocol,
                    interval=self.probe_interval,
                    fail_count=self.probe_fail_count,
                    request_path=self.probe_request_path
                )] if self.probe_protocol else None
                self.inbound_nat_pools = [dict(
                    name=inbound_nat_pool_name,
                    frontend_ip_configuration_name=frontend_ip_name,
                    protocol=self.natpool_protocol,
                    frontend_port_range_start=self.natpool_frontend_port_start,
                    frontend_port_range_end=self.natpool_frontend_port_end,
                    backend_port=self.natpool_backend_port
                )] if self.natpool_protocol else None
                self.load_balancing_rules = [dict(
                    name=lb_rule_name,
                    frontend_ip_configuration=frontend_ip_name,
                    backend_address_pool=backend_address_pool_name,
                    probe=prob_name,
                    protocol=self.protocol,
                    load_distribution=self.load_distribution,
                    frontend_port=self.frontend_port,
                    backend_port=self.backend_port,
                    idle_timeout=self.idle_timeout,
                    enable_floating_ip=False
                )] if self.protocol else None
            if load_balancer:
                # check update, NIE
                # NOTE(review): update detection is not implemented; an
                # existing load balancer is treated as unchanged.
                changed = False
            else:
                changed = True
        elif self.state == 'absent' and load_balancer:
            changed = True

        self.results['state'] = load_balancer_to_dict(load_balancer)
        if 'tags' in self.results['state']:
            update_tags, self.results['state']['tags'] = self.update_tags(self.results['state']['tags'])
            if update_tags:
                changed = True
        else:
            if self.tags:
                changed = True
        self.results['changed'] = changed

        if self.state == 'present' and changed:
            # create or update: translate the option dicts into SDK models.
            frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration(
                name=item.get('name'),
                public_ip_address=self.get_public_ip_address_instance(item.get('public_ip_address')) if item.get('public_ip_address') else None,
                private_ip_address=item.get('private_ip_address'),
                private_ip_allocation_method=item.get('private_ip_allocation_method'),
                subnet=self.network_models.Subnet(id=item.get('subnet')) if item.get('subnet') else None
            ) for item in self.frontend_ip_configurations] if self.frontend_ip_configurations else None

            backend_address_pools_param = [self.network_models.BackendAddressPool(
                name=item.get('name')
            ) for item in self.backend_address_pools] if self.backend_address_pools else None

            probes_param = [self.network_models.Probe(
                name=item.get('name'),
                port=item.get('port'),
                protocol=item.get('protocol'),
                interval_in_seconds=item.get('interval'),
                request_path=item.get('request_path'),
                number_of_probes=item.get('fail_count')
            ) for item in self.probes] if self.probes else None

            inbound_nat_pools_param = [self.network_models.InboundNatPool(
                name=item.get('name'),
                frontend_ip_configuration=self.network_models.SubResource(
                    frontend_ip_configuration_id(
                        self.subscription_id,
                        self.resource_group,
                        self.name,
                        item.get('frontend_ip_configuration_name'))),
                protocol=item.get('protocol'),
                frontend_port_range_start=item.get('frontend_port_range_start'),
                frontend_port_range_end=item.get('frontend_port_range_end'),
                backend_port=item.get('backend_port')
            ) for item in self.inbound_nat_pools] if self.inbound_nat_pools else None

            load_balancing_rules_param = [self.network_models.LoadBalancingRule(
                name=item.get('name'),
                frontend_ip_configuration=self.network_models.SubResource(
                    frontend_ip_configuration_id(
                        self.subscription_id,
                        self.resource_group,
                        self.name,
                        item.get('frontend_ip_configuration')
                    )
                ),
                backend_address_pool=self.network_models.SubResource(
                    backend_address_pool_id(
                        self.subscription_id,
                        self.resource_group,
                        self.name,
                        item.get('backend_address_pool')
                    )
                ),
                probe=self.network_models.SubResource(
                    probe_id(
                        self.subscription_id,
                        self.resource_group,
                        self.name,
                        item.get('probe')
                    )
                ),
                protocol=item.get('protocol'),
                load_distribution=item.get('load_distribution'),
                frontend_port=item.get('frontend_port'),
                backend_port=item.get('backend_port'),
                idle_timeout_in_minutes=item.get('idle_timeout'),
                enable_floating_ip=item.get('enable_floating_ip')
            ) for item in self.load_balancing_rules] if self.load_balancing_rules else None

            param = self.network_models.LoadBalancer(
                sku=self.network_models.LoadBalancerSku(self.sku) if self.sku else None,
                location=self.location,
                tags=self.tags,
                frontend_ip_configurations=frontend_ip_configurations_param,
                backend_address_pools=backend_address_pools_param,
                probes=probes_param,
                inbound_nat_pools=inbound_nat_pools_param,
                load_balancing_rules=load_balancing_rules_param
            )

            self.results['state'] = self.create_or_update_load_balancer(param)
        elif self.state == 'absent' and changed:
            self.delete_load_balancer()
            self.results['state'] = None

        return self.results

    def get_public_ip_address_instance(self, id):
        """Get a reference to the public ip address resource"""
        self.log('Fetching public ip address {}'.format(id))
        resource_id = format_resource_id(id, self.subscription_id, 'Microsoft.Network', 'publicIPAddresses', self.resource_group)
        return self.network_models.PublicIPAddress(id=resource_id)

    def get_load_balancer(self):
        """Get a load balancer"""
        self.log('Fetching loadbalancer {0}'.format(self.name))
        try:
            return self.network_client.load_balancers.get(self.resource_group, self.name)
        except CloudError:
            # Not found (or other cloud error): treat as non-existent.
            return None

    def delete_load_balancer(self):
        """Delete a load balancer"""
        self.log('Deleting loadbalancer {0}'.format(self.name))
        try:
            poller = self.network_client.load_balancers.delete(self.resource_group, self.name)
            return self.get_poller_result(poller)
        except CloudError as exc:
            self.fail("Error deleting loadbalancer {0} - {1}".format(self.name, str(exc)))

    def create_or_update_load_balancer(self, param):
        """Create or update the load balancer and return its dict form."""
        try:
            poller = self.network_client.load_balancers.create_or_update(self.resource_group, self.name, param)
            new_lb = self.get_poller_result(poller)
            return load_balancer_to_dict(new_lb)
        except CloudError as exc:
            self.fail("Error creating or updating load balancer {0} - {1}".format(self.name, str(exc)))
def load_balancer_to_dict(load_balancer):
    """Serialize a LoadBalancer object to a dict.

    :param load_balancer: an azure-mgmt-network LoadBalancer model
        instance, or a falsy value.
    :return: plain dict representation; sub-resource collections that are
        unset become empty lists. Returns ``{}`` for a falsy input.
    """
    if not load_balancer:
        return dict()
    result = dict(
        id=load_balancer.id,
        name=load_balancer.name,
        location=load_balancer.location,
        # A load balancer may be returned without an explicit SKU;
        # emit None instead of raising AttributeError.
        sku=load_balancer.sku.name if load_balancer.sku else None,
        tags=load_balancer.tags,
        provisioning_state=load_balancer.provisioning_state,
        etag=load_balancer.etag,
        frontend_ip_configurations=[],
        backend_address_pools=[],
        load_balancing_rules=[],
        probes=[],
        inbound_nat_rules=[],
        inbound_nat_pools=[],
        outbound_nat_rules=[]
    )
    if load_balancer.frontend_ip_configurations:
        result['frontend_ip_configurations'] = [dict(
            id=item.id,
            name=item.name,
            etag=item.etag,
            provisioning_state=item.provisioning_state,
            private_ip_address=item.private_ip_address,
            private_ip_allocation_method=item.private_ip_allocation_method,
            subnet=dict(
                id=item.subnet.id,
                name=item.subnet.name,
                address_prefix=item.subnet.address_prefix
            ) if item.subnet else None,
            public_ip_address=dict(
                id=item.public_ip_address.id,
                location=item.public_ip_address.location,
                public_ip_allocation_method=item.public_ip_address.public_ip_allocation_method,
                ip_address=item.public_ip_address.ip_address
            ) if item.public_ip_address else None
        ) for item in load_balancer.frontend_ip_configurations]
    if load_balancer.backend_address_pools:
        result['backend_address_pools'] = [dict(
            id=item.id,
            name=item.name,
            provisioning_state=item.provisioning_state,
            etag=item.etag
        ) for item in load_balancer.backend_address_pools]
    if load_balancer.load_balancing_rules:
        # The frontend config / pool / probe references are optional on a
        # rule; guard each ".id" access so absent references serialize as
        # None instead of raising AttributeError.
        result['load_balancing_rules'] = [dict(
            id=item.id,
            name=item.name,
            protocol=item.protocol,
            frontend_ip_configuration_id=item.frontend_ip_configuration.id if item.frontend_ip_configuration else None,
            backend_address_pool_id=item.backend_address_pool.id if item.backend_address_pool else None,
            probe_id=item.probe.id if item.probe else None,
            load_distribution=item.load_distribution,
            frontend_port=item.frontend_port,
            backend_port=item.backend_port,
            idle_timeout_in_minutes=item.idle_timeout_in_minutes,
            enable_floating_ip=item.enable_floating_ip,
            provisioning_state=item.provisioning_state,
            etag=item.etag
        ) for item in load_balancer.load_balancing_rules]
    if load_balancer.probes:
        result['probes'] = [dict(
            id=item.id,
            name=item.name,
            protocol=item.protocol,
            port=item.port,
            interval_in_seconds=item.interval_in_seconds,
            number_of_probes=item.number_of_probes,
            request_path=item.request_path,
            provisioning_state=item.provisioning_state
        ) for item in load_balancer.probes]
    if load_balancer.inbound_nat_rules:
        result['inbound_nat_rules'] = [dict(
            id=item.id,
            name=item.name,
            frontend_ip_configuration_id=item.frontend_ip_configuration.id if item.frontend_ip_configuration else None,
            protocol=item.protocol,
            frontend_port=item.frontend_port,
            backend_port=item.backend_port,
            idle_timeout_in_minutes=item.idle_timeout_in_minutes,
            # NOTE(review): the Azure SDK attribute is `enable_floating_ip`;
            # the `enable_floating_point_ip` spelling looks like a typo in
            # the original. The output key is preserved for compatibility.
            enable_floating_point_ip=item.enable_floating_point_ip if hasattr(item, 'enable_floating_point_ip') else False,
            provisioning_state=item.provisioning_state,
            etag=item.etag
        ) for item in load_balancer.inbound_nat_rules]
    if load_balancer.inbound_nat_pools:
        result['inbound_nat_pools'] = [dict(
            id=item.id,
            name=item.name,
            frontend_ip_configuration_id=item.frontend_ip_configuration.id if item.frontend_ip_configuration else None,
            protocol=item.protocol,
            frontend_port_range_start=item.frontend_port_range_start,
            frontend_port_range_end=item.frontend_port_range_end,
            backend_port=item.backend_port,
            provisioning_state=item.provisioning_state,
            etag=item.etag
        ) for item in load_balancer.inbound_nat_pools]
    if load_balancer.outbound_nat_rules:
        result['outbound_nat_rules'] = [dict(
            id=item.id,
            name=item.name,
            allocated_outbound_ports=item.allocated_outbound_ports,
            frontend_ip_configuration_id=item.frontend_ip_configuration.id if item.frontend_ip_configuration else None,
            backend_address_pool=item.backend_address_pool.id if item.backend_address_pool else None,
            provisioning_state=item.provisioning_state,
            etag=item.etag
        ) for item in load_balancer.outbound_nat_rules]
    return result
def frontend_ip_configuration_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Build the ARM resource id of a load balancer frontend IP configuration."""
    template = ('/subscriptions/{}/resourceGroups/{}'
                '/providers/Microsoft.Network/loadBalancers/{}'
                '/frontendIPConfigurations/{}')
    return template.format(subscription_id, resource_group_name,
                           load_balancer_name, name)
def backend_address_pool_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Build the ARM resource id of a load balancer backend address pool."""
    template = ('/subscriptions/{}/resourceGroups/{}'
                '/providers/Microsoft.Network/loadBalancers/{}'
                '/backendAddressPools/{}')
    return template.format(subscription_id, resource_group_name,
                           load_balancer_name, name)
def probe_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Build the ARM resource id of a load balancer health probe."""
    template = ('/subscriptions/{}/resourceGroups/{}'
                '/providers/Microsoft.Network/loadBalancers/{}'
                '/probes/{}')
    return template.format(subscription_id, resource_group_name,
                           load_balancer_name, name)
def main():
    """Main execution.

    Instantiating the module class runs the whole Ansible module
    life-cycle (argument parsing, exec, exit) as a side effect.
    """
    AzureRMLoadBalancer()


if __name__ == '__main__':
    main()
| gpl-3.0 |
vrenaville/hr | __unported__/hr_policy_presence/hr_policy_presence.py | 27 | 3008 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, orm
class policy_presence(orm.Model):
    """Presence policy: working-time rules applied to employee attendance."""

    _name = 'hr.policy.presence'

    _columns = {
        'name': fields.char('Name', size=128, required=True),
        'date': fields.date('Effective Date', required=True),
        'work_days_per_month': fields.integer(
            'Working Days/Month', required=True),
        'line_ids': fields.one2many(
            'hr.policy.line.presence', 'policy_id', 'Policy Lines'),
    }

    _defaults = {
        'work_days_per_month': 26,
    }

    # Return records with latest date first
    _order = 'date desc'

    def get_codes(self, cr, uid, idx, context=None):
        """Return one (code, name, type, rate, duration) tuple per policy line.

        :param idx: id of the policy record to read.
        """
        policy = self.browse(cr, uid, idx, context=context)
        # Plain list comprehension instead of the original
        # list-comprehension-for-side-effect ([res.append(...)]) anti-idiom.
        return [
            (line.code, line.name, line.type, line.rate, line.duration)
            for line in policy.line_ids
        ]
class policy_line_presence(orm.Model):
    """A single rule of a presence policy (rate, type, activation window)."""

    _name = 'hr.policy.line.presence'

    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'policy_id': fields.many2one('hr.policy.presence', 'Policy'),
        'code': fields.char(
            'Code', required=True, help="Use this code in the salary rules."),
        'rate': fields.float(
            'Rate', required=True, help='Multiplier of employee wage.'),
        # Which kind of time this line applies to.
        'type': fields.selection([('normal', 'Normal Working Hours'),
                                  ('holiday', 'Holidays'),
                                  ('restday', 'Rest Days')],
                                 'Type', required=True),
        'active_after': fields.integer(
            'Active After',
            required=True,
            help='Minutes after first punch of the day in which policy will '
                 'take effect.'
        ),
        'duration': fields.integer(
            'Duration', required=True, help="In minutes.")
    }

    _defaults = {
        # Default multiplier of 1.0 == plain wage, no premium.
        'rate': 1.0,
    }
class policy_group(orm.Model):
    """Extend hr.policy.group with a link to presence policies."""

    _name = 'hr.policy.group'
    # Inherit in place: adds columns to the existing hr.policy.group model.
    _inherit = 'hr.policy.group'

    _columns = {
        'presence_policy_ids': fields.many2many(
            'hr.policy.presence', 'hr_policy_group_presence_rel',
            'group_id', 'presence_id', 'Presence Policy'),
    }
| agpl-3.0 |
fatherlinux/atomic-reactor | atomic_reactor/plugins/pre_check_and_set_rebuild.py | 1 | 3466 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from osbs.api import OSBS
from osbs.conf import Configuration
from atomic_reactor.plugin import PreBuildPlugin
def is_rebuild(workflow):
    # True when the check_and_set_rebuild plugin already ran for this
    # workflow and recorded a truthy result, i.e. this build was detected
    # as an automated rebuild. Note: returns the stored value itself
    # (not coerced to bool) when present.
    return (CheckAndSetRebuildPlugin.key in workflow.prebuild_results and
            workflow.prebuild_results[CheckAndSetRebuildPlugin.key])
class CheckAndSetRebuildPlugin(PreBuildPlugin):
    """
    Determine whether this is an automated rebuild

    This plugin checks for a specific label in the OSv3 Build
    metadata. If it exists and has the value specified in the
    configuration, this build is a rebuild. The module-level function
    'is_rebuild()' can be used by other plugins to determine this.

    After checking for the label, it sets the label in the
    metadata, allowing future automated rebuilds to be detected as
    rebuilds.

    Example configuration:

    {
        "name": "check_and_set_rebuild",
        "args": {
            "label_key": "rebuild",
            "label_value": "true",
            "url": "https://localhost:8443/"
        }
    }
    """

    key = "check_and_set_rebuild"
    can_fail = False  # We really want to stop the process

    def __init__(self, tasker, workflow, label_key, label_value,
                 url, verify_ssl=True, use_auth=True):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param label_key: str, key of label used to indicate first build
        :param label_value: str, value of label used to indicate first build
        :param url: str, URL to OSv3 instance
        :param verify_ssl: bool, verify SSL certificate?
        :param use_auth: bool, initiate authentication with OSv3?
        """
        # call parent constructor
        super(CheckAndSetRebuildPlugin, self).__init__(tasker, workflow)
        self.label_key = label_key
        self.label_value = label_value
        self.url = url
        self.verify_ssl = verify_ssl
        self.use_auth = use_auth

    def run(self):
        """
        run the plugin

        :return: truthy when this build is an automated rebuild; the value
            is stored in workflow.prebuild_results and read by is_rebuild().
        """
        try:
            # The OSv3 builder injects the Build object as the $BUILD env var.
            build_json = json.loads(os.environ["BUILD"])
        except KeyError:
            self.log.error("No $BUILD env variable. Probably not running in build container")
            raise
        metadata = build_json.get("metadata", {})
        labels = metadata.get("labels", {})
        # KeyError here is intentional: a build without a buildconfig label
        # cannot be marked for future rebuild detection.
        buildconfig = labels["buildconfig"]
        # NOTE: this local deliberately mirrors (and shadows) the
        # module-level is_rebuild() helper.
        is_rebuild = labels.get(self.label_key) == self.label_value
        self.log.info("This is a rebuild? %s", is_rebuild)
        if not is_rebuild:
            # Update the BuildConfig metadata so the next Build
            # instantiated from it is detected as being an automated
            # rebuild
            kwargs = {}
            if 'namespace' in metadata:
                kwargs['namespace'] = metadata['namespace']
            osbs_conf = Configuration(conf_file=None, openshift_uri=self.url,
                                      use_auth=self.use_auth,
                                      verify_ssl=self.verify_ssl)
            osbs = OSBS(osbs_conf, osbs_conf)
            labels = {self.label_key: self.label_value}
            osbs.set_labels_on_build_config(buildconfig, labels, **kwargs)
        return is_rebuild
| bsd-3-clause |
jessehagberg/python-playground | ex3.py | 1 | 1289 | # This line prints out a string about counting chickens.
print "I will now count my chickens:"
# This line prints the string "Hens" concatenated with the value derived from dividing 30 by 6 and then adding 5. (which is 30)
print "Hens", 25.0 + 30 / 6
# Prints Roosters although I do not understand how the result is 97.
print "Roosters", 100.0 - 25 * 3 % 4
print 25.0 * 3 % 4
# print out a string
print "Now I will count the eggs:"
#The next line will evaluate and print a single number
print 3.0 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
# prints out a string
print "Is it true that 3 + 2 < 5 - 7?"
# Print out a True/False evaluation
print 3.0 + 2 < 5 - 7
#Print out a string concatenated by a single evaluated number
print "What is 3 + 2?", 3.0 + 2
#Print out a string concatenated by a single evaluated number
print "What is 5 - 7?", 5.0 - 7
#print a string
print "Oh, that's why it's False."
#print a string
print "How about some more."
#print a string followed by a true/false mathematical evaluation.
print "Is it greater?", 5.0 > -2
#print a string followed by a true/false mathematical evaluation.
print "Is it greater or equal?", 5.0 >= -2
#print a string followed by a true/false mathematical evaluation.
print "Is it less or equal?", 5.0 <= -2 | cc0-1.0 |
adamtiger/tensorflow | tensorflow/python/keras/_impl/keras/engine/topology.py | 6 | 54348 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Base layer code and base model (Container) code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.keras._impl.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.keras._impl.keras.utils.layer_utils import print_summary as print_layer_summary
from tensorflow.python.layers import base as tf_base_layers
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
# pylint: disable=invalid-name
InputSpec = tf_base_layers.InputSpec
Node = tf_base_layers.Node
TFBaseLayer = tf_base_layers.Layer
# pylint: enable=invalid-name
class Layer(tf_base_layers.Layer):
  """Abstract base layer class.

  # Properties
      name: String, must be unique within a model.
      input_spec: List of InputSpec class instances
          each entry describes one required input:
              - ndim
              - dtype
          A layer with `n` input tensors must have
          an `input_spec` of length `n`.
      trainable: Boolean, whether the layer weights
          will be updated during training.
      uses_learning_phase: Whether any operation
          of the layer uses `K.in_training_phase()`
          or `K.in_test_phase()`.
      input_shape: Shape tuple. Provided for convenience,
          but note that there may be cases in which this
          attribute is ill-defined (e.g. a shared layer
          with multiple input shapes), in which case
          requesting `input_shape` will raise an Exception.
          Prefer using `layer.get_input_shape_for(input_shape)`,
          or `layer.get_input_shape_at(node_index)`.
      output_shape: Shape tuple. See above.
      inbound_nodes: List of nodes.
      outbound_nodes: List of nodes.
      input, output: Input/output tensor(s). Note that if the layer is used
          more than once (shared layer), this is ill-defined
          and will raise an exception. In such cases, use
          `layer.get_input_at(node_index)`.
      input_mask, output_mask: Same as above, for masks.
      trainable_weights: List of variables.
      non_trainable_weights: List of variables.
      weights: The concatenation of the lists trainable_weights and
          non_trainable_weights (in this order).

  # Methods
      call(x, mask=None): Where the layer's logic lives.
      __call__(x, mask=None): Wrapper around the layer logic (`call`).
          If x is a Keras tensor:
              - Connect current layer with last layer from tensor:
                  `self._add_inbound_node(last_layer)`
              - Add layer to tensor history
          If layer is not built:
              - Build from inputs shape
      get_weights()
      set_weights(weights)
      get_config()
      count_params()
      _compute_output_shape(input_shape)
      compute_mask(x, mask)
      get_input_at(node_index)
      get_output_at(node_index)
      get_input_shape_at(node_index)
      get_output_shape_at(node_index)
      get_input_mask_at(node_index)
      get_output_mask_at(node_index)

  # Class Methods
      from_config(config)

  # Internal methods:
      build(input_shape)
      _add_inbound_node(layer, index=0)
  """

  def __init__(self, **kwargs):
    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'dtype',
        'name',
        'trainable',
        'weights',
    }
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in allowed_kwargs:
        raise TypeError('Keyword argument not understood:', kwarg)

    # Get layer name.
    name = kwargs.get('name')

    # Get `trainable` status.
    trainable = kwargs.get('trainable', True)

    # Get `dtype`.
    dtype = kwargs.get('dtype')
    if dtype is None:
      dtype = K.floatx()

    # Call super, which will set all properties common to Keras layers
    # and core TF layers.
    super(Layer, self).__init__(name=name, dtype=dtype, trainable=trainable)

    # Add properties that are Keras-only for now.
    self.supports_masking = False

    # Manage input shape information if passed.
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        # Prepend the (possibly None) batch dimension.
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self.batch_input_shape = batch_input_shape

    # Manage initial weight values if passed.
    # These are applied on first __call__, then discarded.
    if 'weights' in kwargs:
      self._initial_weights = kwargs['weights']
    else:
      self._initial_weights = None

  def add_weight(self,
                 name,
                 shape,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=True,
                 constraint=None):
    """Adds a weight variable to the layer.

    Arguments:
        name: String, the name for the weight variable.
        shape: The shape tuple of the weight.
        dtype: The dtype of the weight. Defaults to `K.floatx()`.
        initializer: An Initializer instance (callable).
        regularizer: An optional Regularizer instance.
        trainable: A boolean, whether the weight should
            be trained via backprop or not (assuming
            that the layer itself is also trainable).
        constraint: An optional Constraint instance.

    Returns:
        The created weight variable.
    """
    if dtype is None:
      dtype = K.floatx()
    # Delegate to the core TF layer machinery, which also registers
    # regularization losses and trainability.
    weight = self.add_variable(name, shape,
                               dtype=dtype,
                               initializer=initializer,
                               regularizer=regularizer,
                               constraint=constraint,
                               trainable=trainable)
    return weight

  def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.

    The base implementation is the identity; subclasses override this.

    Arguments:
        inputs: Input tensor, or list/tuple of input tensors.
        **kwargs: Additional keyword arguments.

    Returns:
        A tensor or list/tuple of tensors.
    """
    return inputs

  def __call__(self, inputs, **kwargs):
    """Wrapper around self.call(), for handling internal references.

    If a Keras tensor is passed:
        - We call self._add_inbound_node().
        - If necessary, we `build` the layer to match
            the shape of the input(s).
        - We update the _keras_history of the output tensor(s)
            with the current layer.
            This is done as part of _add_inbound_node().

    Arguments:
        inputs: Can be a tensor or list/tuple of tensors.
        **kwargs: Additional keyword arguments to be passed to `call()`.

    Returns:
        Output of the layer's `call` method.

    Raises:
        ValueError: in case the layer is missing shape information
            for its `build` call.
    """
    # Actually call the layer (optionally building it).
    output = super(Layer, self).__call__(inputs, **kwargs)

    # Update learning phase info: an output uses the learning phase if any
    # input did, or if the layer itself declares it.
    output_tensors = _to_list(output)
    uses_lp = any(
        [getattr(x, '_uses_learning_phase', False) for x in _to_list(inputs)])
    uses_lp = getattr(self, 'uses_learning_phase', False) or uses_lp
    for i in range(len(output_tensors)):
      output_tensors[i]._uses_learning_phase = getattr(
          output_tensors[i], '_uses_learning_phase', False) or uses_lp

    # Optionally load weight values that were specified at layer instantiation.
    # Applied once (on first call), then the attribute is removed.
    if hasattr(self, '_initial_weights') and self._initial_weights is not None:
      self.set_weights(self._initial_weights)
      del self._initial_weights
    return output

  def _compute_output_shape(self, input_shape):
    """Computes the output shape of the layer.

    Assumes that the layer will be built
    to match that input shape provided.
    The base implementation assumes shape-preserving layers.

    Arguments:
        input_shape: Shape tuple (tuple of integers)
            or list of shape tuples (one per output tensor of the layer).
            Shape tuples can include None for free dimensions,
            instead of an integer.

    Returns:
        An input shape tuple.
    """
    if isinstance(input_shape, list):
      return [tensor_shape.TensorShape(shape) for shape in input_shape]
    else:
      return tensor_shape.TensorShape(input_shape)

  def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument
    """Computes an output mask tensor.

    Arguments:
        inputs: Tensor or list of tensors.
        mask: Tensor or list of tensors.

    Returns:
        None or a tensor (or list of tensors,
            one per output tensor of the layer).

    Raises:
        TypeError: if a non-None mask is passed to a layer that does not
            declare `supports_masking`.
    """
    if not self.supports_masking:
      if mask is not None:
        if isinstance(mask, list):
          if any(m is not None for m in mask):
            raise TypeError('Layer ' + self.name + ' does not support masking, '
                            'but was passed an input_mask: ' + str(mask))
        else:
          raise TypeError('Layer ' + self.name + ' does not support masking, '
                          'but was passed an input_mask: ' + str(mask))
      # masking not explicitly supported: return None as mask
      return None
    # if masking is explicitly supported, by default
    # carry over the input mask
    return mask

  def get_input_mask_at(self, node_index):
    """Retrieves the input mask tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A mask tensor
        (or list of tensors if the layer has multiple inputs).
    """
    inputs = self.get_input_at(node_index)
    if isinstance(inputs, list):
      return [getattr(x, '_keras_mask', None) for x in inputs]
    else:
      return getattr(inputs, '_keras_mask', None)

  def get_output_mask_at(self, node_index):
    """Retrieves the output mask tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A mask tensor
        (or list of tensors if the layer has multiple outputs).
    """
    output = self.get_output_at(node_index)
    if isinstance(output, list):
      return [getattr(x, '_keras_mask', None) for x in output]
    else:
      return getattr(output, '_keras_mask', None)

  @property
  def input_mask(self):
    """Retrieves the input mask tensor(s) of a layer.

    Only applicable if the layer has exactly one inbound node,
    i.e. if it is connected to one incoming layer.

    Returns:
        Input mask tensor (potentially None) or list of input
        mask tensors.

    Raises:
        AttributeError: if the layer is connected to
        more than one incoming layers.
    """
    inputs = self.input
    if isinstance(inputs, list):
      return [getattr(x, '_keras_mask', None) for x in inputs]
    else:
      return getattr(inputs, '_keras_mask', None)

  @property
  def output_mask(self):
    """Retrieves the output mask tensor(s) of a layer.

    Only applicable if the layer has exactly one inbound node,
    i.e. if it is connected to one incoming layer.

    Returns:
        Output mask tensor (potentially None) or list of output
        mask tensors.

    Raises:
        AttributeError: if the layer is connected to
        more than one incoming layers.
    """
    output = self.output
    if isinstance(output, list):
      return [getattr(x, '_keras_mask', None) for x in output]
    else:
      return getattr(output, '_keras_mask', None)

  def set_weights(self, weights):
    """Sets the weights of the layer, from Numpy arrays.

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the layer (i.e. it should match the
            output of `get_weights`).

    Raises:
        ValueError: If the provided weights list does not match the
            layer's specifications.
    """
    params = self.weights
    if len(params) != len(weights):
      raise ValueError('You called `set_weights(weights)` on layer "' +
                       self.name + '" with a weight list of length ' +
                       str(len(weights)) + ', but the layer was expecting ' +
                       str(len(params)) + ' weights. Provided weights: ' +
                       str(weights)[:50] + '...')
    if not params:
      return
    # Validate all shapes first, then assign in a single batched op.
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError('Layer weight shape ' + str(pv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples)

  def get_weights(self):
    """Returns the current weights of the layer.

    Returns:
        Weights values as a list of numpy arrays.
    """
    params = self.weights
    return K.batch_get_value(params)

  def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable)
    containing the configuration of a layer.
    The same layer can be reinstantiated later
    (without its trained weights) from this configuration.

    The config of a layer does not include connectivity
    information, nor the layer class name. These are handled
    by `Container` (one layer of abstraction above).

    Returns:
        Python dictionary.
    """
    config = {'name': self.name, 'trainable': self.trainable}
    if hasattr(self, 'batch_input_shape'):
      config['batch_input_shape'] = self.batch_input_shape
    if hasattr(self, 'dtype'):
      config['dtype'] = self.dtype
    return config

  @classmethod
  def from_config(cls, config):
    """Creates a layer from its config.

    This method is the reverse of `get_config`,
    capable of instantiating the same layer from the config
    dictionary. It does not handle layer connectivity
    (handled by Container), nor weights (handled by `set_weights`).

    Arguments:
        config: A Python dictionary, typically the
            output of get_config.

    Returns:
        A layer instance.
    """
    return cls(**config)
class InputLayer(tf_base_layers.InputLayer, Layer):
  """Layer to be used as an entry point into a graph.

  It can either wrap an existing tensor (pass an `input_tensor` argument)
  or create its a placeholder tensor (pass argument `input_shape`.

  Arguments:
      input_shape: Shape tuple, not including the batch axis.
      batch_size: Optional input batch size (integer or None).
      dtype: Datatype of the input. Inferred from `input_tensor` when one
          is given, else defaults to `K.floatx()`.
      input_tensor: Optional tensor to use as layer input
          instead of creating a placeholder.
      sparse: Boolean, whether the placeholder created
          is meant to be sparse.
      name: Name of the layer (string). Autogenerated when omitted.
  """

  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=None,
               input_tensor=None,
               sparse=False,
               name=None,
               **kwargs):
    # Legacy spelling: `batch_input_shape` combines batch size and shape.
    if 'batch_input_shape' in kwargs:
      batch_input_shape = kwargs.pop('batch_input_shape')
      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      batch_size = batch_input_shape[0]
      input_shape = batch_input_shape[1:]
    if kwargs:
      raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

    if not name:
      # Autogenerate a unique name such as 'input_1'.
      prefix = 'input'
      name = prefix + '_' + str(K.get_uid(prefix))
    if not dtype:
      if input_tensor is None:
        dtype = K.floatx()
      else:
        dtype = K.dtype(input_tensor)
    super(InputLayer, self).__init__(input_shape=input_shape,
                                     batch_size=batch_size,
                                     dtype=dtype,
                                     input_tensor=input_tensor,
                                     sparse=sparse,
                                     name=name)

  def get_config(self):
    # Serializes the combined (batch,) + shape form; see from_config's
    # handling of 'batch_input_shape' in __init__.
    config = {
        'batch_input_shape': self.batch_input_shape,
        'dtype': self.dtype,
        'sparse': self.sparse,
        'name': self.name
    }
    return config
def Input(  # pylint: disable=invalid-name
    shape=None,
    batch_size=None,
    name=None,
    dtype=None,
    sparse=False,
    tensor=None,
    **kwargs):
  """`Input()` is used to instantiate a Keras tensor.

  A Keras tensor is a tensor object from the underlying backend
  (Theano or TensorFlow), which we augment with certain
  attributes that allow us to build a Keras model
  just by knowing the inputs and outputs of the model.

  For instance, if a, b and c are Keras tensors,
  it becomes possible to do:
  `model = Model(input=[a, b], output=c)`

  The added Keras attribute is:
      `_keras_history`: Last layer applied to the tensor.
          the entire layer graph is retrievable from that layer,
          recursively.

  Arguments:
      shape: A shape tuple (integers), not including the batch size.
          For instance, `shape=(32,)` indicates that the expected input
          will be batches of 32-dimensional vectors.
      batch_size: optional static batch size (integer).
      name: An optional name string for the layer.
          Should be unique in a model (do not reuse the same name twice).
          It will be autogenerated if it isn't provided.
      dtype: The data type expected by the input, as a string
          (`float32`, `float64`, `int32`...)
      sparse: A boolean specifying whether the placeholder
          to be created is sparse.
      tensor: Optional existing tensor to wrap into the `Input` layer.
          If set, the layer will not create a placeholder tensor.
      **kwargs: deprecated arguments support (only `batch_shape`).

  Returns:
      A tensor.

  Example:

      ```python
      # this is a logistic regression in Keras
      x = Input(shape=(32,))
      y = Dense(16, activation='softmax')(x)
      model = Model(x, y)
      ```

  Raises:
      ValueError: in case of invalid arguments.
  """
  # Legacy support: `batch_shape` == (batch_size,) + shape.
  if 'batch_shape' in kwargs:
    batch_shape = kwargs.pop('batch_shape')
    if shape and batch_shape:
      raise ValueError('Only provide the shape OR '
                       'batch_shape argument to '
                       'Input, not both at the same time.')
    batch_size = batch_shape[0]
    shape = batch_shape[1:]
  if kwargs:
    raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

  if dtype is None:
    dtype = K.floatx()
  if not shape and tensor is None:
    raise ValueError('Please provide to Input either a `shape`'
                     ' or a `tensor` argument. Note that '
                     '`shape` does not include the batch '
                     'dimension.')
  input_layer = InputLayer(
      input_shape=shape,
      batch_size=batch_size,
      name=name,
      dtype=dtype,
      sparse=sparse,
      input_tensor=tensor)
  # Return tensor including `_keras_history`.
  # Note that in this case train_output and test_output are the same pointer.
  outputs = input_layer.inbound_nodes[0].output_tensors
  if len(outputs) == 1:
    return outputs[0]
  else:
    return outputs
class Network(tf_base_layers.Network, Layer):
"""A Container is a directed acyclic graph of layers.
It is the topological form of a "model". A Model
is simply a Container with added training routines.
# Properties
name
inputs
outputs
input_layers
output_layers
input_spec (list of class instances)
each entry describes one required input:
- ndim
- dtype
trainable (boolean)
input_shape
output_shape
inbound_nodes: list of nodes
outbound_nodes: list of nodes
trainable_weights (list of variables)
non_trainable_weights (list of variables)
# Methods
summary
get_layer
get_weights
set_weights
get_config
compute_output_shape
# Class Methods
from_config
"""
  def __init__(self, inputs, outputs, name=None):
    """Build a network from `inputs` to `outputs`.

    Delegates graph construction to the core TF Network, then fills in
    Keras-specific bookkeeping (mask cache, feed names/shapes).
    """
    super(Network, self).__init__(inputs, outputs, name=name)
    self.supports_masking = False

    # Fill in the output mask cache: pre-seed the cache entry for the
    # network's own inputs with the masks of its outputs.
    masks = []
    for x in self.inputs:
      mask = x._keras_mask if hasattr(x, '_keras_mask') else None
      masks.append(mask)
    mask_cache_key = (tf_base_layers._object_list_uid(self.inputs) + '_' +
                      tf_base_layers._object_list_uid(masks))
    masks = []
    for x in self.outputs:
      mask = x._keras_mask if hasattr(x, '_keras_mask') else None
      masks.append(mask)
    # Single-output networks cache a bare mask, not a 1-element list.
    if len(masks) == 1:
      mask = masks[0]
    else:
      mask = masks
    self._output_mask_cache[mask_cache_key] = mask

    # Build self.input_names and self.output_names.
    # Only placeholder inputs (not wrapped tensors) are feedable.
    self.input_names = []
    self.output_names = []
    self._feed_input_names = []
    self._feed_inputs = []
    self._feed_input_shapes = []
    for i, layer in enumerate(self._input_layers):
      self.input_names.append(layer.name)
      if layer.is_placeholder:
        self._feed_input_names.append(layer.name)
        self._feed_inputs.append(layer.input)
        self._feed_input_shapes.append(K.int_shape(self.inputs[i]))
    for layer in self._output_layers:
      self.output_names.append(layer.name)

    self.internal_input_shapes = [K.int_shape(x) for x in self.inputs]
    self.internal_output_shapes = [K.int_shape(x) for x in self.outputs]
@property
def uses_learning_phase(self):
return any(
[getattr(x, '_uses_learning_phase', False) for x in self.outputs])
@property
def stateful(self):
return any([(hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers])
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
"""Returns the `updates` from all layers that are stateful.
This is useful for separating training updates and
state updates, e.g. when we need to update a layer's internal state
during prediction.
Returns:
A list of update ops.
"""
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
weights = []
for layer in self.layers:
weights += layer.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for layer in self.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
def compute_mask(self, inputs, mask):
inputs = _to_list(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = _to_list(mask)
cache_key = ','.join([str(id(x)) for x in inputs])
cache_key += '_' + ','.join([str(id(x)) for x in masks])
if cache_key in self._output_mask_cache:
return self._output_mask_cache[cache_key]
else:
_, output_masks, _ = self._run_internal_graph(inputs, masks)
return output_masks
  def get_config(self):
    """Returns the model configuration as a JSON-serializable dict.

    The config records every layer, the inter-layer connectivity (restricted
    to nodes that belong to this network), and the model's input/output
    coordinates, so the model can be rebuilt via `from_config`.
    """
    config = {
        'name': self.name,
    }
    # Maps each relevant node key to the node's index among the *kept* nodes
    # of its layer, so serialized node indices are relative to this model
    # rather than to the layer's full call history.
    node_conversion_map = {}
    for layer in self.layers:
      if issubclass(layer.__class__, Network):
        # Containers start with a pre-existing node
        # linking their input to output.
        kept_nodes = 1
      else:
        kept_nodes = 0
      for original_node_index, node in enumerate(layer.inbound_nodes):
        node_key = tf_base_layers._make_node_key(layer.name,
                                                 original_node_index)
        if node_key in self._network_nodes:
          node_conversion_map[node_key] = kept_nodes
          kept_nodes += 1
    # Serialize every layer together with the subset of its inbound nodes
    # that belong to this network.
    layer_configs = []
    for layer in self.layers:  # From the earliest layers on.
      layer_class_name = layer.__class__.__name__
      layer_config = layer.get_config()
      filtered_inbound_nodes = []
      for original_node_index, node in enumerate(layer.inbound_nodes):
        node_key = tf_base_layers._make_node_key(layer.name,
                                                 original_node_index)
        if node_key in self._network_nodes:
          # The node is relevant to the model:
          # add to filtered_inbound_nodes.
          if node.arguments:
            try:
              # Keep call kwargs only if they round-trip through JSON.
              json.dumps(node.arguments)
              kwargs = node.arguments
            except TypeError:
              logging.warning(
                  'Layer ' + layer.name +
                  ' was passed non-serializable keyword arguments: ' +
                  str(node.arguments) + '. They will not be included '
                  'in the serialized model (and thus will be missing '
                  'at deserialization time).')
              kwargs = {}
          else:
            kwargs = {}
          if node.inbound_layers:
            node_data = []
            for i in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[i]
              node_index = node.node_indices[i]
              tensor_index = node.tensor_indices[i]
              node_key = tf_base_layers._make_node_key(inbound_layer.name,
                                                       node_index)
              # Unknown nodes fall back to index 0 (the layer's first
              # kept node).
              new_node_index = node_conversion_map.get(node_key, 0)
              node_data.append(
                  [inbound_layer.name, new_node_index, tensor_index, kwargs])
            filtered_inbound_nodes.append(node_data)
      layer_configs.append({
          'name': layer.name,
          'class_name': layer_class_name,
          'config': layer_config,
          'inbound_nodes': filtered_inbound_nodes,
      })
    config['layers'] = layer_configs
    # Gather info about inputs and outputs.
    model_inputs = []
    for i in range(len(self._input_layers)):
      layer, node_index, tensor_index = self._input_coordinates[i]
      node_key = tf_base_layers._make_node_key(layer.name,
                                               node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_inputs.append([layer.name, new_node_index, tensor_index])
    config['input_layers'] = model_inputs
    model_outputs = []
    for i in range(len(self._output_layers)):
      layer, node_index, tensor_index = self._output_coordinates[i]
      node_key = tf_base_layers._make_node_key(layer.name,
                                               node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_outputs.append([layer.name, new_node_index, tensor_index])
    config['output_layers'] = model_outputs
    # Deep copy so callers can freely mutate the returned structure without
    # affecting internal state.
    return copy.deepcopy(config)
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Instantiates a Model from its config (output of `get_config()`).

    Arguments:
      config: Model config dictionary.
      custom_objects: Optional dictionary mapping names
        (strings) to custom classes or functions to be
        considered during deserialization.

    Returns:
      A model instance.

    Raises:
      ValueError: In case of improperly formatted config dict.
    """
    # Layer instances created during
    # the graph reconstruction process
    created_layers = {}

    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = {}

    def add_unprocessed_node(layer, node_data):
      # Enqueue `node_data` so it can be retried once its inputs exist.
      if layer not in unprocessed_nodes:
        unprocessed_nodes[layer] = [node_data]
      else:
        unprocessed_nodes[layer].append(node_data)

    def process_node(layer, node_data):
      """Deserialize a node.

      Arguments:
        layer: layer instance.
        node_data: node config dict.

      Raises:
        ValueError: In case of improperly formatted `node_data` dict.
      """
      input_tensors = []
      for input_data in node_data:
        inbound_layer_name = input_data[0]
        inbound_node_index = input_data[1]
        inbound_tensor_index = input_data[2]
        if len(input_data) == 3:
          kwargs = {}
        elif len(input_data) == 4:
          # 4-element form carries the serialized call kwargs.
          kwargs = input_data[3]
        else:
          raise ValueError('Improperly formatted model config.')
        if inbound_layer_name not in created_layers:
          # Inbound layer not deserialized yet: retry this node later.
          add_unprocessed_node(layer, node_data)
          return
        inbound_layer = created_layers[inbound_layer_name]
        if len(inbound_layer.inbound_nodes) <= inbound_node_index:
          # Required inbound node not created yet: retry this node later.
          add_unprocessed_node(layer, node_data)
          return
        inbound_node = inbound_layer.inbound_nodes[inbound_node_index]
        input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])
      # Call layer on its inputs, thus creating the node
      # and building the layer if needed.
      if input_tensors:
        if len(input_tensors) == 1:
          layer(input_tensors[0], **kwargs)
        else:
          layer(input_tensors, **kwargs)

    def process_layer(layer_data):
      """Deserialize a layer, then call it on appropriate inputs.

      Arguments:
        layer_data: layer config dict.

      Raises:
        ValueError: In case of improperly formatted `layer_data` dict.
      """
      layer_name = layer_data['name']

      # Instantiate layer.
      from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top

      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer

      # Gather layer inputs.
      inbound_nodes_data = layer_data['inbound_nodes']
      for node_data in inbound_nodes_data:
        # We don't process nodes (i.e. make layer calls)
        # on the fly because the inbound node may not yet exist,
        # in case of layer shared at different topological depths
        # (e.g. a model such as A(B(A(B(x)))))
        add_unprocessed_node(layer, node_data)

    # First, we create all layers and enqueue nodes to be processed
    for layer_data in config['layers']:
      process_layer(layer_data)
    # Then we process nodes in order of layer depth.
    # Nodes that cannot yet be processed (if the inbound node
    # does not yet exist) are re-enqueued, and the process
    # is repeated until all nodes are processed.
    # NOTE(review): this assumes a well-formed config; if a node referenced
    # an inbound node that never materializes, the loop would not
    # terminate — confirm upstream validation guarantees this.
    while unprocessed_nodes:
      for layer_data in config['layers']:
        layer = created_layers[layer_data['name']]
        if layer in unprocessed_nodes:
          for node_data in unprocessed_nodes.pop(layer):
            process_node(layer, node_data)

    # Resolve the model's input/output tensors from the rebuilt layers.
    name = config.get('name')
    input_tensors = []
    output_tensors = []
    for layer_data in config['input_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer.inbound_nodes[node_index].output_tensors
      input_tensors.append(layer_output_tensors[tensor_index])
    for layer_data in config['output_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer.inbound_nodes[node_index].output_tensors
      output_tensors.append(layer_output_tensors[tensor_index])
    return cls(inputs=input_tensors, outputs=output_tensors, name=name)
def save(self, filepath, overwrite=True, include_optimizer=True):
"""Save the model to a single HDF5 file.
The savefile includes:
- The model architecture, allowing to re-instantiate the model.
- The model weights.
- The state of the optimizer, allowing to resume training
exactly where you left off.
This allows you to save the entirety of the state of a model
in a single file.
Saved models can be reinstantiated via `keras.models.load_model`.
The model returned by `load_model`
is a compiled model ready to be used (unless the saved model
was never compiled in the first place).
Arguments:
filepath: String, path to the file to save the weights to.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
"""
from tensorflow.python.keras._impl.keras.models import save_model # pylint: disable=g-import-not-at-top
save_model(self, filepath, overwrite, include_optimizer)
def save_weights(self, filepath, overwrite=True):
"""Dumps all layer weights to a HDF5 file.
The weight file has:
- `layer_names` (attribute), a list of strings
(ordered names of model layers).
- For every layer, a `group` named `layer.name`
- For every such layer group, a group attribute `weight_names`,
a list of strings
(ordered names of weights tensor of the layer).
- For every weight in the layer, a dataset
storing the weight value, named after the weight tensor.
Arguments:
filepath: String, path to the file to save the weights to.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
Raises:
ImportError: If h5py is not available.
"""
if h5py is None:
raise ImportError('`save_weights` requires h5py.')
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
f = h5py.File(filepath, 'w')
save_weights_to_hdf5_group(f, self.layers)
f.flush()
f.close()
def load_weights(self, filepath, by_name=False):
"""Loads all layer weights from a HDF5 save file.
If `by_name` is False (default) weights are loaded
based on the network's topology, meaning the architecture
should be the same as when the weights were saved.
Note that layers that don't have weights are not taken
into account in the topological ordering, so adding or
removing layers is fine as long as they don't have weights.
If `by_name` is True, weights are loaded into layers
only if they share the same name. This is useful
for fine-tuning or transfer-learning models where
some of the layers have changed.
Arguments:
filepath: String, path to the weights file to load.
by_name: Boolean, whether to load weights by name
or by topological order.
Raises:
ImportError: If h5py is not available.
"""
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
if by_name:
load_weights_from_hdf5_group_by_name(f, self.layers)
else:
load_weights_from_hdf5_group(f, self.layers)
if hasattr(f, 'close'):
f.close()
  def _updated_config(self):
    """Util shared between different serialization methods.

    Returns:
      Model config with Keras version information added.
    """
    # Deferred import (see the pylint disable) — presumably to avoid an
    # import cycle at module load time; confirm before moving to top level.
    from tensorflow.python.keras._impl.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

    config = self.get_config()
    model_config = {
        'class_name': self.__class__.__name__,
        'config': config,
        'keras_version': keras_version,
        'backend': K.backend()
    }
    return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError('Requires yaml module installed.')
return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
"""
print_layer_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
# Alias for legacy support.
# `Container` is the older public name for this class; the binding is kept
# so existing code that references `Container` keeps working.
Container = Network
def get_source_inputs(tensor, layer=None, node_index=None):
  """Returns the list of input tensors necessary to compute `tensor`.

  Output will always be a list of tensors
  (potentially with 1 element).

  Arguments:
    tensor: The tensor to start from.
    layer: Origin layer of the tensor. Will be
      determined via tensor._keras_history if not provided.
    node_index: Origin node index of the tensor.

  Returns:
    List of input tensors.
  """
  if not hasattr(tensor, '_keras_history'):
    # Not a Keras tensor: nothing to trace, return it unchanged.
    return tensor

  # NOTE(review): this tests `node_index` for truthiness (not `is None`),
  # so a caller-supplied nonzero node_index is overridden by the tensor's
  # own history — confirm this matches upstream intent before changing.
  if layer is None or node_index:
    layer, node_index, _ = tensor._keras_history

  if not layer.inbound_nodes:
    return [tensor]

  node = layer.inbound_nodes[node_index]
  if not node.inbound_layers:
    # Reached an Input layer: its own tensors are the sources.
    return node.input_tensors

  source_tensors = []
  for i, origin_layer in enumerate(node.inbound_layers):
    origin_tensor = node.input_tensors[i]
    origin_node_index = node.node_indices[i]
    # Recurse towards the graph's inputs.
    for candidate in get_source_inputs(origin_tensor, origin_layer,
                                       origin_node_index):
      # Avoid input redundancy.
      if candidate not in source_tensors:
        source_tensors.append(candidate)
  return source_tensors
def _to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Arguments:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def save_weights_to_hdf5_group(f, layers):
  """Saves the weights of `layers` into the HDF5 group `f`.

  Writes `layer_names`, `backend` and `keras_version` as group attributes,
  then creates one sub-group per layer holding that layer's weight
  datasets (listed in the sub-group's `weight_names` attribute).
  """
  from tensorflow.python.keras._impl.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  f.attrs['layer_names'] = [layer.name.encode('utf8') for layer in layers]
  f.attrs['backend'] = K.backend().encode('utf8')
  f.attrs['keras_version'] = str(keras_version).encode('utf8')

  for layer in layers:
    group = f.create_group(layer.name)
    symbolic_weights = layer.weights
    # Fetch all values in one backend call.
    weight_values = K.batch_get_value(symbolic_weights)

    weight_names = []
    for idx, (weight, _) in enumerate(zip(symbolic_weights, weight_values)):
      if hasattr(weight, 'name') and weight.name:
        weight_name = str(weight.name)
      else:
        # Fall back to a positional name for anonymous weights.
        weight_name = 'param_' + str(idx)
      weight_names.append(weight_name.encode('utf8'))
    group.attrs['weight_names'] = weight_names

    for weight_name, value in zip(weight_names, weight_values):
      dataset = group.create_dataset(weight_name, value.shape,
                                     dtype=value.dtype)
      if not value.shape:
        # Scalars cannot be sliced; assign through the empty tuple.
        dataset[()] = value
      else:
        dataset[:] = value
def preprocess_weights_for_loading(layer,
                                   weights,
                                   original_keras_version=None,
                                   original_backend=None):
  """Converts layers weights from Keras 1 format to Keras 2.

  Arguments:
    layer: Layer instance.
    weights: List of weights values (Numpy arrays).
    original_keras_version: Keras version for the weights, as a string.
    original_backend: Keras backend the weights were trained with,
      as a string.

  Returns:
    A list of weights values (Numpy arrays).
  """
  # NOTE(review): several branches below assign into `weights[...]`, which
  # mutates the caller's list in place — callers should pass a copy if they
  # need the original values afterwards.
  if original_keras_version == '1':
    if layer.__class__.__name__ == 'Bidirectional':
      # The flat list holds the forward layer's weights followed by the
      # backward layer's; convert each half with its wrapped layer.
      num_weights_per_layer = len(weights) // 2
      forward_weights = preprocess_weights_for_loading(
          layer.forward_layer, weights[:num_weights_per_layer],
          original_keras_version, original_backend)
      backward_weights = preprocess_weights_for_loading(
          layer.backward_layer, weights[num_weights_per_layer:],
          original_keras_version, original_backend)
      weights = forward_weights + backward_weights
    if layer.__class__.__name__ == 'TimeDistributed':
      # Delegate conversion to the wrapped layer.
      weights = preprocess_weights_for_loading(
          layer.layer, weights, original_keras_version, original_backend)
    if layer.__class__.__name__ == 'Conv1D':
      shape = weights[0].shape
      # Handle Keras 1.1 format
      if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:
        # Legacy shape:
        # (filters, input_dim, filter_length, 1)
        assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0],
                                                           1)
        weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
      # Drop the dummy spatial dimension used by the Keras 1 Conv1D kernel.
      weights[0] = weights[0][:, 0, :, :]
    if layer.__class__.__name__ == 'Conv2D':
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, kernel_rows, kernel_cols)
        # new: (kernel_rows, kernel_cols, stack_size, filters)
        weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
    if layer.__class__.__name__ == 'Conv2DTranspose':
      if layer.data_format == 'channels_last':
        # old: (kernel_rows, kernel_cols, stack_size, filters)
        # new: (kernel_rows, kernel_cols, filters, stack_size)
        weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, kernel_rows, kernel_cols)
        # new: (kernel_rows, kernel_cols, filters, stack_size)
        weights[0] = np.transpose(weights[0], (2, 3, 0, 1))
    if layer.__class__.__name__ == 'Conv3D':
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, ...)
        # new: (..., stack_size, filters)
        weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))

    if layer.__class__.__name__ == 'GRU':
      if len(weights) == 9:
        # Keras 1 stored one (kernel, recurrent, bias) triple per gate;
        # concatenate the three gates into single tensors.
        kernel = np.concatenate([weights[0], weights[3], weights[6]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[4], weights[7]], axis=-1)
        bias = np.concatenate([weights[2], weights[5], weights[8]], axis=-1)
        weights = [kernel, recurrent_kernel, bias]

    if layer.__class__.__name__ == 'LSTM':
      if len(weights) == 12:
        # old: i, c, f, o
        # new: i, f, c, o
        kernel = np.concatenate(
            [weights[0], weights[6], weights[3], weights[9]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[7], weights[4], weights[10]], axis=-1)
        bias = np.concatenate(
            [weights[2], weights[8], weights[5], weights[11]], axis=-1)
        weights = [kernel, recurrent_kernel, bias]

    if layer.__class__.__name__ == 'ConvLSTM2D':
      if len(weights) == 12:
        # Same per-gate concatenation (and i,c,f,o -> i,f,c,o reorder) as
        # the LSTM case above.
        kernel = np.concatenate(
            [weights[0], weights[6], weights[3], weights[9]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[7], weights[4], weights[10]], axis=-1)
        bias = np.concatenate(
            [weights[2], weights[8], weights[5], weights[11]], axis=-1)
        if layer.data_format == 'channels_first':
          # old: (filters, stack_size, kernel_rows, kernel_cols)
          # new: (kernel_rows, kernel_cols, stack_size, filters)
          kernel = np.transpose(kernel, (2, 3, 1, 0))
          recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0))
        weights = [kernel, recurrent_kernel, bias]

    if layer.__class__.__name__ in ['Model', 'Sequential']:
      # Recursively convert each sublayer's weights, consuming the flat
      # list in order: all trainable weights first, then all non-trainable.
      new_weights = []
      # trainable weights
      for sublayer in layer.layers:
        num_weights = len(sublayer.trainable_weights)
        if num_weights > 0:
          new_weights.extend(
              preprocess_weights_for_loading(
                  layer=sublayer,
                  weights=weights[:num_weights],
                  original_keras_version=original_keras_version,
                  original_backend=original_backend))
          weights = weights[num_weights:]

      # non-trainable weights
      for sublayer in layer.layers:
        num_weights = len([
            l for l in sublayer.weights if l not in sublayer.trainable_weights
        ])
        if num_weights > 0:
          new_weights.extend(
              preprocess_weights_for_loading(
                  layer=sublayer,
                  weights=weights[:num_weights],
                  original_keras_version=original_keras_version,
                  original_backend=original_backend))
          weights = weights[num_weights:]
      weights = new_weights

  conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D']
  if layer.__class__.__name__ in conv_layers:
    if original_backend and K.backend() != original_backend:
      # Kernels need flipping when crossing backends (Theano vs TensorFlow
      # convolution conventions).
      weights[0] = conv_utils.convert_kernel(weights[0])
      if layer.__class__.__name__ == 'ConvLSTM2D':
        weights[1] = conv_utils.convert_kernel(weights[1])
    if K.int_shape(layer.weights[0]) != weights[0].shape:
      # Shape mismatch against the live variable indicates a dim-ordering
      # (data format) difference; transpose to match.
      weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
      if layer.__class__.__name__ == 'ConvLSTM2D':
        weights[1] = np.transpose(weights[1], (3, 2, 0, 1))
  return weights
def load_weights_from_hdf5_group(f, layers):
  """Implements topological (order-based) weight loading.

  Arguments:
    f: A pointer to a HDF5 group.
    layers: a list of target layers.

  Raises:
    ValueError: in case of mismatch between provided layers
      and weights file.
  """
  # Files saved before Keras 2 carry no version attribute.
  if 'keras_version' in f.attrs:
    original_keras_version = f.attrs['keras_version'].decode('utf8')
  else:
    original_keras_version = '1'
  if 'backend' in f.attrs:
    original_backend = f.attrs['backend'].decode('utf8')
  else:
    original_backend = None

  # Layers are matched positionally, considering only layers that actually
  # hold weights — on both the model side and the file side.
  filtered_layers = []
  for layer in layers:
    weights = layer.weights
    if weights:
      filtered_layers.append(layer)

  layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
  filtered_layer_names = []
  for name in layer_names:
    g = f[name]
    weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
    if weight_names:
      filtered_layer_names.append(name)
  layer_names = filtered_layer_names
  if len(layer_names) != len(filtered_layers):
    raise ValueError('You are trying to load a weight file '
                     'containing ' + str(len(layer_names)) +
                     ' layers into a model with ' + str(len(filtered_layers)) +
                     ' layers.')

  # We batch weight value assignments in a single backend call
  # which provides a speedup in TensorFlow.
  weight_value_tuples = []
  for k, name in enumerate(layer_names):
    g = f[name]
    weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
    weight_values = [g[weight_name] for weight_name in weight_names]
    layer = filtered_layers[k]
    symbolic_weights = layer.weights
    # Convert legacy (Keras 1 / other-backend) weight layouts if needed.
    weight_values = preprocess_weights_for_loading(
        layer, weight_values, original_keras_version, original_backend)
    if len(weight_values) != len(symbolic_weights):
      raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
                       '" in the current model) was found to '
                       'correspond to layer ' + name + ' in the save file. '
                       'However the new layer ' + layer.name + ' expects ' +
                       str(len(symbolic_weights)) +
                       ' weights, but the saved weights have ' +
                       str(len(weight_values)) + ' elements.')
    weight_value_tuples += zip(symbolic_weights, weight_values)
  K.batch_set_value(weight_value_tuples)
def load_weights_from_hdf5_group_by_name(f, layers):
  """Implements name-based weight loading.

  (instead of topological weight loading).

  Layers that have no matching name are skipped.

  Arguments:
    f: A pointer to a HDF5 group.
    layers: a list of target layers.

  Raises:
    ValueError: in case of mismatch between provided layers
      and weights file.
  """
  # Files saved before Keras 2 carry no version attribute.
  if 'keras_version' in f.attrs:
    original_keras_version = f.attrs['keras_version'].decode('utf8')
  else:
    original_keras_version = '1'
  if 'backend' in f.attrs:
    original_backend = f.attrs['backend'].decode('utf8')
  else:
    original_backend = None

  # New file format.
  layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]

  # Reverse index of layer name to list of layers with name.
  index = {}
  for layer in layers:
    if layer.name:
      index.setdefault(layer.name, []).append(layer)

  # We batch weight value assignments in a single backend call
  # which provides a speedup in TensorFlow.
  weight_value_tuples = []
  for k, name in enumerate(layer_names):
    g = f[name]
    weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
    weight_values = [g[weight_name] for weight_name in weight_names]

    for layer in index.get(name, []):
      symbolic_weights = layer.weights
      # Pass a shallow copy and bind the result to a fresh name: the original
      # rebound `weight_values` here, so when several model layers shared
      # this name, the second and subsequent layers received weights that
      # had already been converted (and `preprocess_weights_for_loading` can
      # also mutate the list it is given in place).
      preprocessed = preprocess_weights_for_loading(
          layer, list(weight_values), original_keras_version, original_backend)
      if len(preprocessed) != len(symbolic_weights):
        raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
                         '") expects ' + str(len(symbolic_weights)) +
                         ' weight(s), but the saved weights' + ' have ' +
                         str(len(preprocessed)) + ' element(s).')
      # Set values.
      for i in range(len(preprocessed)):
        weight_value_tuples.append((symbolic_weights[i], preprocessed[i]))
  K.batch_set_value(weight_value_tuples)
| apache-2.0 |
Peddle/hue | desktop/core/ext-py/Paste-2.0.1/paste/debug/wdg_validate.py | 50 | 4268 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware that tests the validity of all generated HTML using the
`WDG HTML Validator <http://www.htmlhelp.com/tools/validator/>`_
"""
from cStringIO import StringIO
import subprocess
from paste.response import header_value
import re
import cgi
__all__ = ['WDGValidateMiddleware']
class WDGValidateMiddleware(object):

    """
    Middleware that checks HTML and appends messages about the validity of
    the HTML.  Uses: http://www.htmlhelp.com/tools/validator/ -- interacts
    with the command line client.  Use the configuration ``wdg_path`` to
    override the path (default: looks for ``validate`` in $PATH).

    To install, in your web context's __init__.py::

        def urlparser_wrap(environ, start_response, app):
            return wdg_validate.WDGValidateMiddleware(app)(
                environ, start_response)

    Or in your configuration::

        middleware.append('paste.wdg_validate.WDGValidateMiddleware')
    """

    # Matches the closing body tag (case-insensitive) so the validator
    # report can be inserted just before it.
    _end_body_regex = re.compile(r'</body>', re.I)

    def __init__(self, app, global_conf=None, wdg_path='validate'):
        self.app = app
        self.wdg_path = wdg_path

    def __call__(self, environ, start_response):
        output = StringIO()
        response = []

        def writer_start_response(status, headers, exc_info=None):
            # Record the status/headers for later inspection and divert
            # body writes into the in-memory buffer so the whole page can
            # be validated at once.
            response.extend((status, headers))
            start_response(status, headers, exc_info)
            return output.write

        app_iter = self.app(environ, writer_start_response)
        try:
            for s in app_iter:
                output.write(s)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        page = output.getvalue()
        status, headers = response
        v = header_value(headers, 'content-type') or ''
        if (not v.startswith('text/html')
            and not v.startswith('text/xhtml')
            and not v.startswith('application/xhtml')):
            # Can't validate
            # @@: Should validate CSS too... but using what?
            return [page]
        ops = []
        if v.startswith('text/xhtml+xml'):
            ops.append('--xml')
        # @@: Should capture encoding too
        html_errors = self.call_wdg_validate(
            self.wdg_path, ops, page)
        if html_errors:
            page = self.add_error(page, html_errors)[0]
            # NOTE(review): `headers` was already handed to start_response
            # above, so adjusting it here may have no effect with some WSGI
            # servers -- confirm before relying on the Content-Length fixup.
            # Remove any existing Content-Length entry before appending the
            # corrected one. (The original called list.remove() with a tuple
            # rebuilt from header_value(), which raised ValueError whenever
            # the header was absent or differed in case.)
            for i, (header_name, _header_val) in enumerate(headers):
                if header_name.lower() == 'content-length':
                    del headers[i]
                    break
            headers.append(('Content-Length', str(len(page))))
        return [page]

    def call_wdg_validate(self, wdg_path, ops, page):
        """Runs the ``validate`` executable over ``page`` and returns its
        combined stdout/stderr output."""
        if subprocess is None:
            # Unreachable on Python >= 2.4 (the module-level import would
            # have failed earlier); kept for parity with the original guard.
            raise ValueError(
                "This middleware requires the subprocess module from "
                "Python 2.4")
        proc = subprocess.Popen([wdg_path] + ops,
                                shell=False,
                                close_fds=True,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        stdout = proc.communicate(page)[0]
        proc.wait()
        return stdout

    def add_error(self, html_page, html_errors):
        """Returns ``[page]`` with the validator report embedded.

        The (HTML-escaped) report is inserted just before ``</body>`` when
        that tag exists; otherwise it is appended to the end of the page.
        """
        add_text = ('<pre style="background-color: #ffd; color: #600; '
                    'border: 1px solid #000;">%s</pre>'
                    % cgi.escape(html_errors))
        match = self._end_body_regex.search(html_page)
        if match:
            return [html_page[:match.start()]
                    + add_text
                    + html_page[match.start():]]
        else:
            return [html_page + add_text]
def make_wdg_validate_middleware(
    app, global_conf, wdg_path='validate'):
    """
    Wraps the application in the WDG validator from
    http://www.htmlhelp.com/tools/validator/

    Validation errors are appended to the text of each page.

    You can configure this by giving the path to the validate
    executable (by default picked up from $PATH)
    """
    # Paste entry-point factory: just forwards the configuration.
    return WDGValidateMiddleware(app, global_conf, wdg_path=wdg_path)
| apache-2.0 |
yordan-desta/QgisIns | python/plugins/GdalTools/tools/doDEM.py | 1 | 8075 | # -*- coding: utf-8 -*-
"""
***************************************************************************
doDEM.py
---------------------
Date : March 2011
Copyright : (C) 2011 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'March 2011'
__copyright__ = '(C) 2011, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_widgetDEM import Ui_GdalToolsWidget as Ui_Widget
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BasePluginWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.modes = ("hillshade", "slope", "aspect", "color-relief", "TRI", "TPI", "roughness")
self.setupUi(self)
BasePluginWidget.__init__(self, self.iface, "gdaldem")
self.outSelector.setType( self.outSelector.FILE )
self.configSelector.setType( self.configSelector.FILE )
# set the default QSpinBoxes and QProgressBar value
self.bandSpin.setValue(1)
self.hillshadeZFactorSpin.setValue(1)
self.hillshadeScaleSpin.setValue(1)
self.hillshadeAltitudeSpin.setValue(45.0)
self.hillshadeAzimuthSpin.setValue(315.0)
self.slopeScaleSpin.setValue(1)
# set the default color configuration file to terrain
import os.path
colorConfigFile = os.path.join(os.path.dirname(__file__), "terrain.txt")
self.configSelector.setFilename(colorConfigFile)
self.outputFormat = Utils.fillRasterOutputFormat()
self.creationOptionsWidget.setFormat(self.outputFormat)
self.setParamsStatus(
[
(self.inSelector, SIGNAL("filenameChanged()")),
(self.outSelector, SIGNAL("filenameChanged()")),
(self.computeEdgesCheck, SIGNAL("stateChanged(int)"), None, 1800),
(self.bandSpin, SIGNAL("valueChanged(int)"), self.bandCheck),
(self.algorithmCheck, SIGNAL("stateChanged(int)"), None, 1800),
(self.creationOptionsWidget, SIGNAL("optionsChanged()")),
(self.creationOptionsGroupBox, SIGNAL("toggled(bool)")),
(self.modeCombo, SIGNAL("currentIndexChanged(int)")),
([self.hillshadeZFactorSpin, self.hillshadeScaleSpin, self.hillshadeAltitudeSpin, self.hillshadeAzimuthSpin], SIGNAL("valueChanged(double)")),
(self.slopeScaleSpin, SIGNAL("valueChanged(double)")),
(self.slopePercentCheck, SIGNAL("stateChanged(int)")),
([self.aspectTrigonometricCheck, self.aspectZeroForFlatCheck], SIGNAL("stateChanged(int)")),
(self.configSelector, SIGNAL("filenameChanged()")),
([self.colorExactRadio, self.colorNearestRadio], SIGNAL("toggled(bool)"), self.colorMatchGroupBox),
(self.colorAlphaCheck, SIGNAL("stateChanged(int)"))
]
)
self.connect(self.inSelector, SIGNAL("selectClicked()"), self.fillInputFileEdit)
self.connect(self.outSelector, SIGNAL("selectClicked()"), self.fillOutputFileEdit)
self.connect(self.configSelector, SIGNAL("selectClicked()"), self.fillColorConfigFileEdit)
self.connect(self.modeCombo, SIGNAL("currentIndexChanged(int)"), self.showModeParams)
def showModeParams(self, index):
self.stackedWidget.setVisible( index < 4 )
self.algorithmCheck.setVisible( index < 3 )
if index >= 3:
self.algorithmCheck.setChecked( False )
def onLayersChanged(self):
self.inSelector.setLayers( Utils.LayerRegistry.instance().getRasterLayers() )
def fillInputFileEdit(self):
    """Ask the user for the input DEM raster and store the chosen path."""
    last_filter = Utils.FileFilter.lastUsedRasterFilter()
    chosen = Utils.FileDialog.getOpenFileName(
        self,
        self.tr("Select the file for DEM"),
        Utils.FileFilter.allRastersFilter(),
        last_filter,
    )
    if not chosen:
        return
    Utils.FileFilter.setLastUsedRasterFilter(last_filter)
    self.inSelector.setFilename(chosen)
    # refresh the argument preview / validation state
    self.getArguments()
def fillOutputFileEdit(self):
    """Ask the user for the output raster path and derive its GDAL format."""
    last_filter = Utils.FileFilter.lastUsedRasterFilter()
    chosen = Utils.FileDialog.getSaveFileName(
        self,
        self.tr("Select the raster file to save the results to"),
        Utils.FileFilter.saveRastersFilter(),
        last_filter,
    )
    if not chosen:
        return
    Utils.FileFilter.setLastUsedRasterFilter(last_filter)
    # the output format follows from the selected filter / extension
    self.outputFormat = Utils.fillRasterOutputFormat(last_filter, chosen)
    self.outSelector.setFilename(chosen)
    self.creationOptionsWidget.setFormat(self.outputFormat)
def fillColorConfigFileEdit(self):
    """Ask the user for a color configuration file (color-relief mode)."""
    chosen = Utils.FileDialog.getOpenFileName(
        self, self.tr("Select the color configuration file"))
    if chosen:
        self.configSelector.setFilename(chosen)
def getArguments(self):
    """Build the gdaldem command-line argument list from the dialog state.

    Argument order matters to gdaldem: mode, input file, (color config for
    color-relief), output file, then mode-specific and shared options.
    """
    mode = self.modes[ self.modeCombo.currentIndex() ]
    arguments = []
    arguments.append( mode)
    arguments.append( self.getInputFileName())
    # color-relief expects the color configuration file between input and output
    if mode == "color-relief":
        arguments.append( self.configSelector.filename())
    outputFn = self.getOutputFileName()
    arguments.append( outputFn)
    # mode-specific options
    # NOTE(review): spin-box values are appended unconverted (numbers, not
    # strings); presumably the process runner stringifies them -- confirm.
    if mode == "hillshade":
        arguments.extend( ["-z", self.hillshadeZFactorSpin.value() ] )
        arguments.extend( ["-s" , self.hillshadeScaleSpin.value() ] )
        arguments.extend( ["-az" , self.hillshadeAzimuthSpin.value()] )
        arguments.extend( ["-alt" , self.hillshadeAltitudeSpin.value() ] )
    elif mode == "slope":
        if self.slopePercentCheck.isChecked():
            arguments.append( "-p")
        arguments.extend( [ "-s" , self.slopeScaleSpin.value() ] )
    elif mode == "aspect":
        if self.aspectTrigonometricCheck.isChecked():
            arguments.append( "-trigonometric")
        if self.aspectZeroForFlatCheck.isChecked():
            arguments.append( "-zero_for_flat")
    elif mode == "color-relief":
        if self.colorAlphaCheck.isChecked():
            arguments.append( "-alpha")
        if self.colorMatchGroupBox.isChecked():
            if self.colorExactRadio.isChecked():
                arguments.append( "-exact_color_entry")
            elif self.colorNearestRadio.isChecked():
                arguments.append( "-nearest_color_entry")
    # options shared by several modes
    if self.algorithmCheck.isChecked():
        arguments.extend( [ "-alg", "ZevenbergenThorne" ] )
    if self.computeEdgesCheck.isChecked():
        arguments.append( "-compute_edges")
    if self.bandCheck.isChecked():
        arguments.extend( [ "-b" , self.bandSpin.value() ] )
    # output format / creation options only make sense with an output file
    if outputFn:
        arguments.extend( [ "-of", self.outputFormat ] )
    if self.creationOptionsGroupBox.isChecked():
        for opt in self.creationOptionsWidget.options():
            arguments.extend( [ "-co", opt ] )
    # set creation options filename/layer for validation
    if self.inSelector.layer():
        self.creationOptionsWidget.setRasterLayer(self.inSelector.layer())
    else:
        self.creationOptionsWidget.setRasterFileName(self.getInputFileName())
    return arguments
def getInputFileName(self):
    """Return the currently selected input DEM path."""
    selected = self.inSelector.filename()
    return selected
def getOutputFileName(self):
    """Return the currently selected output raster path."""
    selected = self.outSelector.filename()
    return selected
def addLayerIntoCanvas(self, fileInfo):
    """Load the produced raster into the QGIS map canvas."""
    raster_path = fileInfo.filePath()
    self.iface.addRasterLayer(raster_path)
| gpl-2.0 |
codilime/cloudify-plugins-common | cloudify/tests/test_task_subgraph.py | 3 | 7425 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from cloudify import decorators
from cloudify.workflows import tasks
from cloudify.test_utils import workflow_test
@decorators.operation
def operation(ctx, arg, total_retries=0, **_):
    """Record the invocation on the node instance, retrying until the
    per-argument retry budget (``total_retries``) is exhausted."""
    props = ctx.instance.runtime_properties
    props['invocations'] = props.get('invocations', []) + [arg]
    retries = props.get('current_retries', {})
    used = retries.get(arg, 0)
    if used < total_retries:
        retries[arg] = used + 1
        props['current_retries'] = retries
        return ctx.operation.retry()
@decorators.workflow
def workflow(ctx, test, **_):
    """Dispatch to one of the subgraph scenarios by name and run the graph."""
    instance = next(next(ctx.nodes).instances)
    graph = ctx.graph_mode()
    scenarios = {
        'subgraph': _test_subgraph,
        'nested_subgraph': _test_nested_subgraph,
        # the helper name carries a historical typo ("subgrap")
        'empty_subgraph': _test_empty_subgrap,
        'task_in_subgraph_retry': _test_task_in_subgraph_retry,
        'subgraph_retry': _test_subgraph_retry,
        'subgraph_retry_failure': _test_subgraph_retry_failure,
        'task_in_two_subgraphs': _test_task_in_two_subgraphs,
    }
    scenarios[test](ctx, graph, instance)
    graph.execute()
def _test_subgraph(ctx, graph, instance):
    """Two sequential subgraphs, each running two operations in order."""
    top_seq = graph.sequence()
    for outer in range(2):
        sub = graph.subgraph('sub{0}'.format(outer))
        sub_seq = sub.sequence()
        for inner in range(2):
            op = instance.execute_operation(
                'interface.operation', kwargs={'arg': (outer, inner)})
            sub_seq.add(op)
        top_seq.add(sub)
def _test_nested_subgraph(ctx, graph, instance):
    """Two subgraphs, each holding two nested subgraphs of two operations."""
    top_seq = graph.sequence()
    for i in range(2):
        outer = graph.subgraph('sub{0}'.format(i))
        outer_seq = outer.sequence()
        for j in range(2):
            inner = outer.subgraph('subsub{0}_{1}'.format(i, j))
            inner_seq = inner.sequence()
            for k in range(2):
                op = instance.execute_operation(
                    'interface.operation', kwargs={'arg': (i, j, k)})
                inner_seq.add(op)
            outer_seq.add(inner)
        top_seq.add(outer)
def _test_empty_subgrap(ctx, graph, instance):
    # An empty subgraph: executing the graph must simply succeed.
    # (The function name keeps the historical "subgrap" typo because the
    # workflow dispatch table above refers to it by this exact name.)
    graph.subgraph('empty')
def _test_task_in_subgraph_retry(ctx, graph, instance):
    """Like _test_subgraph, but every operation retries itself once."""
    top_seq = graph.sequence()
    for outer in range(2):
        sub = graph.subgraph('sub{0}'.format(outer))
        sub_seq = sub.sequence()
        for inner in range(2):
            op = instance.execute_operation(
                'interface.operation',
                kwargs={'arg': (outer, inner), 'total_retries': 1})
            sub_seq.add(op)
        top_seq.add(sub)
def _test_subgraph_retry(ctx, graph, instance):
    # A subgraph whose single operation asks for 2 retries -- more than
    # task_retries allows -- so the subgraph fails; its on_failure handler
    # replaces it once with a subgraph whose operation succeeds immediately.
    def build_graph(total_retries):
        result = graph.subgraph('retried')
        result.add_task(instance.execute_operation(
            'interface.operation', kwargs={'arg': '',
                                           'total_retries': total_retries}))
        return result
    subgraph = build_graph(total_retries=2)
    def retry_handler(subgraph2):
        # only retry when the failure comes from our own operation task
        if subgraph2.failed_task.name != ('cloudify.tests.test_task_subgraph.'
                                          'operation'):
            return tasks.HandlerResult.fail()
        result = tasks.HandlerResult.retry()
        # the replacement subgraph needs no retries, so it succeeds
        result.retried_task = build_graph(total_retries=0)
        result.retried_task.current_retries = subgraph2.current_retries + 1
        return result
    subgraph.on_failure = retry_handler
def _test_subgraph_retry_failure(ctx, graph, instance):
    # The operation wants 20 retries -- far more than the workflow allows --
    # so every rebuilt subgraph fails again; the handler keeps retrying until
    # the subgraph retry budget (subgraph_retries) is exhausted.
    def build_graph():
        result = graph.subgraph('retried')
        result.add_task(instance.execute_operation(
            'interface.operation', kwargs={'arg': '',
                                           'total_retries': 20}))
        return result
    subgraph = build_graph()
    def retry_handler(subgraph2):
        result = tasks.HandlerResult.retry()
        # unlike _test_subgraph_retry, the replacement re-installs this
        # handler so the subgraph can be retried repeatedly
        result.retried_task = build_graph()
        result.retried_task.on_failure = retry_handler
        result.retried_task.current_retries = subgraph2.current_retries + 1
        return result
    subgraph.on_failure = retry_handler
def _test_task_in_two_subgraphs(ctx, graph, instance):
    """Adding the same task to two subgraphs is invalid and must fail."""
    task = instance.execute_operation('interface.operation')
    first = graph.subgraph('sub1')
    second = graph.subgraph('sub2')
    first.add_task(task)
    second.add_task(task)
class TaskSubgraphWorkflowTests(testtools.TestCase):
    """End-to-end tests for task subgraphs, driven through the 'workflow'
    defined above against a local cloudify test environment."""

    @workflow_test('resources/blueprints/test-task-subgraph-blueprint.yaml')
    def setUp(self, env=None):
        super(TaskSubgraphWorkflowTests, self).setUp()
        # env is injected by the workflow_test decorator
        self.env = env

    def _run(self, test, subgraph_retries=0):
        # task_retries=1 grants each operation a single self-retry; the
        # scenario name selects a _test_* helper via the workflow dispatch
        self.env.execute('workflow',
                         parameters={'test': test},
                         task_retries=1,
                         task_retry_interval=0,
                         subgraph_retries=subgraph_retries)

    @property
    def invocations(self):
        # invocation log recorded by the 'operation' task above
        return self.env.storage.get_node_instances()[0].runtime_properties[
            'invocations']

    def test_task_subgraph(self):
        self._run('subgraph')
        invocations = self.invocations
        # 2 subgraphs x 2 operations, executed strictly in order
        self.assertEqual(len(invocations), 4)
        self.assertEqual(invocations, sorted(invocations))

    def test_nested_task_subgraph(self):
        self._run('nested_subgraph')
        invocations = self.invocations
        # 2 x 2 x 2 operations, executed strictly in order
        self.assertEqual(len(invocations), 8)
        self.assertEqual(invocations, sorted(invocations))

    def test_empty_subgraph(self):
        # must not raise even though the subgraph contains no tasks
        self._run('empty_subgraph')

    def test_task_in_subgraph_retry(self):
        self._run('task_in_subgraph_retry')
        invocations = self.invocations
        # 4 operations, each invoked twice (one retry each), in order
        self.assertEqual(len(invocations), 8)
        self.assertEqual(invocations, sorted(invocations))
        for i in range(4):
            # each retry repeats the same argument back to back
            self.assertEqual(invocations[2*i], invocations[2*i+1])

    def test_subgraph_retry_sanity(self):
        # without a subgraph retry budget the workflow must fail after the
        # operation's own retry (2 invocations total)
        self.assertRaises(RuntimeError,
                          self._run, 'subgraph_retry', subgraph_retries=0)
        self.assertEqual(len(self.invocations), 2)

    def test_subgraph_retry(self):
        # one subgraph retry: 2 failed invocations + 1 successful one
        self._run('subgraph_retry', subgraph_retries=1)
        self.assertEqual(len(self.invocations), 3)

    def test_subgraph_retry_failure(self):
        # the retried subgraphs keep failing: (1 + 2 retries) x 2 invocations
        self.assertRaises(RuntimeError,
                          self._run, 'subgraph_retry_failure',
                          subgraph_retries=2)
        self.assertEqual(len(self.invocations), 6)

    def test_invalid_task_in_two_subgraphs(self):
        # a task may belong to at most one subgraph
        self.assertRaises(RuntimeError,
                          self._run, 'task_in_two_subgraphs')
| apache-2.0 |
pdufour/sqlalchemy | examples/dogpile_caching/local_session_caching.py | 30 | 3372 | """local_session_caching.py
Grok everything so far ? This example
creates a new dogpile.cache backend that will persist data in a dictionary
which is local to the current session. remove() the session
and the cache is gone.
Create a new Dogpile cache backend that will store
cached data local to the current Session.
This is an advanced example which assumes familiarity
with the basic operation of CachingQuery.
"""
from dogpile.cache.api import CacheBackend, NO_VALUE
from dogpile.cache.region import register_backend
class ScopedSessionBackend(CacheBackend):
    """Dogpile cache backend storing values on the current scoped Session.

    Cached entries live in a dictionary attached to the Session object
    itself, so the cached objects are the same instances the session holds -
    merge() becomes a formality rather than a copy.  This makes the cache
    safe for updates from an identity perspective (deletes remain awkward).
    Removing the session (``Session.remove()``) discards the cache with it.
    """

    def __init__(self, arguments):
        self.scoped_session = arguments['scoped_session']

    def get(self, key):
        return self._cache_dictionary.get(key, NO_VALUE)

    def set(self, key, value):
        self._cache_dictionary[key] = value

    def delete(self, key):
        self._cache_dictionary.pop(key, None)

    @property
    def _cache_dictionary(self):
        """Dictionary used as cache storage, lazily created on the Session."""
        session = self.scoped_session()
        if not hasattr(session, '_cache_dictionary'):
            session._cache_dictionary = {}
        return session._cache_dictionary
# Make this backend loadable by dogpile under the "sqlalchemy.session" name.
register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend")
# Demonstration script: caches a query on the scoped session, shows that the
# cache disappears with Session.remove(), and that identity is preserved.
if __name__ == '__main__':
    from .environment import Session, regions
    from .caching_query import FromCache
    from dogpile.cache import make_region

    # set up a region based on the ScopedSessionBackend,
    # pointing to the scoped_session declared in the example
    # environment.
    regions['local_session'] = make_region().configure(
        'sqlalchemy.session',
        arguments={
            "scoped_session": Session
        }
    )

    from .model import Person

    # query to load Person by name, with criterion
    # of "person 10"
    q = Session.query(Person).\
        options(FromCache("local_session")).\
        filter(Person.name == "person 10")

    # load from DB
    person10 = q.one()

    # next call, the query is cached.
    person10 = q.one()

    # clear out the Session.  The "_cache_dictionary" dictionary
    # disappears with it.
    Session.remove()

    # query calls from DB again
    person10 = q.one()

    # identity is preserved - person10 is the *same* object that's
    # ultimately inside the cache.  So it is safe to manipulate
    # the not-queried-for attributes of objects when using such a
    # cache without the need to invalidate - however, any change
    # that would change the results of a cached query, such as
    # inserts, deletes, or modification to attributes that are
    # part of query criterion, still require careful invalidation.
    cache, key = q._get_cache_plus_key()
    assert person10 is cache.get(key)[0]
| mit |
gsobczyk/hamster | waflib/Tools/intltool.py | 56 | 6784 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
Support for translation tools such as msgfmt and intltool
Usage::
def configure(conf):
conf.load('gnu_dirs intltool')
def build(bld):
# process the .po files into .gmo files, and install them in LOCALEDIR
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
# process an input file, substituting the translations from the po dir
bld(
features = "intltool_in",
podir = "../po",
style = "desktop",
flags = ["-u"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory.
"""
from __future__ import with_statement
import os, re
from waflib import Context, Task, Utils, Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature, before_method, taskgen_method
from waflib.Logs import error
from waflib.Configure import conf
_style_flags = {
'ba': '-b',
'desktop': '-d',
'keys': '-k',
'quoted': '--quoted-style',
'quotedxml': '--quotedxml-style',
'rfc822deb': '-r',
'schemas': '-s',
'xml': '-x',
}
@taskgen_method
def ensure_localedir(self):
    """Make sure env.LOCALEDIR is set.

    It is derived from DATAROOTDIR/locale when available (use the gnu_dirs
    tool to get configurable directories), otherwise PREFIX/share/locale.
    """
    env = self.env
    if env.LOCALEDIR:
        return
    if env.DATAROOTDIR:
        env.LOCALEDIR = os.path.join(env.DATAROOTDIR, 'locale')
    else:
        env.LOCALEDIR = os.path.join(env.PREFIX, 'share', 'locale')
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
    """
    Creates tasks to translate files by intltool-merge::

        def build(bld):
            bld(
                features  = "intltool_in",
                podir     = "../po",
                style     = "desktop",
                flags     = ["-u"],
                source    = 'kupfer.desktop.in',
                install_path = "${DATADIR}/applications",
            )

    :param podir: location of the .po files
    :type podir: string
    :param source: source files to process
    :type source: list of string
    :param style: the intltool-merge mode of operation, can be one of the following values:
      ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``.
      See the ``intltool-merge`` man page for more information about supported modes of operation.
    :type style: string
    :param flags: compilation flags ("-quc" by default)
    :type flags: list of string
    :param install_path: installation path
    :type install_path: string
    """
    # prevent the default source processing: the .in files are consumed here
    try:
        self.meths.remove('process_source')
    except ValueError:
        pass
    self.ensure_localedir()
    podir = getattr(self, 'podir', '.')
    podirnode = self.path.find_dir(podir)
    if not podirnode:
        error("could not find the podir %r" % podir)
        return
    # the cache file lives in the build directory next to the po files
    cache = getattr(self, 'intlcache', '.intlcache')
    self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)]
    self.env.INTLPODIR = podirnode.bldpath()
    self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT))
    # -c (caching) is added through INTLCACHE; a second one would conflict
    if '-c' in self.env.INTLFLAGS:
        self.bld.fatal('Redundant -c flag in intltool task %r' % self)
    style = getattr(self, 'style', None)
    if style:
        try:
            style_flag = _style_flags[style]
        except KeyError:
            self.bld.fatal('intltool_in style "%s" is not valid' % style)
        self.env.append_unique('INTLFLAGS', [style_flag])
    # one intltool task per source file; the output drops the .in extension
    for i in self.to_list(self.source):
        node = self.path.find_resource(i)
        task = self.create_task('intltool', node, node.change_ext(''))
        inst = getattr(self, 'install_path', None)
        if inst:
            self.add_install_files(install_to=inst, install_from=task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
    """
    Creates tasks to process po files::

        def build(bld):
            bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")

    The relevant task generator arguments are:

    :param podir: directory of the .po files
    :type podir: string
    :param appname: name of the application
    :type appname: string
    :param install_path: installation directory
    :type install_path: string

    The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
    """
    # prevent the default source processing: the po files are consumed here
    try:
        self.meths.remove('process_source')
    except ValueError:
        pass
    self.ensure_localedir()
    appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
    podir = getattr(self, 'podir', '.')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')
    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # scan LINGUAS file for locales to process
        with open(linguas.abspath()) as f:
            langs = []
            for line in f.readlines():
                # ignore lines containing comments
                if not line.startswith('#'):
                    langs += line.split()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            # Make sure that we only process lines which contain locales
            if re_linguas.match(lang):
                node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
                task = self.create_task('po', node, node.change_ext('.mo'))
                if inst:
                    # install as e.g. LOCALEDIR/fr/LC_MESSAGES/myapp.mo
                    filename = task.outputs[0].name
                    (langname, ext) = os.path.splitext(filename)
                    inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
                    self.add_install_as(install_to=inst_file, install_from=task.outputs[0],
                        chmod=getattr(self, 'chmod', Utils.O644))
    else:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
class po(Task.Task):
    """
    Compiles .po files into .gmo files (via msgfmt, see ``find_msgfmt``)
    """
    run_str = '${MSGFMT} -o ${TGT} ${SRC}'
    color = 'BLUE'
class intltool(Task.Task):
    """
    Calls intltool-merge to update translation files
    (flags, cache and po dir are configured by ``apply_intltool_in_f``)
    """
    run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
    color = 'BLUE'
@conf
def find_msgfmt(conf):
    """
    Detects msgfmt and sets the ``MSGFMT`` variable
    (aborts the configuration when the program is missing)
    """
    conf.find_program('msgfmt', var='MSGFMT')
@conf
def find_intltool_merge(conf):
    """
    Detects intltool-merge (a Perl script, so Perl is located first) and
    presets the intltool cache/flag defaults used by ``apply_intltool_in_f``
    """
    if not conf.env.PERL:
        conf.find_program('perl', var='PERL')
    conf.env.INTLCACHE_ST = '--cache=%s'
    conf.env.INTLFLAGS_DEFAULT = ['-q', '-u']
    conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL')
def configure(conf):
    """
    Detects the program *msgfmt* and set *conf.env.MSGFMT*.
    Detects the program *intltool-merge* and set *conf.env.INTLTOOL*.
    It is possible to set INTLTOOL in the environment, but it must not have spaces in it::

        $ INTLTOOL="/path/to/the program/intltool" waf configure

    If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*.
    """
    conf.find_msgfmt()
    conf.find_intltool_merge()
    # locale.h is only checked when a compiler has been loaded beforehand
    if conf.env.CC or conf.env.CXX:
        conf.check(header_name='locale.h')
| gpl-3.0 |
bioconda/recipes | recipes/biopet-extractadaptersfastqc/biopet-extractadaptersfastqc.py | 61 | 3393 | #!/usr/bin/env python
#
# Wrapper script for starting the biopet-extractadaptersfastqc JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'ExtractAdaptersFastqc-assembly-0.2.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the directory portion of *path* after resolving symlinks."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter to use.

    Prefers $JAVA_HOME/bin/java when it exists and is executable; otherwise
    falls back to plain 'java' found on the PATH.
    """
    java_home = getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, 'bin', 'java')
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct the Java argument lists from our own argument list.

    The argument list passed in *argv* must not include the script name.

    :returns: a 4-tuple of
        ``(memory_options, prop_options, passthrough_options, exec_dir)``
        (the original docstring claimed a 3-tuple; ``exec_dir`` is the
        optional installation directory taken from ``--exec_dir=...``).
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            # split only on the first '=' so directory names containing '='
            # survive; strip optional surrounding quotes
            exec_dir = arg.split('=', 1)[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # copy the whole distribution (jar, libs, resources) there
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Assemble the java command line and run the bundled JAR.

    The tool updates files relative to the jar location; in a multiuser
    setting ``--exec_dir="..."`` designates a writable copy of the
    distribution (created by jvm_opts() when missing).
    """
    java = java_executable()
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir or real_dirname(sys.argv[0])

    # A first passthrough argument naming a class ('eu...') selects
    # classpath invocation; otherwise the jar is executed directly.
    if pass_args and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    command = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(command))
# Standard script entry point.
if __name__ == '__main__':
    main()
| mit |
happy56/kivy | kivy/adapters/simplelistadapter.py | 3 | 2135 | '''
SimpleListAdapter
=================
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is for basic lists,
such as for showing a text-only display of strings, that have no user
interaction.
'''
__all__ = ('SimpleListAdapter', )
from kivy.adapters.adapter import Adapter
from kivy.properties import ListProperty
from kivy.lang import Builder
class SimpleListAdapter(Adapter):
    ''':class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an
    adapter around a Python list.

    From :class:`~kivy.adapters.adapter.Adapter`,
    :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,
    and args_converter properties.
    '''

    data = ListProperty([])
    '''The data list property contains a list of objects (can be strings) that
    will be used directly if no args_converter function is provided. If there
    is an args_converter, the data objects will be passed to it for
    instantiation of item view class instances from the data.

    :data:`data` is a :class:`~kivy.properties.ListProperty`,
    default to [].
    '''

    def __init__(self, **kwargs):
        # Fail early with specific exception types.  ValueError/TypeError are
        # subclasses of Exception, so callers catching the previous generic
        # Exception keep working.
        if 'data' not in kwargs:
            raise ValueError('list adapter: input must include data argument')
        # isinstance (instead of an exact type() check) also accepts
        # list/tuple subclasses, which behave identically here.
        if not isinstance(kwargs['data'], (tuple, list)):
            raise TypeError('list adapter: data must be a tuple or list')
        super(SimpleListAdapter, self).__init__(**kwargs)

    def get_count(self):
        '''Return the number of data items.'''
        return len(self.data)

    def get_data_item(self, index):
        '''Return the data item at *index*, or None when out of range.'''
        if index < 0 or index >= len(self.data):
            return None
        return self.data[index]

    # Returns a view instance for an item.
    def get_view(self, index):
        '''Build a view for the item at *index* from either the view class
        (cls) or the kv template; return None when *index* is out of range.'''
        item = self.get_data_item(index)
        if item is None:
            return None
        item_args = self.args_converter(index, item)
        if self.cls:
            return self.cls(**item_args)
        return Builder.template(self.template, **item_args)
| lgpl-3.0 |
nieklinnenbank/bouwer | test/test_util/test_Singleton.py | 1 | 4375 | #
# Copyright (C) 2012 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import bouwer.util
from test import *
class DummySingleton1(bouwer.util.Singleton):
    """ First Dummy Singleton class (two constructor arguments) """

    def __init__(self, arg1, arg2):
        """ Constructor: remember both arguments for later inspection """
        self.arg1 = arg1
        self.arg2 = arg2
class DummySingleton2(bouwer.util.Singleton):
    """ Second Dummy Singleton class (three constructor arguments) """

    def __init__(self, arg1, arg2, arg3):
        """ Constructor: remember all three arguments for later inspection """
        self.arg1 = arg1
        self.arg2 = arg2
        self.arg3 = arg3
class DummySingleton3(bouwer.util.Singleton):
    """ Third Dummy Singleton class (no constructor arguments) """

    def __init__(self):
        # fixed value so tests can verify initialization happened
        self.arg1 = 123
class SingletonTester(BouwerTester):
    """ Tests for the bouwer.util.Singleton implementation """

    def setUp(self):
        """ Runs before each testcase: reset the singletons under test """
        DummySingleton1.Destroy()
        DummySingleton2.Destroy()

    # NOTE: assertEquals (deprecated alias) replaced by assertEqual throughout.

    def test_single(self):
        """ Verify that Singleton has one instance maximum """
        dummy1 = DummySingleton1.Instance("a", "b")
        dummy2 = DummySingleton1.Instance()
        dummy3 = DummySingleton1.Instance()

        self.assertEqual(dummy1, dummy2,
            'Singleton.Instance() must return the same instance')
        self.assertEqual(dummy1, dummy3,
            'Singleton.Instance() must return the same instance')
        self.assertEqual(id(dummy1), id(dummy2),
            'Singleton.Instance() must return the same instance')
        self.assertEqual(id(dummy1), id(dummy3),
            'Singleton.Instance() must return the same instance')

    def test_single_no_params(self):
        """ Verify that Singletons with no parameters have one instance """
        dummy1 = DummySingleton3.Instance()
        dummy2 = DummySingleton3.Instance()
        dummy3 = DummySingleton3.Instance()

        self.assertEqual(dummy1, dummy2)
        self.assertEqual(dummy1, dummy3)
        self.assertEqual(id(dummy1), id(dummy2))
        self.assertEqual(id(dummy1), id(dummy3))

    def test_unique(self):
        """ Verify that Singleton produces a unique instance per class """
        dummy1 = DummySingleton1.Instance("a", "b")
        dummy2 = DummySingleton2.Instance("a", "b", "c")

        self.assertNotEqual(dummy1, dummy2,
            'Singleton.Instance() must return unique instance per class')
        # typo fixed in the failure message below ("msut" -> "must")
        self.assertNotEqual(id(dummy1), id(dummy2),
            'Singleton.Instance() must return unique instance per class')

    def test_init(self):
        """ Verify that Singletons can only be initialized once """
        dummy1 = DummySingleton1.Instance("a", "b")
        dummy2 = DummySingleton2.Instance("1", "2", "3")

        self.assertRaises(Exception, DummySingleton1.Instance, "a", "b")
        self.assertRaises(Exception, DummySingleton2.Instance, "4", "5", "6")

    def test_no_construct(self):
        """ Verify that Singleton constructors cannot be called directly """
        dummy1 = DummySingleton1.Instance("a", "b")
        dummy3 = DummySingleton3.Instance()

        self.assertRaises(Exception, DummySingleton1, "c", "d")
        self.assertRaises(Exception, DummySingleton3)

    def test_reset(self):
        """ Verify that Destroy() completely destroys the Singleton instance """
        dummy1 = DummySingleton1.Instance("a", "b")
        DummySingleton1.Destroy()
        dummy2 = DummySingleton1.Instance("a", "b")
        dummy3 = DummySingleton1.Instance()

        self.assertNotEqual(dummy1, dummy2)
        self.assertEqual(dummy2, dummy3)
        self.assertEqual(id(dummy2), id(dummy3))
        self.assertEqual(id(dummy2.__orig_init__), id(dummy3.__orig_init__))
        self.assertEqual(id(dummy2.__orig_init__), id(dummy1.__orig_init__))
| gpl-3.0 |
Ernesto99/odoo | openerp/tools/pdf_utils.py | 456 | 3659 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
# Skeleton of an FDF (Forms Data Format) document: HEAD opens the /Fields
# array, TAIL closes it and terminates the file.  write_fields() emits the
# per-field dictionaries between the two.
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""

TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
    """Encode a field name as UTF-16BE with a BOM, as FDF text strings require."""
    widened = "".join("\x00" + char for char in f)
    return "\xfe\xff" + widened
def extract_keys(lines):
    """Collect the field names from the /T entries of FDF dump lines."""
    keys = []
    for line in lines:
        if line.startswith('/V'):
            # value line -- irrelevant for key extraction
            continue
        if line.startswith('/T'):
            # drop the "/T (" prefix plus BOM, and the ")\n" suffix,
            # then remove the UTF-16 NUL padding bytes
            raw = line[7:-2]
            keys.append(''.join(raw.split('\x00')))
    return keys
def write_field(out, key, value):
    """Write one FDF field dictionary; an empty value becomes the null '/'."""
    parts = ["<<\n"]
    if value:
        parts.append("/V (%s)\n" % value)
    else:
        parts.append("/V /\n")
    parts.append("/T (%s)\n" % output_field(key))
    parts.append(">> \n")
    out.write("".join(parts))
def write_fields(out, fields):
    """Write a complete FDF document for *fields* (mapping name -> value)."""
    out.write(HEAD)
    for name in fields:
        write_field(out, name, fields[name])
    out.write(TAIL)
def extract_keys_from_pdf(filename):
    """Return the form field names of *filename*, using the pdftk utility.

    The FDF is generated into a temporary file which is always removed.
    """
    # what about using 'pdftk filename dump_data_fields' and parsing the output ?
    fd, tmp_file = tempfile.mkstemp(".fdf")
    # mkstemp returns an open OS-level descriptor: close it so it does not
    # leak; only pdftk writes to the file afterwards
    os.close(fd)
    try:
        # quote both paths so file names containing spaces survive the shell
        os.system('pdftk "%s" generate_fdf output "%s"' % (filename, tmp_file))
        with open(tmp_file, "r") as ofile:
            lines = ofile.readlines()
    finally:
        try:
            os.remove(tmp_file)
        except Exception:
            pass # nothing to do
    return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
    """Fill the form of *infile* with *fields* (mapping name -> value) and
    write a flattened (non-editable) *outfile* using pdftk."""
    fd, tmp_file = tempfile.mkstemp(".fdf")
    # mkstemp returns an open OS-level descriptor: close it to avoid a leak;
    # the file is reopened below through the normal file API
    os.close(fd)
    try:
        with open(tmp_file, "w") as ofile:
            write_fields(ofile, fields)
        # quote all paths so names containing spaces survive the shell;
        # 'flatten' turns the filled form into plain text
        os.system('pdftk "%s" fill_form "%s" output "%s" flatten'
                  % (infile, tmp_file, outfile))
    finally:
        try:
            os.remove(tmp_file)
        except Exception:
            pass # nothing to do
def testfill_pdf(infile, outfile):
    """Self-test helper: fill every form field of *infile* with its own name.

    Bug fix: write_fields() iterates its argument as a mapping
    (``value = fields[key]``), but this function previously built a *list*
    of ``(key, key, '')`` tuples, which raised TypeError on the first field.
    A dict of name -> name is what the downstream code expects.
    """
    keys = extract_keys_from_pdf(infile)
    fields = dict((key, key) for key in keys)
    fill_pdf(infile, outfile, fields)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cbenhagen/buildozer | docs/source/conf.py | 9 | 7963 | # -*- coding: utf-8 -*-
#
# Buildozer documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 20 16:56:31 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Buildozer'
copyright = u'2014, Kivy\'s Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.11'
# The full version, including alpha/beta/rc tags.
release = '0.11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Buildozerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Buildozer.tex', u'Buildozer Documentation',
u'Kivy\'s Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'buildozer', u'Buildozer Documentation',
[u'Kivy\'s Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Buildozer', u'Buildozer Documentation',
u'Kivy\'s Developers', 'Buildozer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
cclauss/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/packaging/_structures.py | 906 | 1809 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
    """Singleton sentinel that orders greater than every other object."""

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        # Only another instance of this class compares equal.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __lt__(self, other):
        # Nothing exceeds Infinity ...
        return False

    def __le__(self, other):
        return False

    def __gt__(self, other):
        # ... and Infinity exceeds everything.
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity


# Replace the class with its single instance; the type itself is never
# needed again.
Infinity = Infinity()
class NegativeInfinity(object):
    """Singleton sentinel that orders less than every other object."""

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        # Only another instance of this class compares equal.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __lt__(self, other):
        # NegativeInfinity is below everything ...
        return True

    def __le__(self, other):
        return True

    def __gt__(self, other):
        # ... and nothing is below it.
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity


# Replace the class with its single instance; the type itself is never
# needed again.
NegativeInfinity = NegativeInfinity()
| mit |
loco-odoo/localizacion_co | openerp/addons/account/report/account_treasury_report.py | 385 | 3872 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
class account_treasury_report(osv.osv):
    """Read-only treasury analysis model backed by a SQL view.

    Aggregates posted account.move.line amounts on liquidity accounts per
    period and company.  Running (starting/ending) balances are computed in
    Python because they depend on the chronological order of the rows.
    """
    _name = "account.treasury.report"
    _description = "Treasury Analysis"
    # The ORM must not create a table: init() below (re)creates the backing
    # SQL view instead.
    _auto = False

    def _compute_balances(self, cr, uid, ids, field_names, arg=None, context=None,
                          query='', query_params=()):
        """Function-field getter for starting_balance / ending_balance.

        Running balances must accumulate over *all* rows in the model's
        date order, not only over the requested ids, so every record is
        browsed while a per-company running sum is maintained.
        """
        all_treasury_lines = self.search(cr, uid, [], context=context)
        all_companies = self.pool.get('res.company').search(cr, uid, [], context=context)
        # One accumulator per company; the view groups rows by company.
        current_sum = dict((company, 0.0) for company in all_companies)
        res = dict((id, dict((fn, 0.0) for fn in field_names)) for id in all_treasury_lines)
        for record in self.browse(cr, uid, all_treasury_lines, context=context):
            # Balance before this period, then advance the running total.
            res[record.id]['starting_balance'] = current_sum[record.company_id.id]
            current_sum[record.company_id.id] += record.balance
            res[record.id]['ending_balance'] = current_sum[record.company_id.id]
        return res

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscalyear', readonly=True),
        'period_id': fields.many2one('account.period', 'Period', readonly=True),
        'debit': fields.float('Debit', readonly=True),
        'credit': fields.float('Credit', readonly=True),
        'balance': fields.float('Balance', readonly=True),
        'date': fields.date('Beginning of Period Date', readonly=True),
        # Both computed fields share one getter via multi='balance'.
        'starting_balance': fields.function(_compute_balances, digits_compute=dp.get_precision('Account'), string='Starting Balance', multi='balance'),
        'ending_balance': fields.function(_compute_balances, digits_compute=dp.get_precision('Account'), string='Ending Balance', multi='balance'),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
    }
    # Chronological order is what makes the running sums above meaningful.
    _order = 'date asc'

    def init(self, cr):
        """(Re)create the SQL view aggregating liquidity move lines per period."""
        tools.drop_view_if_exists(cr, 'account_treasury_report')
        cr.execute("""
            create or replace view account_treasury_report as (
                select
                    p.id as id,
                    p.fiscalyear_id as fiscalyear_id,
                    p.id as period_id,
                    sum(l.debit) as debit,
                    sum(l.credit) as credit,
                    sum(l.debit-l.credit) as balance,
                    p.date_start as date,
                    am.company_id as company_id
                from
                    account_move_line l
                    left join account_account a on (l.account_id = a.id)
                    left join account_move am on (am.id=l.move_id)
                    left join account_period p on (am.period_id=p.id)
                where l.state != 'draft'
                and a.type = 'liquidity'
                group by p.id, p.fiscalyear_id, p.date_start, am.company_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Julius2342/pyvlx | pyvlx/api/frames/frame_discover_nodes.py | 1 | 1711 | """Module for discover nodes requests."""
from pyvlx.const import Command, NodeType
from .frame import FrameBase
class FrameDiscoverNodesRequest(FrameBase):
    """Request frame asking the gateway to discover nodes of a given type."""

    PAYLOAD_LEN = 1

    def __init__(self, node_type=NodeType.NO_TYPE):
        """Init Frame."""
        super().__init__(Command.GW_CS_DISCOVER_NODES_REQ)
        self.node_type = node_type

    def get_payload(self):
        """Return Payload: a single byte holding the node type."""
        return bytes([self.node_type.value])

    def from_payload(self, payload):
        """Init frame from binary data."""
        self.node_type = NodeType(payload[0])

    def __str__(self):
        """Return human readable string."""
        return '<{} node_type="{}"/>'.format(type(self).__name__, self.node_type)
class FrameDiscoverNodesConfirmation(FrameBase):
    """Frame for discover nodes confirmation."""

    # The confirmation carries no payload at all.
    PAYLOAD_LEN = 0

    def __init__(self):
        """Init Frame."""
        super().__init__(Command.GW_CS_DISCOVER_NODES_CFM)
class FrameDiscoverNodesNotification(FrameBase):
    """Notification frame carrying the raw 131-byte discovery result."""

    PAYLOAD_LEN = 131

    def __init__(self):
        """Init Frame."""
        super().__init__(Command.GW_CS_DISCOVER_NODES_NTF)
        # The payload is kept as an opaque blob, zero-filled until received.
        self.payload = bytes(self.PAYLOAD_LEN)

    def get_payload(self):
        """Return Payload."""
        return self.payload

    def from_payload(self, payload):
        """Init frame from binary data."""
        self.payload = payload

    def __str__(self):
        """Return human readable string."""
        hex_payload = ':'.join('{:02x}'.format(octet) for octet in self.payload)
        return '<{} payload="{}"/>'.format(type(self).__name__, hex_payload)
| lgpl-3.0 |
andrecunha/coh-metrix-dementia | coh/scripts/process_fapesp.py | 1 | 2828 | # -*- coding: utf-8 -*-
# Coh-Metrix-Dementia - Automatic text analysis and classification for dementia.
# Copyright (C) 2014 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals, print_function, division
from sys import argv
import coh
import logging
import os
from itertools import chain
import nltk
from nltk.data import load
import multiprocessing
coh.config.from_object('config')
logger = logging.getLogger(__name__)
stopwords = nltk.corpus.stopwords.words('portuguese')
senter = load('tokenizers/punkt/portuguese.pickle')
word_tokenize = nltk.word_tokenize
def process_file(args):
    """Clean and tokenize one corpus file, writing the result to out_dir.

    *args* is a tuple ``(in_file, i, nfiles, in_dir, out_dir)`` so the
    function can be mapped over a multiprocessing.Pool.
    """
    in_file, i, nfiles, in_dir, out_dir = args
    logger.info('Will process file %s (%d/%d)', in_file, i + 1, nfiles)
    t = coh.Text(filepath=os.path.join(in_dir, in_file))
    # Drop leading metadata paragraphs: everything before the first paragraph
    # ending in sentence punctuation.  Guard against empty paragraphs and
    # against files consisting entirely of such paragraphs (the previous
    # unguarded loop raised IndexError on both).
    deleted = 0
    while t.paragraphs and (not t.paragraphs[0]
                            or t.paragraphs[0][-1] not in ('.', ':', '?', '!')):
        del t.paragraphs[0]
        deleted += 1
    logger.info('Deleted %d lines', deleted)
    sentences = chain.from_iterable(
        senter.tokenize(p) for p in t.paragraphs)
    # Keep only alphabetic, non-stopword tokens, lower-cased.
    tokens = [[word.lower() for word in word_tokenize(sent)
               if word.lower() not in stopwords and word.isalpha()]
              for sent in sentences]
    with open(os.path.join(out_dir, in_file), 'w') as out_file:
        joined_sentences = '\n'.join([' '.join(sentence)
                                      for sentence in tokens])
        out_file.write(joined_sentences)
def main(in_dir, out_dir, nworkers):
    """Process every file of *in_dir* into *out_dir* with *nworkers* processes."""
    logger.info('Listing files...')
    in_files = sorted(os.listdir(in_dir))
    nfiles = len(in_files)
    logger.info('Found %d files in %s.', nfiles, in_dir)
    # Bundle per-file arguments so process_file can be used with Pool.map.
    working_list = [(name, index, nfiles, in_dir, out_dir)
                    for index, name in enumerate(in_files)]
    pool = multiprocessing.Pool(nworkers)
    pool.map(process_file, working_list)
    logger.info('Done processing directory %s.', in_dir)
if __name__ == '__main__':
    # Timestamped log lines are useful for long corpus runs.
    FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.INFO)
    # Expect exactly three CLI arguments: input dir, output dir, worker count.
    if len(argv) != 4:
        print('Usage: python', argv[0], '<in_dir>', '<out_dir>', '<nworkers>')
    else:
        main(argv[1], argv[2], int(argv[3]))
| gpl-3.0 |
rven/odoo | addons/mrp/models/stock_scrap.py | 5 | 1540 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class StockScrap(models.Model):
    """Extend stock.scrap with links to manufacturing / work orders (MRP)."""
    _inherit = 'stock.scrap'

    # Optional link to the manufacturing order the scrapped goods belong to;
    # read-only once the scrap is done.
    production_id = fields.Many2one(
        'mrp.production', 'Manufacturing Order',
        states={'done': [('readonly', True)]}, check_company=True)
    # Purely informative link to a work order (does not restrict quants).
    workorder_id = fields.Many2one(
        'mrp.workorder', 'Work Order',
        states={'done': [('readonly', True)]},
        help='Not to restrict or prefer quants, but informative.', check_company=True)

    @api.onchange('workorder_id')
    def _onchange_workorder_id(self):
        """Default the scrap source location from the work order's MO."""
        if self.workorder_id:
            self.location_id = self.workorder_id.production_id.location_src_id.id

    @api.onchange('production_id')
    def _onchange_production_id(self):
        """Default the scrap source location from the manufacturing order.

        Uses the components' source location while raw moves are still open,
        otherwise falls back to the MO's destination location.
        """
        if self.production_id:
            self.location_id = self.production_id.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and self.production_id.location_src_id.id or self.production_id.location_dest_id.id

    def _prepare_move_values(self):
        """Extend the scrap move values with the MO linkage.

        Finished products are linked via production_id, components via
        raw_material_production_id, so MO costing/traceability stays correct.
        """
        vals = super(StockScrap, self)._prepare_move_values()
        if self.production_id:
            vals['origin'] = vals['origin'] or self.production_id.name
            if self.product_id in self.production_id.move_finished_ids.mapped('product_id'):
                vals.update({'production_id': self.production_id.id})
            else:
                vals.update({'raw_material_production_id': self.production_id.id})
        return vals
| agpl-3.0 |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/test/test_decorators.py | 194 | 9849 | import unittest
from test import test_support
def funcattrs(**kwds):
    """Decorator factory: attach the given keyword attributes to a function."""
    def decorate(func):
        for name, value in kwds.items():
            setattr(func, name, value)
        return func
    return decorate
class MiscDecorators(object):
    """Namespace holding assorted decorator factories used by the tests."""

    @staticmethod
    def author(name):
        """Decorator attaching an ``author`` attribute to the function."""
        def decorate(func):
            func.author = name
            return func
        return decorate
# -----------------------------------------------
class DbcheckError(Exception):
    """Raised by dbcheck() when a debugging assertion evaluates false."""

    def __init__(self, exprstr, func, args, kwds):
        # A real version of this would also store the pieces as attributes.
        message = ("dbcheck %r failed (func=%s args=%s kwds=%s)"
                   % (exprstr, func, args, kwds))
        Exception.__init__(self, message)
def dbcheck(exprstr, globals=None, locals=None):
    """Decorator factory implementing debugging assertions.

    *exprstr* is evaluated before each call; when *globals*/*locals* are
    left as None the expression sees the wrapper's own locals, i.e. the
    call's ``args`` and ``kwds``.  A false result raises DbcheckError.
    """
    def decorate(func):
        # func.__name__ is the Python-2 ``func_name`` alias, so this works
        # unchanged on both Python 2 and Python 3.
        expr = compile(exprstr, "dbcheck-%s" % func.__name__, "eval")
        def check(*args, **kwds):
            if not eval(expr, globals, locals):
                raise DbcheckError(exprstr, func, args, kwds)
            return func(*args, **kwds)
        return check
    return decorate
# -----------------------------------------------
def countcalls(counts):
    """Decorator factory counting calls to the wrapped function.

    The tally lives in the caller-supplied *counts* mapping, keyed by the
    function's name; the key is initialised to 0 at decoration time.
    """
    def decorate(func):
        # __name__ is the Python-2 ``func_name`` alias, so this works
        # unchanged on both Python 2 and Python 3.
        func_name = func.__name__
        counts[func_name] = 0
        def call(*args, **kwds):
            counts[func_name] += 1
            return func(*args, **kwds)
        call.__name__ = func_name
        return call
    return decorate
# -----------------------------------------------
def memoize(func):
    """Memoize *func* by its positional arguments.

    Results are cached in a dict keyed by the argument tuple; calls with
    unhashable arguments fall through uncached.  Keyword arguments are not
    supported.
    """
    saved = {}
    def call(*args):
        try:
            return saved[args]
        except KeyError:
            res = func(*args)
            saved[args] = res
            return res
        except TypeError:
            # Unhashable argument: call through without caching.
            return func(*args)
    # __name__ is the Python-2 ``func_name`` alias, so this works
    # unchanged on both Python 2 and Python 3.
    call.__name__ = func.__name__
    return call
# -----------------------------------------------
class TestDecorators(unittest.TestCase):
    """Exercise function-decorator syntax, evaluation order and helpers.

    Uses the module-level helpers (funcattrs, MiscDecorators, dbcheck,
    countcalls, memoize) defined above.
    """

    def test_single(self):
        class C(object):
            @staticmethod
            def foo(): return 42
        self.assertEqual(C.foo(), 42)
        self.assertEqual(C().foo(), 42)

    def test_staticmethod_function(self):
        # A bare staticmethod object (outside a class) is not callable here.
        @staticmethod
        def notamethod(x):
            return x
        self.assertRaises(TypeError, notamethod, 1)

    def test_dotted(self):
        decorators = MiscDecorators()
        @decorators.author('Cleese')
        def foo(): return 42
        self.assertEqual(foo(), 42)
        self.assertEqual(foo.author, 'Cleese')

    def test_argforms(self):
        # A few tests of argument passing, as we use restricted form
        # of expressions for decorators.
        def noteargs(*args, **kwds):
            def decorate(func):
                setattr(func, 'dbval', (args, kwds))
                return func
            return decorate
        args = ( 'Now', 'is', 'the', 'time' )
        kwds = dict(one=1, two=2)
        @noteargs(*args, **kwds)
        def f1(): return 42
        self.assertEqual(f1(), 42)
        self.assertEqual(f1.dbval, (args, kwds))
        @noteargs('terry', 'gilliam', eric='idle', john='cleese')
        def f2(): return 84
        self.assertEqual(f2(), 84)
        self.assertEqual(f2.dbval, (('terry', 'gilliam'),
                                    dict(eric='idle', john='cleese')))
        @noteargs(1, 2,)
        def f3(): pass
        self.assertEqual(f3.dbval, ((1, 2), {}))

    def test_dbcheck(self):
        @dbcheck('args[1] is not None')
        def f(a, b):
            return a + b
        self.assertEqual(f(1, 2), 3)
        self.assertRaises(DbcheckError, f, 1, None)

    def test_memoize(self):
        counts = {}
        @memoize
        @countcalls(counts)
        def double(x):
            return x * 2
        self.assertEqual(double.func_name, 'double')
        self.assertEqual(counts, dict(double=0))
        # Only the first call with a given argument bumps the call count:
        #
        self.assertEqual(double(2), 4)
        self.assertEqual(counts['double'], 1)
        self.assertEqual(double(2), 4)
        self.assertEqual(counts['double'], 1)
        self.assertEqual(double(3), 6)
        self.assertEqual(counts['double'], 2)
        # Unhashable arguments do not get memoized:
        #
        self.assertEqual(double([10]), [10, 10])
        self.assertEqual(counts['double'], 3)
        self.assertEqual(double([10]), [10, 10])
        self.assertEqual(counts['double'], 4)

    def test_errors(self):
        # Test syntax restrictions - these are all compile-time errors:
        #
        for expr in [ "1+2", "x[3]", "(1, 2)" ]:
            # Sanity check: is expr is a valid expression by itself?
            compile(expr, "testexpr", "exec")
            codestr = "@%s\ndef f(): pass" % expr
            self.assertRaises(SyntaxError, compile, codestr, "test", "exec")
        # You can't put multiple decorators on a single line:
        #
        self.assertRaises(SyntaxError, compile,
                          "@f1 @f2\ndef f(): pass", "test", "exec")
        # Test runtime errors
        def unimp(func):
            raise NotImplementedError
        context = dict(nullval=None, unimp=unimp)
        for expr, exc in [ ("undef", NameError),
                           ("nullval", TypeError),
                           ("nullval.attr", AttributeError),
                           ("unimp", NotImplementedError)]:
            codestr = "@%s\ndef f(): pass\nassert f() is None" % expr
            code = compile(codestr, "test", "exec")
            self.assertRaises(exc, eval, code, context)

    def test_double(self):
        class C(object):
            @funcattrs(abc=1, xyz="haha")
            @funcattrs(booh=42)
            def foo(self): return 42
        self.assertEqual(C().foo(), 42)
        self.assertEqual(C.foo.abc, 1)
        self.assertEqual(C.foo.xyz, "haha")
        self.assertEqual(C.foo.booh, 42)

    def test_order(self):
        # Test that decorators are applied in the proper order to the function
        # they are decorating.
        def callnum(num):
            """Decorator factory that returns a decorator that replaces the
            passed-in function with one that returns the value of 'num'"""
            def deco(func):
                return lambda: num
            return deco
        @callnum(2)
        @callnum(1)
        def foo(): return 42
        self.assertEqual(foo(), 2,
                            "Application order of decorators is incorrect")

    def test_eval_order(self):
        # Evaluating a decorated function involves four steps for each
        # decorator-maker (the function that returns a decorator):
        #
        #    1: Evaluate the decorator-maker name
        #    2: Evaluate the decorator-maker arguments (if any)
        #    3: Call the decorator-maker to make a decorator
        #    4: Call the decorator
        #
        # When there are multiple decorators, these steps should be
        # performed in the above order for each decorator, but we should
        # iterate through the decorators in the reverse of the order they
        # appear in the source.
        actions = []
        def make_decorator(tag):
            actions.append('makedec' + tag)
            def decorate(func):
                actions.append('calldec' + tag)
                return func
            return decorate
        class NameLookupTracer (object):
            def __init__(self, index):
                self.index = index
            def __getattr__(self, fname):
                if fname == 'make_decorator':
                    opname, res = ('evalname', make_decorator)
                elif fname == 'arg':
                    opname, res = ('evalargs', str(self.index))
                else:
                    assert False, "Unknown attrname %s" % fname
                actions.append('%s%d' % (opname, self.index))
                return res
        c1, c2, c3 = map(NameLookupTracer, [ 1, 2, 3 ])
        expected_actions = [ 'evalname1', 'evalargs1', 'makedec1',
                             'evalname2', 'evalargs2', 'makedec2',
                             'evalname3', 'evalargs3', 'makedec3',
                             'calldec3', 'calldec2', 'calldec1' ]
        actions = []
        @c1.make_decorator(c1.arg)
        @c2.make_decorator(c2.arg)
        @c3.make_decorator(c3.arg)
        def foo(): return 42
        self.assertEqual(foo(), 42)
        self.assertEqual(actions, expected_actions)
        # Test the equivalence claim in chapter 7 of the reference manual.
        #
        actions = []
        def bar(): return 42
        bar = c1.make_decorator(c1.arg)(c2.make_decorator(c2.arg)(c3.make_decorator(c3.arg)(bar)))
        self.assertEqual(bar(), 42)
        self.assertEqual(actions, expected_actions)
class TestClassDecorators(unittest.TestCase):
    """Exercise class-decorator syntax: single, stacked, and ordering."""

    def test_simple(self):
        def plain(x):
            x.extra = 'Hello'
            return x
        @plain
        class C(object): pass
        self.assertEqual(C.extra, 'Hello')

    def test_double(self):
        def ten(x):
            x.extra = 10
            return x
        def add_five(x):
            x.extra += 5
            return x
        @add_five
        @ten
        class C(object): pass
        self.assertEqual(C.extra, 15)

    def test_order(self):
        # The decorator closest to the class runs first; the outermost wins.
        def applied_first(x):
            x.extra = 'first'
            return x
        def applied_second(x):
            x.extra = 'second'
            return x
        @applied_second
        @applied_first
        class C(object): pass
        self.assertEqual(C.extra, 'second')
def test_main():
    """Run both decorator test suites via the regrtest helper."""
    for suite in (TestDecorators, TestClassDecorators):
        test_support.run_unittest(suite)
if __name__=="__main__":
    # Allow running this test file directly.
    test_main()
| gpl-3.0 |
Vogeltak/pauselan | lib/python3.4/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py | 32 | 38566 | # oracle/cx_oracle.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname\
[?key=value&key=value...]
:url: http://cx-oracle.sourceforge.net/
Additional Connect Arguments
----------------------------
When connecting with ``dbname`` present, the host, port, and dbname tokens are
converted to a TNS name using
the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
directly as a TNS name.
Additional arguments which may be specified either as query string arguments
on the URL, or as keyword arguments to :func:`.create_engine()` are:
* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
all bind parameters. This is required for LOB datatypes but can be
disabled to reduce overhead. Defaults to ``True``. Specific types
can be excluded from this process using the ``exclude_setinputsizes``
parameter.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
be excluded from the "auto setinputsizes" feature. The type names here
must match DBAPI types that are found in the "cx_Oracle" module namespace,
such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
``(STRING, UNICODE)``.
.. versionadded:: 0.8 specific DBAPI types can be excluded from the
auto_setinputsizes feature via the exclude_setinputsizes attribute.
* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
alternatively an integer value. This value is only available as a URL query
string argument.
* ``threaded`` - enable multithreaded access to cx_oracle connections.
Defaults to ``True``. Note that this is the opposite default of the
cx_Oracle DBAPI itself.
* ``service_name`` - An option to use connection string (DSN) with
``SERVICE_NAME`` instead of ``SID``. It can't be passed when a ``database``
part is given.
E.g. ``oracle+cx_oracle://scott:tiger@host:1521/?service_name=hr``
is a valid url. This value is only available as a URL query string argument.
.. versionadded:: 1.0.0
.. _cx_oracle_unicode:
Unicode
-------
The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
ability to return string results as Python unicode objects natively.
When used in Python 3, cx_Oracle returns all strings as Python unicode objects
(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
column values that are of type ``VARCHAR`` or other non-unicode string types,
it will return values as Python strings (e.g. bytestrings).
The cx_Oracle SQLAlchemy dialect presents two different options for the use
case of returning ``VARCHAR`` column values as Python unicode objects under
Python 2:
* the cx_Oracle DBAPI has the ability to coerce all string results to Python
unicode objects unconditionally using output type handlers. This has
the advantage that the unicode conversion is global to all statements
at the cx_Oracle driver level, meaning it works with raw textual SQL
statements that have no typing information associated. However, this system
has been observed to incur signfiicant performance overhead, not only
because it takes effect for all string values unconditionally, but also
because cx_Oracle under Python 2 seems to use a pure-Python function call in
order to do the decode operation, which under cPython can orders of
magnitude slower than doing it using C functions alone.
* SQLAlchemy has unicode-decoding services built in, and when using
SQLAlchemy's C extensions, these functions do not use any Python function
calls and are very fast. The disadvantage to this approach is that the
unicode conversion only takes effect for statements where the
:class:`.Unicode` type or :class:`.String` type with
``convert_unicode=True`` is explicitly associated with the result column.
This is the case for any ORM or Core query or SQL expression as well as for
a :func:`.text` construct that specifies output column types, so in the vast
majority of cases this is not an issue. However, when sending a completely
raw string to :meth:`.Connection.execute`, this typing information isn't
present, unless the string is handled within a :func:`.text` construct that
adds typing information.
As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
typing system. This keeps cx_Oracle's expensive Python 2 approach
disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
system is used.
To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
:func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::
from sqlalchemy import text, Unicode
result = conn.execute(
text("select username from user").columns(username=Unicode))
.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
for unicode results of non-unicode datatypes in Python 2, after they were
identified as a major performance bottleneck. SQLAlchemy's own unicode
facilities are used instead.
.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_oracle DBAPI supports a limited subset of Oracle's already limited
RETURNING support. Typically, results can only be guaranteed for at most one
column being returned; this is the typical case when SQLAlchemy uses RETURNING
to get just the value of a primary-key-associated sequence value.
Additional column expressions will cause problems in a non-determinative way,
due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is
required for more complex RETURNING scenarios.
For this reason, stability may be enhanced by disabling RETURNING support
completely; SQLAlchemy otherwise will use RETURNING to fetch newly
sequence-generated primary keys. As illustrated in :ref:`oracle_returning`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
.. seealso::
http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693
- OCI documentation for RETURNING
http://sourceforge.net/mailarchive/message.php?msg_id=31338136
- cx_oracle developer commentary
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
converts these to strings so that the interface of the Binary type is
consistent with that of other backends, and so that the linkage to a live
cursor is not needed in scenarios like result.fetchmany() and
result.fetchall(). This means that by default, LOB objects are fully fetched
unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.
To disable this processing, pass ``auto_convert_lobs=False`` to
:func:`.create_engine()`.
Two Phase Transaction Support
-----------------------------
Two Phase transactions are implemented using XA transactions, and are known
to work in a rudimental fashion with recent versions of cx_Oracle
as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
considered to be robust and should still be regarded as experimental.
In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
two phase which prevents
a particular DBAPI connection from being consistently usable in both
prepared transactions as well as traditional DBAPI usage patterns; therefore
once a particular connection is used via :meth:`.Connection.begin_prepared`,
all subsequent usages of the underlying DBAPI connection must be within
the context of prepared transactions.
The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
connections. Therefore, due to the above glitch, a DBAPI connection that has
been used in a two-phase operation, and is then returned to the pool, will
not be usable in a non-two-phase context. To avoid this situation,
the application can make one of several choices:
* Disable connection pooling using :class:`.NullPool`
* Ensure that the particular :class:`.Engine` in use is only used
for two-phase operations. A :class:`.Engine` bound to an ORM
:class:`.Session` which includes ``twophase=True`` will consistently
use the two-phase transaction style.
* For ad-hoc two-phase operations without disabling pooling, the DBAPI
connection in use can be evicted from the connection pool using the
:meth:`.Connection.detach` method.
.. versionchanged:: 0.8.0b2,0.7.10
Support for cx_oracle prepared transactions has been implemented
and tested.
.. _cx_oracle_numeric:
Precision Numerics
------------------
The SQLAlchemy dialect goes through a lot of steps to ensure
that decimal numbers are sent and received with full accuracy.
An "outputtypehandler" callable is associated with each
cx_oracle connection object which detects numeric types and
receives them as string values, instead of receiving a Python
``float`` directly, which is then passed to the Python
``Decimal`` constructor. The :class:`.Numeric` and
:class:`.Float` types under the cx_oracle dialect are aware of
this behavior, and will coerce the ``Decimal`` to ``float`` if
the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
optional on :class:`.Numeric`).
Because the handler coerces to ``Decimal`` in all cases first,
the feature can detract significantly from performance.
If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
attributes of the result set column to best determine if
subsequent incoming values should be received as ``Decimal`` as
opposed to int (in which case no processing is added). There are
several scenarios where OCI_ does not provide unambiguous data
as to the numeric type, including some situations where
individual rows may return a combination of floating point and
integer values. Certain values for "precision" and "scale" have
been observed to determine this scenario. When it occurs, the
outputtypehandler receives as string and then passes off to a
processing function which detects, for each returned value, if a
decimal point is present, and if so converts to ``Decimal``,
otherwise to int. The intention is that simple int-based
statements like "SELECT my_seq.nextval() FROM DUAL" continue to
return ints and not ``Decimal`` objects, and that any kind of
floating point value is received as a string so that there is no
floating point loss of precision.
The "decimal point is present" logic itself is also sensitive to
locale. Under OCI_, this is controlled by the NLS_LANG
environment variable. Upon first connection, the dialect runs a
test to determine the current "decimal" character, which can be
a comma "," for European locales. From that point forward the
outputtypehandler uses that character to represent a decimal
point. Note that cx_oracle 5.0.3 or greater is required
when dealing with numerics with locale settings that don't use
a period "." as the decimal character.
.. versionchanged:: 0.6.6
The outputtypehandler supports the case where the locale uses a
comma "," character to represent a decimal point.
.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
"""
from __future__ import absolute_import
from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
import random
import collections
import decimal
import re
class _OracleNumeric(sqltypes.Numeric):
    """Numeric type for cx_oracle, cooperating with the dialect's
    connection-level output type handler (see ``on_connect``), which
    delivers ambiguous NUMBER values as strings or Decimals.
    """

    def bind_processor(self, dialect):
        # cx_oracle accepts Decimal objects and floats
        return None

    def result_processor(self, dialect, coltype):
        # we apply a cx_oracle type handler to all connections
        # that converts floating point strings to Decimal().
        # However, in some subquery situations, Oracle doesn't
        # give us enough information to determine int or Decimal.
        # It could even be int/Decimal differently on each row,
        # regardless of the scale given for the originating type.
        # So we still need an old school isinstance() handler
        # here for decimals.
        if dialect.supports_native_decimal:
            if self.asdecimal:
                # pre-build a fixed-scale format string so every incoming
                # value is rendered at the type's effective decimal scale
                fstring = "%%.%df" % self._effective_decimal_return_scale

                def to_decimal(value):
                    if value is None:
                        return None
                    elif isinstance(value, decimal.Decimal):
                        # already converted by the output type handler
                        return value
                    else:
                        return decimal.Decimal(fstring % value)

                return to_decimal
            else:
                # asdecimal=False: coerce Decimal results back to float
                # when the type's precision/scale make that appropriate
                if self.precision is None and self.scale is None:
                    return processors.to_float
                elif not getattr(self, '_is_oracle_number', False) \
                        and self.scale is not None:
                    return processors.to_float
                else:
                    return None
        else:
            # cx_oracle 4 behavior, will assume
            # floats
            return super(_OracleNumeric, self).\
                result_processor(dialect, coltype)
class _OracleDate(sqltypes.Date):
    """Date type which reduces the datetime objects cx_oracle returns
    for Oracle DATE columns down to plain ``datetime.date`` values."""

    def bind_processor(self, dialect):
        # date objects are accepted by cx_oracle as-is on the bind side
        return None

    def result_processor(self, dialect, coltype):
        def process(value):
            # Oracle DATE arrives as a datetime; strip the time portion
            return value.date() if value is not None else value
        return process
class _LOBMixin(object):
    """Mixin providing a result processor that fully reads cx_oracle
    LOB objects into strings, so results don't depend on a live cursor."""

    def result_processor(self, dialect, coltype):
        if not dialect.auto_convert_lobs:
            # return the cx_oracle.LOB directly.
            return None

        def process(value):
            # materialize the full LOB contents; None passes through
            return value if value is None else value.read()
        return process
class _NativeUnicodeMixin(object):
    """Mixin for string types handling Python 2 bind-side coercion when
    cx_Oracle is compiled in WITH_UNICODE mode (which only accepts
    Python unicode objects)."""

    if util.py2k:
        def bind_processor(self, dialect):
            if dialect._cx_oracle_with_unicode:
                # WITH_UNICODE mode: coerce plain strings to unicode
                # before they reach cx_Oracle
                def process(value):
                    if value is None:
                        return value
                    else:
                        return unicode(value)
                return process
            else:
                return super(
                    _NativeUnicodeMixin, self).bind_processor(dialect)

    # we apply a connection output handler that returns
    # unicode in all cases, so the "native_unicode" flag
    # will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
    def get_dbapi_type(self, dbapi):
        # bind fixed-length CHAR using cx_oracle's FIXED_CHAR type so
        # blank-padded comparison semantics are preserved
        return dbapi.FIXED_CHAR
class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
    def get_dbapi_type(self, dbapi):
        # UNICODE is absent when cx_Oracle is built in WITH_UNICODE
        # mode; fall back to the plain STRING DBAPI type in that case
        return getattr(dbapi, 'UNICODE', dbapi.STRING)
class _OracleText(_LOBMixin, sqltypes.Text):
    def get_dbapi_type(self, dbapi):
        # Text maps to CLOB; _LOBMixin reads the LOB contents eagerly
        return dbapi.CLOB
class _OracleLong(oracle.LONG):
    # a raw LONG is a text type, but does *not*
    # get the LobMixin with cx_oracle.

    def get_dbapi_type(self, dbapi):
        # LONG_STRING is cx_oracle's DBAPI type for the legacy LONG column
        return dbapi.LONG_STRING
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
    # plain VARCHAR2 handling; only the mixin's py2 unicode bind
    # behavior is added here
    pass
class _OracleUnicodeText(
        _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
    """NCLOB-backed unicode text: chains eager LOB reading with the
    base unicode result conversion."""

    def get_dbapi_type(self, dbapi):
        return dbapi.NCLOB

    def result_processor(self, dialect, coltype):
        read_lob = _LOBMixin.result_processor(self, dialect, coltype)
        if read_lob is None:
            # LOB auto-conversion is disabled; hand back raw LOB objects
            return None

        decode = sqltypes.UnicodeText.result_processor(
            self, dialect, coltype)
        if decode is None:
            # no unicode conversion needed; just read the LOB
            return read_lob

        def process(value):
            # read the LOB first, then apply the unicode conversion
            return decode(read_lob(value))
        return process
class _OracleInteger(sqltypes.Integer):
    """Integer type which normalizes cx_oracle numeric results to int."""

    def result_processor(self, dialect, coltype):
        def to_int(val):
            # values may arrive as Decimal/float via the output type
            # handler; coerce non-None values to int
            return val if val is None else int(val)
        return to_int
class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
    def get_dbapi_type(self, dbapi):
        # LargeBinary maps to BLOB; _LOBMixin reads it eagerly
        return dbapi.BLOB

    def bind_processor(self, dialect):
        # cx_oracle accepts raw binary values directly on the bind side
        return None
class _OracleInterval(oracle.INTERVAL):
    def get_dbapi_type(self, dbapi):
        # INTERVAL DAY TO SECOND maps straight to cx_oracle's type
        return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
    # RAW needs no cx_oracle-specific processing
    pass
class _OracleRowid(oracle.ROWID):
    def get_dbapi_type(self, dbapi):
        # use cx_oracle's dedicated ROWID DBAPI type
        return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
    def bindparam_string(self, name, **kw):
        """Render a bind parameter, quoting its name when required.

        Oracle bind names that collide with reserved words etc. must be
        double-quoted; the original->quoted mapping is remembered so the
        execution context can rewrite parameter dicts (see pre_exec).
        """
        quote = getattr(name, 'quote', None)
        # precedence note: quoting applies when quote is True, OR when
        # quote is unset (not False) AND the preparer says it's needed
        if quote is True or quote is not False and \
                self.preparer._bindparam_requires_quotes(name):
            quoted_name = '"%s"' % name
            self._quoted_bind_names[name] = quoted_name
            return OracleCompiler.bindparam_string(self, quoted_name, **kw)
        else:
            return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
    """Execution context handling cx_oracle specifics: quoted bind-name
    rewriting, setinputsizes, and OUT parameter / RETURNING capture."""

    def pre_exec(self):
        # rewrite parameter dictionaries to use the quoted bind names
        # recorded by the compiler (see bindparam_string)
        quoted_bind_names = \
            getattr(self.compiled, '_quoted_bind_names', None)
        if quoted_bind_names:
            if not self.dialect.supports_unicode_statements:
                # if DBAPI doesn't accept unicode statements,
                # keys in self.parameters would have been encoded
                # here. so convert names in quoted_bind_names
                # to encoded as well.
                quoted_bind_names = \
                    dict(
                        (fromname.encode(self.dialect.encoding),
                         toname.encode(self.dialect.encoding))
                        for fromname, toname in
                        quoted_bind_names.items()
                    )
            for param in self.parameters:
                for fromname, toname in quoted_bind_names.items():
                    param[toname] = param[fromname]
                    del param[fromname]

        if self.dialect.auto_setinputsizes:
            # cx_oracle really has issues when you setinputsizes
            # on String, including that outparams/RETURNING
            # breaks for varchars
            self.set_input_sizes(
                quoted_bind_names,
                exclude_types=self.dialect.exclude_setinputsizes
            )

        # if a single execute, check for outparams
        if len(self.compiled_parameters) == 1:
            for bindparam in self.compiled.binds.values():
                if bindparam.isoutparam:
                    dbtype = bindparam.type.dialect_impl(self.dialect).\
                        get_dbapi_type(self.dialect.dbapi)
                    if not hasattr(self, 'out_parameters'):
                        self.out_parameters = {}
                    if dbtype is None:
                        raise exc.InvalidRequestError(
                            "Cannot create out parameter for parameter "
                            "%r - its type %r is not supported by"
                            " cx_oracle" %
                            (bindparam.key, bindparam.type)
                        )
                    name = self.compiled.bind_names[bindparam]
                    # allocate a cx_oracle variable to receive the value;
                    # use the quoted key if this name was rewritten above
                    self.out_parameters[name] = self.cursor.var(dbtype)
                    self.parameters[0][quoted_bind_names.get(name, name)] = \
                        self.out_parameters[name]

    def create_cursor(self):
        c = self._dbapi_connection.cursor()
        if self.dialect.arraysize:
            # apply the dialect-wide fetch arraysize to each cursor
            c.arraysize = self.dialect.arraysize
        return c

    def get_result_proxy(self):
        # RETURNING results come back through OUT parameters, not rows
        if hasattr(self, 'out_parameters') and self.compiled.returning:
            returning_params = dict(
                (k, v.getvalue())
                for k, v in self.out_parameters.items()
            )
            return ReturningResultProxy(self, returning_params)

        result = None
        if self.cursor.description is not None:
            for column in self.cursor.description:
                type_code = column[1]
                if type_code in self.dialect._cx_oracle_binary_types:
                    # LOB columns must be buffered before the cursor moves on
                    result = _result.BufferedColumnResultProxy(self)

        if result is None:
            result = _result.ResultProxy(self)

        if hasattr(self, 'out_parameters'):
            if self.compiled_parameters is not None and \
                    len(self.compiled_parameters) == 1:
                result.out_parameters = out_parameters = {}

                # run each OUT value through its type's result processor
                for bind, name in self.compiled.bind_names.items():
                    if name in self.out_parameters:
                        type = bind.type
                        impl_type = type.dialect_impl(self.dialect)
                        dbapi_type = impl_type.get_dbapi_type(
                            self.dialect.dbapi)
                        result_processor = impl_type.\
                            result_processor(self.dialect,
                                             dbapi_type)
                        if result_processor is not None:
                            out_parameters[name] = \
                                result_processor(
                                    self.out_parameters[name].getvalue())
                        else:
                            out_parameters[name] = self.out_parameters[
                                name].getvalue()
            else:
                # executemany: no typed conversion, just raw values
                result.out_parameters = dict(
                    (k, v.getvalue())
                    for k, v in self.out_parameters.items()
                )

        return result
class OracleExecutionContext_cx_oracle_with_unicode(
        OracleExecutionContext_cx_oracle):
    """Support WITH_UNICODE in Python 2.xx.

    WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
    behavior under Python 2.x. This mode in some cases disallows
    and in other cases silently passes corrupted data when
    non-Python-unicode strings (a.k.a. plain old Python strings)
    are passed as arguments to connect(), the statement sent to execute(),
    or any of the bind parameter keys or values sent to execute().

    This optional context therefore ensures that all statements are
    passed as Python unicode objects.
    """

    def __init__(self, *arg, **kw):
        OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
        # coerce the statement itself to unicode up front
        self.statement = util.text_type(self.statement)

    def _execute_scalar(self, stmt):
        # ensure ad-hoc scalar statements are also passed as unicode
        return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
            _execute_scalar(util.text_type(stmt))
class ReturningResultProxy(_result.FullyBufferedResultProxy):
    """Result proxy which stuffs the _returning clause + outparams
    into the fetch."""

    def __init__(self, context, returning_params):
        # mapping of "ret_<n>" keys to OUT-parameter values captured
        # at execution time (see get_result_proxy)
        self._returning_params = returning_params
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        returning = self.context.compiled.returning
        # synthesize a cursor description; the type slot (second
        # element) is unknown here, so it is left as None
        return [
            ("ret_%d" % i, None)
            for i, col in enumerate(returning)
        ]

    def _buffer_rows(self):
        # produce a single row holding each OUT value in RETURNING order
        return collections.deque(
            [tuple(self._returning_params["ret_%d" % i]
                   for i, c in enumerate(self._returning_params))]
        )
class OracleDialect_cx_oracle(OracleDialect):
    """Oracle dialect implemented on top of the cx_Oracle DBAPI.

    Handles cx_Oracle version feature detection, unicode/decimal output
    type handlers, two-phase (XA) transactions and DSN construction.
    """

    execution_ctx_cls = OracleExecutionContext_cx_oracle
    statement_compiler = OracleCompiler_cx_oracle

    driver = "cx_oracle"

    # NOTE(review): the doubled "colspecs = colspecs =" assignment is
    # redundant; a single assignment would suffice
    colspecs = colspecs = {
        sqltypes.Numeric: _OracleNumeric,
        # generic type, assume datetime.date is desired
        sqltypes.Date: _OracleDate,
        sqltypes.LargeBinary: _OracleBinary,
        sqltypes.Boolean: oracle._OracleBoolean,
        sqltypes.Interval: _OracleInterval,
        oracle.INTERVAL: _OracleInterval,
        sqltypes.Text: _OracleText,
        sqltypes.String: _OracleString,
        sqltypes.UnicodeText: _OracleUnicodeText,
        sqltypes.CHAR: _OracleChar,

        # a raw LONG is a text type, but does *not*
        # get the LobMixin with cx_oracle.
        oracle.LONG: _OracleLong,

        # this is only needed for OUT parameters.
        # it would be nice if we could not use it otherwise.
        sqltypes.Integer: _OracleInteger,

        oracle.RAW: _OracleRaw,
        sqltypes.Unicode: _OracleNVarChar,
        sqltypes.NVARCHAR: _OracleNVarChar,
        oracle.ROWID: _OracleRowid,
    }

    execute_sequence_format = list

    def __init__(self,
                 auto_setinputsizes=True,
                 exclude_setinputsizes=("STRING", "UNICODE"),
                 auto_convert_lobs=True,
                 threaded=True,
                 allow_twophase=True,
                 coerce_to_decimal=True,
                 coerce_to_unicode=False,
                 arraysize=50, **kwargs):
        OracleDialect.__init__(self, **kwargs)
        self.threaded = threaded
        self.arraysize = arraysize
        self.allow_twophase = allow_twophase
        self.supports_timestamp = self.dbapi is None or \
            hasattr(self.dbapi, 'TIMESTAMP')
        self.auto_setinputsizes = auto_setinputsizes
        self.auto_convert_lobs = auto_convert_lobs

        # parse "x.y.z" into an int tuple; (0, 0, 0) when the DBAPI
        # has no version attribute (e.g. mock DBAPIs in tests)
        if hasattr(self.dbapi, 'version'):
            self.cx_oracle_ver = tuple([int(x) for x in
                                        self.dbapi.version.split('.')])
        else:
            self.cx_oracle_ver = (0, 0, 0)

        def types(*names):
            # collect the cx_oracle DBAPI type objects that actually
            # exist under this build, skipping absent ones
            return set(
                getattr(self.dbapi, name, None) for name in names
            ).difference([None])

        self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
        self._cx_oracle_string_types = types("STRING", "UNICODE",
                                             "NCLOB", "CLOB")
        self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
        self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
        self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)

        # both flags additionally require cx_oracle 5.x for the
        # output type handler machinery
        self.coerce_to_unicode = (
            self.cx_oracle_ver >= (5, 0) and
            coerce_to_unicode
        )

        self.supports_native_decimal = (
            self.cx_oracle_ver >= (5, 0) and
            coerce_to_decimal
        )

        self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)

        # NOTE(review): cx_oracle_ver is assigned (0, 0, 0) above and is
        # never None at this point, so this branch appears unreachable;
        # retained as-is
        if self.cx_oracle_ver is None:
            # this occurs in tests with mock DBAPIs
            self._cx_oracle_string_types = set()
            self._cx_oracle_with_unicode = False
        elif self.cx_oracle_ver >= (5,) and not \
                hasattr(self.dbapi, 'UNICODE'):
            # cx_Oracle WITH_UNICODE mode. *only* python
            # unicode objects accepted for anything
            self.supports_unicode_statements = True
            self.supports_unicode_binds = True
            self._cx_oracle_with_unicode = True

            if util.py2k:
                # There's really no reason to run with WITH_UNICODE under
                # Python 2.x. Give the user a hint.
                util.warn(
                    "cx_Oracle is compiled under Python 2.xx using the "
                    "WITH_UNICODE flag. Consider recompiling cx_Oracle "
                    "without this flag, which is in no way necessary for "
                    "full support of Unicode. Otherwise, all string-holding "
                    "bind parameters must be explicitly typed using "
                    "SQLAlchemy's String type or one of its subtypes,"
                    "or otherwise be passed as Python unicode. "
                    "Plain Python strings passed as bind parameters will be "
                    "silently corrupted by cx_Oracle."
                )
                self.execution_ctx_cls = \
                    OracleExecutionContext_cx_oracle_with_unicode
        else:
            self._cx_oracle_with_unicode = False

        if self.cx_oracle_ver is None or \
                not self.auto_convert_lobs or \
                not hasattr(self.dbapi, 'CLOB'):
            self.dbapi_type_map = {}
        else:
            # only use this for LOB objects. using it for strings, dates
            # etc. leads to a little too much magic, reflection doesn't know
            # if it should expect encoded strings or unicodes, etc.
            self.dbapi_type_map = {
                self.dbapi.CLOB: oracle.CLOB(),
                self.dbapi.NCLOB: oracle.NCLOB(),
                self.dbapi.BLOB: oracle.BLOB(),
                self.dbapi.BINARY: oracle.RAW(),
            }

    @classmethod
    def dbapi(cls):
        import cx_Oracle
        return cx_Oracle

    def initialize(self, connection):
        super(OracleDialect_cx_oracle, self).initialize(connection)
        if self._is_oracle_8:
            # Oracle 8 has no unicode bind support
            self.supports_unicode_binds = False
        self._detect_decimal_char(connection)

    def _detect_decimal_char(self, connection):
        """detect if the decimal separator character is not '.', as
        is the case with European locale settings for NLS_LANG.

        cx_oracle itself uses similar logic when it formats Python
        Decimal objects to strings on the bind side (as of 5.0.3),
        as Oracle sends/receives string numerics only in the
        current locale.
        """
        if self.cx_oracle_ver < (5,):
            # no output type handlers before version 5
            return

        cx_Oracle = self.dbapi
        conn = connection.connection

        # override the output_type_handler that's
        # on the cx_oracle connection with a plain
        # one on the cursor

        def output_type_handler(cursor, name, defaultType,
                                size, precision, scale):
            return cursor.var(
                cx_Oracle.STRING,
                255, arraysize=cursor.arraysize)

        cursor = conn.cursor()
        cursor.outputtypehandler = output_type_handler
        # probe the server's current locale via a literal decimal value
        cursor.execute("SELECT 0.1 FROM DUAL")
        val = cursor.fetchone()[0]
        cursor.close()
        char = re.match(r"([\.,])", val).group(1)
        if char != '.':
            # wrap the converters so locale separators are normalized
            # to '.' before Decimal parsing
            _detect_decimal = self._detect_decimal
            self._detect_decimal = \
                lambda value: _detect_decimal(value.replace(char, '.'))
            self._to_decimal = \
                lambda value: decimal.Decimal(value.replace(char, '.'))

    def _detect_decimal(self, value):
        # per-value dispatch: a decimal point means Decimal, else int
        if "." in value:
            return decimal.Decimal(value)
        else:
            return int(value)

    # default string->Decimal converter; may be replaced per-locale
    # by _detect_decimal_char above
    _to_decimal = decimal.Decimal

    def on_connect(self):
        if self.cx_oracle_ver < (5,):
            # no output type handlers before version 5
            return

        cx_Oracle = self.dbapi

        def output_type_handler(cursor, name, defaultType,
                                size, precision, scale):
            # convert all NUMBER with precision + positive scale to Decimal
            # this almost allows "native decimal" mode.
            if self.supports_native_decimal and \
                    defaultType == cx_Oracle.NUMBER and \
                    precision and scale > 0:
                return cursor.var(
                    cx_Oracle.STRING,
                    255,
                    outconverter=self._to_decimal,
                    arraysize=cursor.arraysize)

            # if NUMBER with zero precision and 0 or neg scale, this appears
            # to indicate "ambiguous". Use a slower converter that will
            # make a decision based on each value received - the type
            # may change from row to row (!). This kills
            # off "native decimal" mode, handlers still needed.
            elif self.supports_native_decimal and \
                    defaultType == cx_Oracle.NUMBER \
                    and not precision and scale <= 0:
                return cursor.var(
                    cx_Oracle.STRING,
                    255,
                    outconverter=self._detect_decimal,
                    arraysize=cursor.arraysize)

            # allow all strings to come back natively as Unicode
            elif self.coerce_to_unicode and \
                    defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
                return cursor.var(util.text_type, size, cursor.arraysize)

        def on_connect(conn):
            conn.outputtypehandler = output_type_handler

        return on_connect

    def create_connect_args(self, url):
        """Build (args, kwargs) for cx_Oracle.connect() from a URL,
        constructing a DSN via makedsn() when a remote host is given."""
        dialect_opts = dict(url.query)
        for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
                    'threaded', 'allow_twophase'):
            if opt in dialect_opts:
                util.coerce_kw_type(dialect_opts, opt, bool)
                setattr(self, opt, dialect_opts[opt])

        database = url.database
        service_name = dialect_opts.get('service_name', None)
        if database or service_name:
            # if we have a database, then we have a remote host
            port = url.port
            if port:
                port = int(port)
            else:
                # default Oracle listener port
                port = 1521

            if database and service_name:
                raise exc.InvalidRequestError(
                    '"service_name" option shouldn\'t '
                    'be used with a "database" part of the url')
            if database:
                makedsn_kwargs = {'sid': database}
            if service_name:
                makedsn_kwargs = {'service_name': service_name}

            dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs)
        else:
            # we have a local tnsname
            dsn = url.host

        opts = dict(
            user=url.username,
            password=url.password,
            dsn=dsn,
            threaded=self.threaded,
            twophase=self.allow_twophase,
        )

        if util.py2k:
            # normalize str/unicode of connect() arguments per
            # WITH_UNICODE mode
            if self._cx_oracle_with_unicode:
                for k, v in opts.items():
                    if isinstance(v, str):
                        opts[k] = unicode(v)
            else:
                for k, v in opts.items():
                    if isinstance(v, unicode):
                        opts[k] = str(v)

        if 'mode' in url.query:
            opts['mode'] = url.query['mode']
            if isinstance(opts['mode'], util.string_types):
                mode = opts['mode'].upper()
                if mode == 'SYSDBA':
                    opts['mode'] = self.dbapi.SYSDBA
                elif mode == 'SYSOPER':
                    opts['mode'] = self.dbapi.SYSOPER
                else:
                    util.coerce_kw_type(opts, 'mode', int)
        return ([], opts)

    def _get_server_version_info(self, connection):
        # Oracle server version as an int tuple, e.g. (11, 2, 0, 4, 0)
        return tuple(
            int(x)
            for x in connection.connection.version.split('.')
        )

    def is_disconnect(self, e, connection, cursor):
        error, = e.args
        if isinstance(e, self.dbapi.InterfaceError):
            return "not connected" in str(e)
        elif hasattr(error, 'code'):
            # ORA-00028: your session has been killed
            # ORA-03114: not connected to ORACLE
            # ORA-03113: end-of-file on communication channel
            # ORA-03135: connection lost contact
            # ORA-01033: ORACLE initialization or shutdown in progress
            # ORA-02396: exceeded maximum idle time, please connect again
            # TODO: Others ?
            return error.code in (28, 3114, 3113, 3135, 1033, 2396)
        else:
            return False

    def create_xid(self):
        """create a two-phase transaction ID.

        this id will be passed to do_begin_twophase(), do_rollback_twophase(),
        do_commit_twophase(). its format is unspecified."""

        # non-cryptographic randomness is used here; the xid only needs
        # to be unique per transaction, not unguessable
        id = random.randint(0, 2 ** 128)
        return (0x1234, "%032x" % id, "%032x" % 9)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # cx_oracle's executemany() requires a list, not a tuple
        if isinstance(parameters, tuple):
            parameters = list(parameters)
        cursor.executemany(statement, parameters)

    def do_begin_twophase(self, connection, xid):
        connection.connection.begin(*xid)

    def do_prepare_twophase(self, connection, xid):
        # record prepare()'s result so commit can decide whether a
        # database-side commit is actually required
        result = connection.connection.prepare()
        connection.info['cx_oracle_prepared'] = result

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        self.do_rollback(connection.connection)

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        if not is_prepared:
            self.do_commit(connection.connection)
        else:
            # only commit if prepare() reported a transaction to commit
            oci_prepared = connection.info['cx_oracle_prepared']
            if oci_prepared:
                self.do_commit(connection.connection)

    def do_recover_twophase(self, connection):
        connection.info.pop('cx_oracle_prepared', None)

# module-level entry point consumed by the SQLAlchemy dialect registry
dialect = OracleDialect_cx_oracle
| gpl-2.0 |
pedrobaeza/odoo | addons/website_event_sale/controllers/main.py | 54 | 2827 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_event.controllers.main import website_event
from openerp.tools.translate import _
class website_event(website_event):
    @http.route(['/event/cart/update'], type='http', auth="public", methods=['POST'], website=True)
    def cart_update(self, event_id, **post):
        """Add the requested event tickets to the current sale order.

        ``post`` maps keys of the form ``ticket-<id>`` to requested
        quantities. Redirects to checkout when at least one ticket was
        added, otherwise back to the event page.
        """
        cr, uid, context = request.cr, request.uid, request.context
        ticket_obj = request.registry.get('event.event.ticket')

        sale = False
        for key, value in post.items():
            quantity = int(value or "0")
            if not quantity:
                continue
            sale = True
            # keys look like "ticket-<ticket_id>"; anything else yields None
            ticket_id = key.split("-")[0] == 'ticket' and int(key.split("-")[1]) or None
            ticket = ticket_obj.browse(cr, SUPERUSER_ID, ticket_id, context=context)
            request.website.sale_get_order(force_create=1)._cart_update(
                product_id=ticket.product_id.id, add_qty=quantity, context=dict(context, event_ticket_id=ticket.id))
        if not sale:
            return request.redirect("/event/%s" % event_id)
        return request.redirect("/shop/checkout")

    def _add_event(self, event_name="New Event", context=None, **kwargs):
        """Create a new event, pre-populating a default 'Subscription' ticket.

        Fix: ``context`` previously used a mutable default argument
        (``{}``); because the dict is mutated below, state leaked between
        calls sharing the default. ``None`` is now the default and a fresh
        dict is created per call — callers passing a dict are unaffected.
        """
        if context is None:
            context = {}
        try:
            dummy, res_id = request.registry.get('ir.model.data').get_object_reference(request.cr, request.uid, 'event_sale', 'product_product_event')
            context['default_event_ticket_ids'] = [[0, 0, {
                'name': _('Subscription'),
                'product_id': res_id,
                'deadline': False,
                'seats_max': 1000,
                'price': 0,
            }]]
        except ValueError:
            # reference data not installed; create the event without
            # a default ticket (previous best-effort behavior)
            pass
        return super(website_event, self)._add_event(event_name, context, **kwargs)
| agpl-3.0 |
jswope00/GAI | common/djangoapps/student/management/commands/assigngroups.py | 68 | 3033 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
import json
from pytz import UTC
def group_from_value(groups, v):
    """Map a random value in [0, 1] onto a weighted group.

    ``groups`` is a sequence of ``(name, probability)`` pairs whose
    probabilities sum to 1, e.g. ``(('a', 0.3), ('b', 0.4), ('c', 0.3))``.
    For that example: returns 'a' if v < 0.3, 'b' if 0.3 <= v < 0.7,
    and 'c' otherwise. A value at or beyond the cumulative total
    (floating-point round-off) falls into the last group.
    """
    # fix: the running total was previously named ``sum``, shadowing
    # the builtin; renamed for clarity
    cumulative = 0
    for (g, p) in groups:
        cumulative = cumulative + p
        if cumulative > v:
            return g
    return g  # For round-off errors: fall back to the final group
class Command(BaseCommand):
help = \
''' Assign users to test groups. Takes a list
of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
'''
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
utg.description = json.dumps({"description": args[2]},
{"time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write("Assigned user {name} ({id}) to {group}\n".format(name=user.username,
id=user.id,
group=group))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
| agpl-3.0 |
PepperPD/edx-pepper-platform | common/djangoapps/track/views.py | 10 | 5519 | import json
import logging
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
from django_future.csrf import ensure_csrf_cookie
from track.models import TrackingLog
from pytz import UTC
log = logging.getLogger("tracking")

# event-dict keys copied into TrackingLog model instances (see log_event)
LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host']
def log_event(event):
    """Write tracking event to log file, and optionally to TrackingLog model."""
    # always emit to the tracking log, truncated to the configured maximum
    log.info(json.dumps(event)[:settings.TRACK_MAX_EVENT])

    if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
        # the model stores a real datetime, not the ISO string
        event['time'] = dateutil.parser.parse(event['time'])
        row = TrackingLog(**{field: event[field] for field in LOGFIELDS})
        try:
            row.save()
        except Exception as err:
            log.exception(err)
def user_track(request):
    """
    Log when POST call to "event" URL is made by a user. Uses request.REQUEST
    to allow for GET calls.

    GET or POST call should provide "event_type", "event", and "page" arguments.

    Returns an HttpResponse with body 'success'.
    """
    try:  # TODO: Do the same for many of the optional META parameters
        username = request.user.username
    except Exception:
        # e.g. AuthenticationMiddleware missing or user not set on request.
        # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
        username = "anonymous"

    try:
        scookie = request.META['HTTP_COOKIE']  # Get cookies
        scookie = ";".join([c.split('=')[1] for c in scookie.split(";") if "sessionid" in c]).strip()  # Extract session ID
    except (KeyError, IndexError):
        # No cookie header, or a malformed cookie fragment without '='.
        scookie = ""

    # Optional header: default to '' instead of try/except on KeyError.
    agent = request.META.get('HTTP_USER_AGENT', '')

    event = {
        "username": username,
        "session": scookie,
        "ip": request.META['REMOTE_ADDR'],
        "event_source": "browser",
        "event_type": request.REQUEST['event_type'],
        "event": request.REQUEST['event'],
        "agent": agent,
        "page": request.REQUEST['page'],
        "time": datetime.datetime.now(UTC).isoformat(),
        "host": request.META['SERVER_NAME'],
    }

    log_event(event)
    return HttpResponse('success')
def server_track(request, event_type, event, page=None):
    """Log events related to server requests.

    `event_type` names the event; `event` is its payload (any
    JSON-serializable value). Requests for /event_logs made by staff
    members are deliberately not logged.
    """
    try:
        username = request.user.username
    except Exception:
        # e.g. AuthenticationMiddleware missing or user not set on request.
        # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
        username = "anonymous"

    # Optional header: default to '' instead of try/except on KeyError.
    agent = request.META.get('HTTP_USER_AGENT', '')

    # Use a distinct name so the `event` payload parameter is not shadowed.
    event_data = {
        "username": username,
        "ip": request.META['REMOTE_ADDR'],
        "event_source": "server",
        "event_type": event_type,
        "event": event,
        "agent": agent,
        "page": page,
        "time": datetime.datetime.now(UTC).isoformat(),
        "host": request.META['SERVER_NAME'],
    }

    if event_type.startswith("/event_logs") and request.user.is_staff:  # don't log
        return
    log_event(event_data)
def task_track(request_info, task_info, event_type, event, page=None):
    """
    Logs tracking information for events occuring within celery tasks.

    The `event_type` is a string naming the particular event being logged,
    while `event` is a dict containing whatever additional contextual information
    is desired.

    The `request_info` is a dict containing information about the original
    task request.  Relevant keys are `username`, `ip`, `agent`, and `host`.
    While the dict is required, the values in it are not, so that {} can be
    passed in.

    In addition, a `task_info` dict provides more information about the current
    task, to be stored with the `event` dict.  This may also be an empty dict.

    The `page` parameter is optional, and allows the name of the page to
    be provided.
    """
    # supplement event information with additional information
    # about the task in which it is running.
    full_event = dict(event, **task_info)

    # All fields must be specified, in case the tracking information is
    # also saved to the TrackingLog model.  Get values from the task-level
    # information, or just add placeholder values.
    event = {
        "username": request_info.get('username', 'unknown'),
        "ip": request_info.get('ip', 'unknown'),
        "event_source": "task",
        "event_type": event_type,
        "event": full_event,
        "agent": request_info.get('agent', 'unknown'),
        "page": page,
        # Timezone-aware UTC timestamp, consistent with user_track and
        # server_track (utcnow() produced a naive, unmarked time whose
        # ISO form lacks the +00:00 offset).
        "time": datetime.datetime.now(UTC).isoformat(),
        "host": request_info.get('host', 'unknown')
    }

    log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
    """View to output contents of TrackingLog model. For staff use only."""
    if not request.user.is_staff:
        return redirect('/')

    # Defaults: most recent 100 records, all users.
    nlen = 100
    username = ''
    if args:
        for arg in args.split('/'):
            if arg.isdigit():
                nlen = int(arg)
            if arg.startswith('username='):
                username = arg[len('username='):]

    records = TrackingLog.objects.all().order_by('-time')
    if username:
        records = records.filter(username=username)
    records = records[0:nlen]

    # Render timestamps in US/Eastern for display.
    fmt = '%a %d-%b-%y %H:%M:%S'  # "%Y-%m-%d %H:%M:%S %Z%z"
    for record in records:
        record.dtstr = record.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)

    return render_to_response('tracking_log.html', {'records': records})
| agpl-3.0 |
leiferikb/bitpop-private | chrome/test/functional/memory.py | 79 | 5230 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import time
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class MemoryTest(pyauto.PyUITest):
  """Tests for memory usage of Chrome-related processes.

  These tests are meant to be used manually, not as part of the continuous
  test cycle.  This is because each test starts up and periodically
  measures/records the memory usage of a relevant Chrome process, doing so
  repeatedly until the test is manually killed.  Currently, this script only
  works in Linux and ChromeOS, as it uses a Linux shell command to query the
  system for process memory usage info (test_utils.GetMemoryUsageOfProcess()).

  The tests in this suite produce the following output files (relative to the
  current working directory):
  testTabRendererProcessMemoryUsage: 'renderer_process_mem.txt'
  testExtensionProcessMemoryUsage: 'extension_process_mem.txt'
  """

  # Constants for all tests in this suite.
  NUM_SECONDS_BETWEEN_MEASUREMENTS = 10  # Sampling interval.
  MEASUREMENT_LOG_MESSAGE_TEMPLATE = '[%s] %.2f MB (pid: %d)'  # time, MB, pid.
  LOG_TO_OUTPUT_FILE = True  # When False, measurements go to stdout only.

  # Constants for testTabRendererProcessMemoryUsage.
  RENDERER_PROCESS_URL = 'http://chrome.angrybirds.com'
  RENDERER_PROCESS_OUTPUT_FILE = 'renderer_process_mem.txt'

  # Constants for testExtensionProcessMemoryUsage.
  EXTENSION_LOCATION = os.path.abspath(os.path.join(
      pyauto.PyUITest.DataDir(), 'extensions', 'google_talk.crx'))
  EXTENSION_PROCESS_NAME = 'Google Talk'
  EXTENSION_PROCESS_OUTPUT_FILE = 'extension_process_mem.txt'

  def _GetPidOfExtensionProcessByName(self, name):
    """Identifies the process ID of an extension process, given its name.

    Args:
      name: The string name of an extension process, as returned by the
            function GetBrowserInfo().

    Returns:
      The integer process identifier (PID) for the specified process, or
      None if the PID cannot be identified.
    """
    info = self.GetBrowserInfo()['extension_views']
    pid = [x['pid'] for x in info if x['name'] == '%s' % name]
    if pid:
      # Names are assumed unique; first match wins.
      return pid[0]
    return None

  def _LogMessage(self, log_file, msg):
    """Logs a message to the screen, and to a log file if necessary.

    Args:
      log_file: The string name of a log file to which to write.
      msg: The message to log.
    """
    print msg
    sys.stdout.flush()
    if self.LOG_TO_OUTPUT_FILE:
      # Python 2 print-chevron idiom: append msg to the named file.
      print >>open(log_file, 'a'), msg

  def testTabRendererProcessMemoryUsage(self):
    """Test the memory usage of the renderer process for a tab.

    This test periodically queries the system for the current memory usage
    of a tab's renderer process.  The test will take measurements forever;
    you must manually kill the test to terminate it.
    """
    # Start each run with a fresh log file.
    if (self.LOG_TO_OUTPUT_FILE and
        os.path.exists(self.RENDERER_PROCESS_OUTPUT_FILE)):
      os.remove(self.RENDERER_PROCESS_OUTPUT_FILE)
    self.NavigateToURL(self.RENDERER_PROCESS_URL)
    self._LogMessage(
        self.RENDERER_PROCESS_OUTPUT_FILE,
        'Memory usage for renderer process of a tab navigated to: "%s"' % (
            self.RENDERER_PROCESS_URL))
    # A user must manually kill this test to terminate the following loop.
    while True:
      # Re-query the pid each pass in case the renderer was replaced.
      pid = self.GetBrowserInfo()['windows'][0]['tabs'][0]['renderer_pid']
      usage = test_utils.GetMemoryUsageOfProcess(pid)
      current_time = time.asctime(time.localtime(time.time()))
      self._LogMessage(
          self.RENDERER_PROCESS_OUTPUT_FILE,
          self.MEASUREMENT_LOG_MESSAGE_TEMPLATE % (current_time, usage, pid))
      time.sleep(self.NUM_SECONDS_BETWEEN_MEASUREMENTS)

  def testExtensionProcessMemoryUsage(self):
    """Test the memory usage of an extension process.

    This test periodically queries the system for the current memory usage
    of an extension process.  The test will take measurements forever; you
    must manually kill the test to terminate it.
    """
    if (self.LOG_TO_OUTPUT_FILE and
        os.path.exists(self.EXTENSION_PROCESS_OUTPUT_FILE)):
      os.remove(self.EXTENSION_PROCESS_OUTPUT_FILE)
    self.InstallExtension(self.EXTENSION_LOCATION)
    # The PID is 0 until the extension has a chance to start up.
    self.WaitUntil(
        lambda: self._GetPidOfExtensionProcessByName(
            self.EXTENSION_PROCESS_NAME) not in [0, None])
    self._LogMessage(
        self.EXTENSION_PROCESS_OUTPUT_FILE,
        'Memory usage for extension process with name: "%s"' % (
            self.EXTENSION_PROCESS_NAME))
    # A user must manually kill this test to terminate the following loop.
    while True:
      pid = self._GetPidOfExtensionProcessByName(self.EXTENSION_PROCESS_NAME)
      usage = test_utils.GetMemoryUsageOfProcess(pid)
      current_time = time.asctime(time.localtime(time.time()))
      self._LogMessage(
          self.EXTENSION_PROCESS_OUTPUT_FILE,
          self.MEASUREMENT_LOG_MESSAGE_TEMPLATE % (current_time, usage, pid))
      time.sleep(self.NUM_SECONDS_BETWEEN_MEASUREMENTS)
if __name__ == '__main__':
  # Dispatch to the pyauto functional-test runner.
  pyauto_functional.Main()
| bsd-3-clause |
GunnerJnr/_CodeInstitute | Stream-3/Full-Stack-Development/17.Create-A-Django-Based-Forum/6.Enable-Or-Disable-Site-Functionality/we_are_social/threads/models.py | 11 | 1346 | # -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils import timezone
from tinymce.models import HTMLField
# Create your models here.
class Subject(models.Model):
    """A forum subject (board) that threads are filed under.

    Fields:
        name: display name of the subject.
        description: rich-text body; HTMLField comes packaged with
            django-tinymce and renders the WYSIWYG editor in the admin.
    """
    name = models.CharField(max_length=255)
    description = HTMLField()

    def __unicode__(self):
        # Shown wherever the model is rendered as text (e.g. the admin).
        return self.name
class Thread(models.Model):
    """A discussion thread belonging to a Subject.

    Fields:
        name: thread title.
        user: author of the thread (reverse accessor: user.threads).
        subject: the Subject this thread lives under (reverse: subject.threads).
        created_at: time the thread was created (defaults to now).
    """
    name = models.CharField(max_length=255)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='threads')
    subject = models.ForeignKey(Subject, related_name='threads')
    created_at = models.DateTimeField(default=timezone.now)
class Post(models.Model):
    """A single reply within a Thread.

    Fields:
        thread: the Thread this post replies to (reverse: thread.posts).
        comment: rich-text body (may be blank).
        user: link back to the user who created the post (reverse: user.posts).
        created_at: time the post was created (defaults to now).
    """
    thread = models.ForeignKey(Thread, related_name='posts')
    comment = HTMLField(blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='posts')
    created_at = models.DateTimeField(default=timezone.now)
| mit |
jnewland/home-assistant | homeassistant/auth/permissions/util.py | 5 | 3442 | """Helpers to deal with permissions."""
from functools import wraps
from typing import Callable, Dict, List, Optional, Union, cast # noqa: F401
from .const import SUBCAT_ALL
from .models import PermissionLookup
from .types import CategoryType, SubCategoryDict, ValueType
LookupFunc = Callable[[PermissionLookup, SubCategoryDict, str],
Optional[ValueType]]
SubCatLookupType = Dict[str, LookupFunc]
def lookup_all(perm_lookup: PermissionLookup, lookup_dict: SubCategoryDict,
               object_id: str) -> ValueType:
    """Look up permission for all.

    `perm_lookup` and `object_id` exist only to satisfy the LookupFunc
    signature; they are unused here.
    """
    # In case of ALL category, lookup_dict IS the schema.
    return cast(ValueType, lookup_dict)
def compile_policy(
        policy: CategoryType, subcategories: SubCatLookupType,
        perm_lookup: PermissionLookup
) -> Callable[[str, str], bool]:  # noqa
    """Compile policy into a function that tests policy.

    Subcategories are mapping key -> lookup function, ordered by highest
    priority first.
    """
    # None, False, empty dict
    if not policy:
        def apply_policy_deny_all(entity_id: str, key: str) -> bool:
            """Decline all."""
            return False

        return apply_policy_deny_all

    if policy is True:
        def apply_policy_allow_all(entity_id: str, key: str) -> bool:
            """Approve all."""
            return True

        return apply_policy_allow_all

    assert isinstance(policy, dict)

    # One test function per subcategory present in the policy, kept in
    # the priority order of `subcategories`.
    funcs = []  # type: List[Callable[[str, str], Union[None, bool]]]
    for key, lookup_func in subcategories.items():
        lookup_value = policy.get(key)

        # If any lookup value is `True`, it will always be positive
        # NOTE(review): the isinstance check also matches a literal False
        # value, which would likewise allow everything -- presumably the
        # policy schema never produces False here; confirm.
        if isinstance(lookup_value, bool):
            return lambda object_id, key: True

        if lookup_value is not None:
            funcs.append(_gen_dict_test_func(
                perm_lookup, lookup_func, lookup_value))

    if len(funcs) == 1:
        # Single subcategory: avoid the dispatch loop below.
        func = funcs[0]

        @wraps(func)
        def apply_policy_func(object_id: str, key: str) -> bool:
            """Apply a single policy function."""
            return func(object_id, key) is True

        return apply_policy_func

    def apply_policy_funcs(object_id: str, key: str) -> bool:
        """Apply several policy functions."""
        # First non-None answer (highest-priority subcategory) wins.
        for func in funcs:
            result = func(object_id, key)
            if result is not None:
                return result
        return False

    return apply_policy_funcs
def _gen_dict_test_func(
        perm_lookup: PermissionLookup,
        lookup_func: LookupFunc,
        lookup_dict: SubCategoryDict
) -> Callable[[str, str], Optional[bool]]:  # noqa
    """Generate a lookup function.

    The returned closure resolves `object_id` through `lookup_func` and then
    answers the per-key question from the resulting schema.
    """
    def test_value(object_id: str, key: str) -> Optional[bool]:
        """Test if permission is allowed based on the keys."""
        schema = lookup_func(
            perm_lookup, lookup_dict, object_id)  # type: ValueType

        # Booleans and None pass straight through; only dict schemas
        # need the per-key lookup.
        if isinstance(schema, bool) or schema is None:
            return schema

        assert isinstance(schema, dict)
        return schema.get(key)

    return test_value
def test_all(policy: CategoryType, key: str) -> bool:
    """Test if a policy has an ALL access for a specific key."""
    # Non-dict policies (None/bool) decide everything by truthiness.
    if not isinstance(policy, dict):
        return bool(policy)

    all_policy = policy.get(SUBCAT_ALL)
    if isinstance(all_policy, dict):
        return all_policy.get(key, False)
    return bool(all_policy)
| apache-2.0 |
hntrmrrs/capnproto | doc/_plugins/capnp_lexer.py | 35 | 2103 | #! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
    """Pygments lexer for Cap'n Proto schema files (*.capnp).

    'root' dispatches into sub-states for types (':'), default values
    ('='), and annotations ('$'); each has a paired paren-state that
    tracks nested ()/[] pairs via #push/#pop.
    """
    name = "Cap'n Proto lexer"
    aliases = ['capnp']
    filenames = ['*.capnp']

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),           # line comment
            (r'@[0-9a-zA-Z]*', Name.Decorator),   # field ordinal / file id
            (r'=', Literal, 'expression'),        # default value follows
            (r':', Name.Class, 'type'),           # type follows
            (r'\$', Name.Attribute, 'annotation'),
            (r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed)\b',
             Token.Keyword),
            (r'[a-zA-Z0-9_.]+', Token.Name),
            (r'[^#@=:$a-zA-Z0-9_]+', Text),
        ],
        'type': [
            (r'[^][=;,(){}$]+', Name.Class),
            (r'[[(]', Name.Class, 'parentype'),
            (r'', Name.Class, '#pop')             # empty match: leave state
        ],
        'parentype': [
            (r'[^][;()]+', Name.Class),
            (r'[[(]', Name.Class, '#push'),       # nested brackets
            (r'[])]', Name.Class, '#pop'),
            (r'', Name.Class, '#pop')
        ],
        'expression': [
            (r'[^][;,(){}$]+', Literal),
            (r'[[(]', Literal, 'parenexp'),
            (r'', Literal, '#pop')
        ],
        'parenexp': [
            (r'[^][;()]+', Literal),
            (r'[[(]', Literal, '#push'),
            (r'[])]', Literal, '#pop'),
            (r'', Literal, '#pop')
        ],
        'annotation': [
            (r'[^][;,(){}=:]+', Name.Attribute),
            (r'[[(]', Name.Attribute, 'annexp'),
            (r'', Name.Attribute, '#pop')
        ],
        'annexp': [
            (r'[^][;()]+', Name.Attribute),
            (r'[[(]', Name.Attribute, '#push'),
            (r'[])]', Name.Attribute, '#pop'),
            (r'', Name.Attribute, '#pop')
        ],
    }
if __name__ == "__main__":
    # Running this module directly registers the lexer as a Pygments
    # entry point so Pygments can find it under the name "capnp".
    from setuptools import setup, find_packages
    setup(name = "CapnpPygmentsLexer",
          version = "0.1",
          packages = find_packages(),
          py_modules = [ 'capnp_lexer' ],
          entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
| mit |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/pystone.py | 122 | 7381 | #! /usr/bin/env python3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000  # default number of benchmark passes

from time import clock  # NOTE(review): time.clock was removed in Python 3.8;
                        # perf_counter is the replacement -- confirm target.

__version__ = "1.1"

# Enumeration constants 1..5, mirroring the Ada enum of the original.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Mutable record mirroring the Ada/C Dhrystone record type."""

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                 IntComp = 0, StringComp = 0):
        self.PtrComp = PtrComp      # pointer to another Record (or None)
        self.Discr = Discr          # discriminant (Ident1..Ident5)
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        # Shallow copy: PtrComp still aliases the same Record.
        return Record(self.PtrComp, self.Discr, self.EnumComp,
                      self.IntComp, self.StringComp)
# Ada-style booleans retained from the original translation.
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    """Run the benchmark and print elapsed time and pystones/second."""
    benchtime, stones = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % \
          (__version__, loops, benchtime))
    print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
    """Return (benchmark_seconds, pystones_per_second) for `loops` passes."""
    return Proc0(loops)
# Global benchmark state, mutated in place by the Proc*/Func* routines.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = [x[:] for x in [Array1Glob]*51]  # 51x51; rows are distinct copies
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Run the full Dhrystone inner loop `loops` times.

    Returns:
        (benchtime, loops_per_benchtime): elapsed seconds with empty-loop
        overhead subtracted, and pystones/second (0.0 if the elapsed time
        was too short to measure).
    """
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext

    # Measure the cost of an empty loop so it can be subtracted below.
    # NOTE(review): time.clock was removed in Python 3.8; this module only
    # runs unchanged on <= 3.7.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime

    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10

    starttime = clock()

    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        # True division makes IntLoc2 a float here (the C original used
        # integer division); the arithmetic still comes out the same.
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)

    benchtime = clock() - starttime - nulltime
    if benchtime == 0.0:
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
    # Builds a record chain, then breaks the cycle at the end -- the v1.1
    # memory-leak fix described in the module docstring.
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Zap the self-referencing pointer so Proc1 does not leak memory.
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    # Relies on Proc5 having set Char1Glob to 'A' before each call (Proc0
    # calls Proc5 first in its loop); otherwise EnumLoc would be referenced
    # before assignment.
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    global IntGlob

    # Follow PtrGlb's pointer when it exists; otherwise record the miss in
    # IntGlob (Proc0 always initializes PtrGlb before Proc3 can run, so the
    # attribute access below is safe in practice).
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    global Char2Glob

    BoolLoc = Char1Glob == 'A'
    BoolLoc = BoolLoc or BoolGlob  # computed but unused, as in the C original
    Char2Glob = 'B'
def Proc5():
    # Reset the character/bool globals for the next benchmark pass.
    global Char1Glob
    global BoolGlob

    Char1Glob = 'A'
    BoolGlob = FALSE
def Proc6(EnumParIn):
    # Maps one enum value to another per the Dhrystone table:
    # Ident1->Ident1, Ident2->Ident1 or Ident4 (depending on IntGlob),
    # Ident3->Ident2, Ident4 unchanged, Ident5->Ident3.
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + IntParI1 + 2 (via a local, as in the C original)."""
    IntLoc = IntParI1 + 2
    IntParOut = IntParI2 + IntLoc
    return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    # Mutates the caller's arrays in place and sets IntGlob as a side effect.
    global IntGlob

    IntLoc = IntParI1 + 5
    Array1Par[IntLoc] = IntParI2
    Array1Par[IntLoc+1] = Array1Par[IntLoc]
    Array1Par[IntLoc+30] = IntLoc
    for IntIndex in range(IntLoc, IntLoc+2):
        Array2Par[IntLoc][IntIndex] = IntLoc
    Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    # Returns Ident1 when the characters differ, Ident2 when equal.
    CharLoc1 = CharPar1
    CharLoc2 = CharLoc1
    if CharLoc2 != CharPar2:
        return Ident1
    else:
        return Ident2
def Func2(StrParI1, StrParI2):
    # Compares characters at fixed offsets and then the whole strings,
    # mirroring the C original. The while loop only advances when the
    # compared characters differ (Func1 == Ident1).
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE iff EnumParIn equals Ident3."""
    EnumLoc = EnumParIn
    if EnumLoc == Ident3: return TRUE
    return FALSE
if __name__ == '__main__':
    import sys

    def error(msg):
        # Print the error plus a usage line to stderr and exit with 100.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)

    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
| apache-2.0 |
chargrizzle/appengine-mapreduce | python/src/mapreduce/context.py | 45 | 14669 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce execution context.
Mapreduce context provides handler code with information about
current mapreduce execution and organizes utility data flow
from handlers such as counters, log messages, mutation pools.
"""
__all__ = ["get",
"Pool",
"Context",
"COUNTER_MAPPER_CALLS",
"COUNTER_MAPPER_WALLTIME_MS",
"DATASTORE_DEADLINE",
"MAX_ENTITY_COUNT",
]
import heapq
import logging
import threading
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
from google.appengine.api import datastore
from google.appengine.ext import db
from google.appengine.runtime import apiproxy_errors
# Maximum number of items. Pool will be flushed when reaches this amount.
# Datastore API is smart to group entities into as few RPCs as possible without
# exceeding RPC size. So in theory, MAX_ENTITY_COUNT can be as big as
# the instance's memory can hold.
# This number is just an estimate.
# TODO(user): Do batching by entity size if cheap. b/10427424
MAX_ENTITY_COUNT = 20
# Deadline in seconds for mutation pool datastore operations.
DATASTORE_DEADLINE = 15
# The name of the counter which counts all mapper calls.
COUNTER_MAPPER_CALLS = "mapper-calls"
# Total walltime in msec given to mapper process. This is not just mapper
# hundler function, but includes all i/o overhead.
COUNTER_MAPPER_WALLTIME_MS = "mapper-walltime-ms"
# pylint: disable=protected-access
# pylint: disable=g-bad-name
def _normalize_entity(value):
  """Return an entity from an entity or model instance."""
  # ndb entities are signalled by returning None so the caller routes
  # them to the ndb-specific code path.
  if ndb is not None and isinstance(value, ndb.Model):
    return None
  populate = getattr(value, "_populate_internal_entity", None)
  if populate:
    return populate()
  return value
def _normalize_key(value):
  """Return a key from an entity, model instance, key, or key string."""
  # ndb entities/keys are signalled by returning None so the caller routes
  # them to the ndb-specific code path.
  if ndb is not None and isinstance(value, (ndb.Model, ndb.Key)):
    return None
  key_attr = getattr(value, "key", None)
  if key_attr:
    return value.key()
  if isinstance(value, basestring):
    return datastore.Key(value)
  return value
class _ItemList(object):
  """A buffer that holds arbitrary items and auto flushes them when full.

  Callers of this class provides the logic on how to flush.
  This class takes care of the common logic of when to flush and when to retry.

  Properties:
    items: list of objects.
    length: length of item list.
    size: aggregate item size in bytes.
  """

  DEFAULT_RETRIES = 3
  _LARGEST_ITEMS_TO_LOG = 5

  def __init__(self,
               max_entity_count,
               flush_function,
               timeout_retries=DEFAULT_RETRIES,
               repr_function=None):
    """Constructor.

    Args:
      max_entity_count: maximum number of entities before flushing it to db.
      flush_function: a function that can flush the items. The function is
        called with a list of items as the first argument, a dict of options
        as second argument. Currently options can contain {"deadline": int}.
        see self.flush on how the function is called.
      timeout_retries: how many times to retry upon timeouts.
      repr_function: a function that turns an item into meaningful
        representation. For debugging large items.
    """
    self.items = []
    self.__max_entity_count = int(max_entity_count)
    self.__flush_function = flush_function
    self.__repr_function = repr_function
    self.__timeout_retries = int(timeout_retries)

  def __str__(self):
    return "ItemList of with %s items" % len(self.items)

  def append(self, item):
    """Add new item to the list.

    If needed, append will first flush existing items and clear existing items.

    Args:
      item: an item to add to the list.
    """
    # Flush BEFORE appending, so a flush failure never drops `item`.
    if self.should_flush():
      self.flush()
    self.items.append(item)

  def flush(self):
    """Force a flush."""
    if not self.items:
      return

    retry = 0
    options = {"deadline": DATASTORE_DEADLINE}
    while retry <= self.__timeout_retries:
      try:
        self.__flush_function(self.items, options)
        self.clear()
        break
      except db.Timeout, e:
        logging.warning(e)
        logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                        self, retry)
        retry += 1
        # Back off by doubling the datastore deadline on each retry.
        options["deadline"] *= 2
      except apiproxy_errors.RequestTooLargeError:
        self._log_largest_items()
        raise
      else:
        # NOTE(review): this `else` appears unreachable -- the success path
        # `break`s inside the try (which skips `else`), and exhausting the
        # retries falls out of the `while` silently with items unflushed.
        # Confirm whether the intent was a `while...else` on the loop.
        raise

  def _log_largest_items(self):
    # Best-effort diagnostics when a batch exceeds the RPC size limit.
    if not self.__repr_function:
      logging.error("Got RequestTooLargeError but can't interpret items in "
                    "_ItemList %s.", self)
      return

    sizes = [len(self.__repr_function(i)) for i in self.items]
    largest = heapq.nlargest(self._LARGEST_ITEMS_TO_LOG,
                             zip(sizes, self.items),
                             lambda t: t[0])
    # Set field for for test only.
    self._largest = [(s, self.__repr_function(i)) for s, i in largest]
    logging.error("Got RequestTooLargeError. Largest items: %r", self._largest)

  def clear(self):
    """Clear item list."""
    self.items = []

  def should_flush(self):
    """Whether to flush before append the next entity.

    Returns:
      True to flush. False other.
    """
    return len(self.items) >= self.__max_entity_count
class Pool(object):
  """Mutation pool accumulates changes to perform them in patch.

  Any Pool subclass should not be public. Instead, Pool should define an
  operation.Operation class and let user uses that. For example, in a map
  function, user can do:

    def map(foo):
      yield OperationOnMyPool(any_argument)

  Since Operation is a callable object, Mapreduce library will invoke
  any Operation object that is yielded with context.Context instance.
  The operation object can then access MyPool from Context.get_pool.
  """

  def flush(self):
    """Flush all changes.

    Subclasses must override; Context.flush() calls this on every
    registered pool at the end of a slice.
    """
    raise NotImplementedError()
class _MutationPool(Pool):
  """Mutation pool accumulates datastore changes to perform them in batch.

  Properties:
    puts: _ItemList of entities to put to datastore.
    deletes: _ItemList of keys to delete from datastore.
    ndb_puts: _ItemList of ndb entities to put to datastore.
    ndb_deletes: _ItemList of ndb keys to delete from datastore.
  """

  def __init__(self,
               max_entity_count=MAX_ENTITY_COUNT,
               mapreduce_spec=None):
    """Constructor.

    Args:
      max_entity_count: maximum number of entities before flushing it to db.
      mapreduce_spec: An optional instance of MapperSpec.
    """
    self.max_entity_count = max_entity_count
    params = mapreduce_spec.params if mapreduce_spec is not None else {}
    self.force_writes = bool(params.get("force_ops_writes", False))
    # db and ndb operations are buffered separately because they flush
    # through different APIs.
    self.puts = _ItemList(max_entity_count,
                          self._flush_puts,
                          repr_function=self._db_repr)
    self.deletes = _ItemList(max_entity_count,
                             self._flush_deletes)
    self.ndb_puts = _ItemList(max_entity_count,
                              self._flush_ndb_puts,
                              repr_function=self._ndb_repr)
    self.ndb_deletes = _ItemList(max_entity_count,
                                 self._flush_ndb_deletes)

  def put(self, entity):
    """Registers entity to put to datastore.

    Args:
      entity: an entity or model instance to put.
    """
    # _normalize_entity returns None for ndb models; route those to the
    # ndb-specific buffer.
    actual_entity = _normalize_entity(entity)
    if actual_entity is None:
      return self.ndb_put(entity)
    self.puts.append(actual_entity)

  def ndb_put(self, entity):
    """Like put(), but for NDB entities."""
    assert ndb is not None and isinstance(entity, ndb.Model)
    self.ndb_puts.append(entity)

  def delete(self, entity):
    """Registers entity to delete from datastore.

    Args:
      entity: an entity, model instance, or key to delete.
    """
    # _normalize_key returns None for ndb models/keys; route those to the
    # ndb-specific buffer.
    key = _normalize_key(entity)
    if key is None:
      return self.ndb_delete(entity)
    self.deletes.append(key)

  def ndb_delete(self, entity_or_key):
    """Like delete(), but for NDB entities/keys."""
    if ndb is not None and isinstance(entity_or_key, ndb.Model):
      key = entity_or_key.key
    else:
      key = entity_or_key
    self.ndb_deletes.append(key)

  def flush(self):
    """Flush(apply) all changed to datastore."""
    self.puts.flush()
    self.deletes.flush()
    self.ndb_puts.flush()
    self.ndb_deletes.flush()

  @classmethod
  def _db_repr(cls, entity):
    """Converts entity to a readable repr.

    Args:
      entity: datastore.Entity or datastore_types.Key.

    Returns:
      Proto in str.
    """
    return str(entity._ToPb())

  @classmethod
  def _ndb_repr(cls, entity):
    """Converts entity to a readable repr.

    Args:
      entity: ndb.Model

    Returns:
      Proto in str.
    """
    return str(entity._to_pb())

  def _flush_puts(self, items, options):
    """Flush all puts to datastore."""
    datastore.Put(items, config=self._create_config(options))

  def _flush_deletes(self, items, options):
    """Flush all deletes to datastore."""
    datastore.Delete(items, config=self._create_config(options))

  def _flush_ndb_puts(self, items, options):
    """Flush all NDB puts to datastore."""
    assert ndb is not None
    ndb.put_multi(items, config=self._create_config(options))

  def _flush_ndb_deletes(self, items, options):
    """Flush all NDB deletes to datastore."""
    assert ndb is not None
    ndb.delete_multi(items, config=self._create_config(options))

  def _create_config(self, options):
    """Creates datastore Config.

    Returns:
      A datastore_rpc.Configuration instance.
    """
    return datastore.CreateConfig(deadline=options["deadline"],
                                  force_writes=self.force_writes)
class _Counters(Pool):
  """Regulates access to counters.

  Counters Pool is a str to int map. It is saved as part of ShardState so it
  is flushed when ShardState commits to datastore successfully.
  """

  def __init__(self, shard_state):
    """Constructor.

    Args:
      shard_state: current mapreduce shard state as model.ShardState.
    """
    self._shard_state = shard_state

  def increment(self, counter_name, delta=1):
    """Increment counter value.

    Args:
      counter_name: name of the counter as string.
      delta: increment delta as int.
    """
    self._shard_state.counters_map.increment(counter_name, delta)

  def flush(self):
    """Flush unsaved counter values."""
    # Intentionally a no-op: counters live on the ShardState and persist
    # when the shard state itself is saved (see class docstring).
    pass
# TODO(user): Define what fields should be public.
class Context(object):
  """MapReduce execution context.

  The main purpose of Context is to facilitate IO. User code, input reader,
  and output writer code can plug in pools (see Pool class) to Context to
  batch operations.

  There is a single Context instance associated with each worker thread.
  It can be accessed via context.get(). handlers.MapperWorkerHandler creates
  this instance before any IO code (input_reader, output_writer, user
  functions) is called.

  Each Pool decides how to batch and when to flush.
  Context and all its pools are flushed by the end of a slice.
  Upon error in slice execution, what is flushed is undefined. (See _Counters
  for an exception).

  Properties:
    mapreduce_spec: current mapreduce specification as model.MapreduceSpec.
  """
  # Current context instance, stored per worker thread.
  _local = threading.local()
  def __init__(self, mapreduce_spec, shard_state, task_retry_count=0):
    """Constructor.

    Args:
      mapreduce_spec: mapreduce specification as model.MapreduceSpec.
      shard_state: an instance of model.ShardState. This has to be the same
        instance as the one MapperWorkerHandler mutates. All mutations are
        flushed to datastore in the end of the slice.
      task_retry_count: how many times this task has been retried.
    """
    self._shard_state = shard_state
    self.mapreduce_spec = mapreduce_spec
    # TODO(user): Create a hierarchy of Context classes. Certain fields
    # like task_retry_count only makes sense in TaskAttemptContext.
    self.task_retry_count = task_retry_count
    if self.mapreduce_spec:
      self.mapreduce_id = self.mapreduce_spec.mapreduce_id
    else:
      # Only in tests
      self.mapreduce_id = None
    if shard_state:
      self.shard_id = shard_state.get_shard_id()
    else:
      # Only in tests
      self.shard_id = None
    # TODO(user): Allow user to specify max entity count for the pool
    # as they know how big their entities are.
    self._mutation_pool = _MutationPool(mapreduce_spec=mapreduce_spec)
    self._counters = _Counters(shard_state)
    # TODO(user): Remove this after fixing
    # keyhole/dataeng/imagery/feeds/client_lib.py in another CL.
    self.counters = self._counters
    self._pools = {}
    # The two built-in pools are registered so flush() reaches them too.
    self.register_pool("mutation_pool", self._mutation_pool)
    self.register_pool("counters", self.counters)
  def flush(self):
    """Flush all information recorded in context."""
    for pool in self._pools.values():
      pool.flush()
  def register_pool(self, key, pool):
    """Register an arbitrary pool to be flushed together with this context.

    Args:
      key: pool key as string.
      pool: a pool instance.
    """
    self._pools[key] = pool
  def get_pool(self, key):
    """Obtains an instance of registered pool.

    Args:
      key: pool key as string.

    Returns:
      an instance of the pool registered earlier, or None.
    """
    return self._pools.get(key, None)
  @classmethod
  def _set(cls, context):
    """Set current context instance.

    Args:
      context: new context as Context or None.
    """
    cls._local._context_instance = context
# This method is intended for user code to access context instance.
# MR framework should still try to take context as an explicit argument
# whenever possible (dependency injection).
def get():
  """Get current context instance.

  Returns:
    The Context bound to the current worker thread via Context._set(),
    or None when no context has been set yet.
  """
  # getattr with a default replaces the hasattr test plus second attribute
  # lookup (and drops the stray space before the colon in the original).
  return getattr(Context._local, "_context_instance", None)
| apache-2.0 |
apyrgio/synnefo | snf-pithos-backend/pithos/backends/lib/hashfiler/archipelagomapper.py | 6 | 3867 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ctypes
import ConfigParser
import logging
from archipelago.common import (
Request,
xseg_reply_map,
xseg_reply_map_scatterlist,
string_at,
XF_ASSUMEV0,
XF_MAPFLAG_READONLY,
)
from pithos.workers import (
glue,
monkey,
)
monkey.patch_Request()
logger = logging.getLogger(__name__)
class ArchipelagoMapper(object):
    """Mapper.

    Required constructor parameters: namelen.
    """
    namelen = None

    def __init__(self, **params):
        self.params = params
        self.namelen = params['namelen']
        cfg = ConfigParser.ConfigParser()
        # Close the config file deterministically instead of leaking the
        # handle returned by open() (the original never closed it).
        with open(params['archipelago_cfile']) as cfile:
            cfg.readfp(cfile)
        self.ioctx_pool = glue.WorkerGlue.ioctx_pool
        # getint() already returns an int; the extra int() was redundant.
        self.dst_port = cfg.getint('mapperd', 'blockerm_port')
        self.mapperd_port = cfg.getint('vlmcd', 'mapper_port')

    def map_retr(self, maphash, size):
        """Return as a list, part of the hashes map of an object
        at the given block offset.

        By default, return the whole hashes map.

        Raises:
            Exception: if the Archipelago mapfile cannot be retrieved.
        """
        hashes = []
        ioctx = self.ioctx_pool.pool_get()
        req = Request.get_mapr_request(ioctx, self.mapperd_port,
                                       maphash, offset=0, size=size)
        # Ask the mapper to assume a v0 (legacy) map of the given size.
        flags = req.get_flags()
        flags |= XF_ASSUMEV0
        req.set_flags(flags)
        req.set_v0_size(size)
        req.submit()
        req.wait()
        ret = req.success()
        if ret:
            data = req.get_data(xseg_reply_map)
            Segsarray = xseg_reply_map_scatterlist * data.contents.cnt
            segs = Segsarray.from_address(ctypes.addressof(data.contents.segs))
            hashes = [string_at(segs[idx].target, segs[idx].targetlen)
                      for idx in xrange(len(segs))]
            req.put()
        else:
            req.put()
            self.ioctx_pool.pool_put(ioctx)
            raise Exception("Could not retrieve Archipelago mapfile.")
        # Best-effort close of the map; the hashes were already read.
        req = Request.get_close_request(ioctx, self.mapperd_port,
                                        maphash)
        req.submit()
        req.wait()
        ret = req.success()
        if ret is False:
            logger.warning("Could not close map %s" % maphash)
        req.put()
        self.ioctx_pool.pool_put(ioctx)
        return hashes

    def map_stor(self, maphash, hashes, size, block_size):
        """Store hashes in the given hashes map.

        Raises:
            IOError: if the map cannot be written.
        """
        objects = [{'name': h, 'flags': XF_MAPFLAG_READONLY} for h in hashes]
        ioctx = self.ioctx_pool.pool_get()
        req = Request.get_create_request(ioctx, self.mapperd_port,
                                         maphash,
                                         mapflags=XF_MAPFLAG_READONLY,
                                         objects=objects, blocksize=block_size,
                                         size=size)
        req.submit()
        req.wait()
        ret = req.success()
        if ret is False:
            req.put()
            self.ioctx_pool.pool_put(ioctx)
            raise IOError("Could not write map %s" % maphash)
        req.put()
        self.ioctx_pool.pool_put(ioctx)
| gpl-3.0 |
etzhou/edx-platform | lms/djangoapps/courseware/tests/test_footer.py | 57 | 2188 | """
Tests related to the basic footer-switching based off SITE_NAME to ensure
edx.org uses an edx footer but other instances use an Open edX footer.
"""
from nose.plugins.attrib import attr
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.djangoapps.theming.test_util import with_is_edx_domain
@attr('shard_1')
class TestFooter(TestCase):
    """Footer switching: edx.org gets the edX footer, everything else the
    Open edX footer."""

    SOCIAL_MEDIA_NAMES = [
        "facebook",
        "google_plus",
        "twitter",
        "linkedin",
        "tumblr",
        "meetup",
        "reddit",
        "youtube",
    ]

    SOCIAL_MEDIA_URLS = {
        "facebook": "http://www.facebook.com/",
        "google_plus": "https://plus.google.com/",
        "twitter": "https://twitter.com/",
        "linkedin": "http://www.linkedin.com/",
        "tumblr": "http://www.tumblr.com/",
        "meetup": "http://www.meetup.com/",
        "reddit": "http://www.reddit.com/",
        "youtube": "https://www.youtube.com/"
    }

    @with_is_edx_domain(True)
    def test_edx_footer(self):
        """
        Verify that the homepage, when accessed at edx.org, has the edX footer
        """
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'footer-edx-v3')

    @with_is_edx_domain(False)
    def test_openedx_footer(self):
        """
        Verify that the homepage, when accessed at something other than
        edx.org, has the Open edX footer
        """
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'footer-openedx')

    @with_is_edx_domain(True)
    @override_settings(
        SOCIAL_MEDIA_FOOTER_NAMES=SOCIAL_MEDIA_NAMES,
        SOCIAL_MEDIA_FOOTER_URLS=SOCIAL_MEDIA_URLS
    )
    def test_edx_footer_social_links(self):
        """Every configured social-media URL, title and icon is rendered."""
        response = self.client.get('/')
        for name, url in self.SOCIAL_MEDIA_URLS.iteritems():
            display = settings.SOCIAL_MEDIA_FOOTER_DISPLAY[name]
            self.assertContains(response, url)
            self.assertContains(response, display['title'])
            self.assertContains(response, display['icon'])
| agpl-3.0 |
rlugojr/django | tests/auth_tests/test_checks.py | 44 | 7996 | from django.contrib.auth.checks import (
check_models_permissions, check_user_model,
)
from django.contrib.auth.models import AbstractBaseUser
from django.core import checks
from django.db import models
from django.test import (
SimpleTestCase, override_settings, override_system_checks,
)
from django.test.utils import isolate_apps
from .models import CustomUserNonUniqueUsername
@isolate_apps('auth_tests', attr_name='apps')
@override_system_checks([check_user_model])
class UserModelChecksTests(SimpleTestCase):
    """System checks for custom AUTH_USER_MODEL configurations."""
    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonListRequiredFields')
    def test_required_fields_is_list(self):
        """REQUIRED_FIELDS should be a list."""
        class CustomUserNonListRequiredFields(AbstractBaseUser):
            username = models.CharField(max_length=30, unique=True)
            date_of_birth = models.DateField()
            USERNAME_FIELD = 'username'
            # Deliberately a plain string, which should trigger auth.E001.
            REQUIRED_FIELDS = 'date_of_birth'
        errors = checks.run_checks(app_configs=self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "'REQUIRED_FIELDS' must be a list or tuple.",
                obj=CustomUserNonListRequiredFields,
                id='auth.E001',
            ),
        ])
    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserBadRequiredFields')
    def test_username_not_in_required_fields(self):
        """USERNAME_FIELD should not appear in REQUIRED_FIELDS."""
        class CustomUserBadRequiredFields(AbstractBaseUser):
            username = models.CharField(max_length=30, unique=True)
            date_of_birth = models.DateField()
            USERNAME_FIELD = 'username'
            # 'username' duplicated here should trigger auth.E002.
            REQUIRED_FIELDS = ['username', 'date_of_birth']
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "The field named as the 'USERNAME_FIELD' for a custom user model "
                "must not be included in 'REQUIRED_FIELDS'.",
                obj=CustomUserBadRequiredFields,
                id='auth.E002',
            ),
        ])
    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername')
    def test_username_non_unique(self):
        """
        A non-unique USERNAME_FIELD should raise an error only if we use the
        default authentication backend. Otherwise, an warning should be raised.
        """
        errors = checks.run_checks()
        self.assertEqual(errors, [
            checks.Error(
                "'CustomUserNonUniqueUsername.username' must be "
                "unique because it is named as the 'USERNAME_FIELD'.",
                obj=CustomUserNonUniqueUsername,
                id='auth.E003',
            ),
        ])
        # With a non-default backend the same condition downgrades to W004.
        with self.settings(AUTHENTICATION_BACKENDS=['my.custom.backend']):
            errors = checks.run_checks()
            self.assertEqual(errors, [
                checks.Warning(
                    "'CustomUserNonUniqueUsername.username' is named as "
                    "the 'USERNAME_FIELD', but it is not unique.",
                    hint='Ensure that your authentication backend(s) can handle non-unique usernames.',
                    obj=CustomUserNonUniqueUsername,
                    id='auth.W004',
                ),
            ])
    @override_settings(AUTH_USER_MODEL='auth_tests.BadUser')
    def test_is_anonymous_authenticated_methods(self):
        """
        <User Model>.is_anonymous/is_authenticated must not be methods.
        """
        class BadUser(AbstractBaseUser):
            username = models.CharField(max_length=30, unique=True)
            USERNAME_FIELD = 'username'
            # Defining these as methods (not properties) triggers C009/C010.
            def is_anonymous(self):
                return True
            def is_authenticated(self):
                return True
        errors = checks.run_checks(app_configs=self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Critical(
                '%s.is_anonymous must be an attribute or property rather than '
                'a method. Ignoring this is a security issue as anonymous '
                'users will be treated as authenticated!' % BadUser,
                obj=BadUser,
                id='auth.C009',
            ),
            checks.Critical(
                '%s.is_authenticated must be an attribute or property rather '
                'than a method. Ignoring this is a security issue as anonymous '
                'users will be treated as authenticated!' % BadUser,
                obj=BadUser,
                id='auth.C010',
            ),
        ])
@isolate_apps('auth_tests', attr_name='apps')
@override_system_checks([check_models_permissions])
class ModelsPermissionsChecksTests(SimpleTestCase):
    """System checks for model Meta.permissions declarations."""
    def test_clashing_default_permissions(self):
        """A custom permission colliding with a builtin one raises auth.E005."""
        class Checked(models.Model):
            class Meta:
                permissions = [
                    ('change_checked', 'Can edit permission (duplicate)')
                ]
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "The permission codenamed 'change_checked' clashes with a builtin "
                "permission for model 'auth_tests.Checked'.",
                obj=Checked,
                id='auth.E005',
            ),
        ])
    def test_non_clashing_custom_permissions(self):
        """Distinct custom permission codenames produce no errors."""
        class Checked(models.Model):
            class Meta:
                permissions = [
                    ('my_custom_permission', 'Some permission'),
                    ('other_one', 'Some other permission'),
                ]
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [])
    def test_clashing_custom_permissions(self):
        """A duplicated custom permission codename raises auth.E006."""
        class Checked(models.Model):
            class Meta:
                permissions = [
                    ('my_custom_permission', 'Some permission'),
                    ('other_one', 'Some other permission'),
                    ('my_custom_permission', 'Some permission with duplicate permission code'),
                ]
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "The permission codenamed 'my_custom_permission' is duplicated for "
                "model 'auth_tests.Checked'.",
                obj=Checked,
                id='auth.E006',
            ),
        ])
    def test_verbose_name_max_length(self):
        """An over-long verbose_name raises auth.E007 (244-char limit)."""
        class Checked(models.Model):
            class Meta:
                verbose_name = 'some ridiculously long verbose name that is out of control' * 5
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "The verbose_name of model 'auth_tests.Checked' must be at most 244 "
                "characters for its builtin permission names to be at most 255 characters.",
                obj=Checked,
                id='auth.E007',
            ),
        ])
    def test_custom_permission_name_max_length(self):
        """A custom permission name over 255 chars raises auth.E008."""
        custom_permission_name = 'some ridiculously long verbose name that is out of control' * 5
        class Checked(models.Model):
            class Meta:
                permissions = [
                    ('my_custom_permission', custom_permission_name),
                ]
        errors = checks.run_checks(self.apps.get_app_configs())
        self.assertEqual(errors, [
            checks.Error(
                "The permission named '%s' of model 'auth_tests.Checked' is longer "
                "than 255 characters." % custom_permission_name,
                obj=Checked,
                id='auth.E008',
            ),
        ])
    def test_empty_default_permissions(self):
        """Disabling default permissions entirely is valid (no errors)."""
        class Checked(models.Model):
            class Meta:
                default_permissions = ()
        self.assertEqual(checks.run_checks(self.apps.get_app_configs()), [])
| bsd-3-clause |
muff1nman/Vim | vim/eclim/autoload/eclim/python/rope/refactor/patchedast.py | 29 | 24694 | import collections
import re
import warnings
from rope.base import ast, codeanalyze, exceptions
def get_patched_ast(source, sorted_children=False):
    """Parse *source* and return its AST patched with ``region`` fields.

    ``sorted_children`` fields are added only when `sorted_children` is
    True.
    """
    tree = ast.parse(source)
    return patch_ast(tree, source, sorted_children)
def patch_ast(node, source, sorted_children=False):
    """Patch `node` (in place) with source-region information.

    After the call, every node reachable from `node` carries a ``region``
    attribute: a (start, end) offset tuple covering the code that produced
    it.  When `sorted_children` is true, each node additionally gets a
    ``sorted_children`` list holding its child nodes interleaved with the
    whitespace and comments that appear between them.
    """
    # A tree that already carries regions has been patched before; reuse it.
    if hasattr(node, 'region'):
        return node
    ast.call_for_nodes(node,
                       _PatchingASTWalker(source, children=sorted_children))
    return node
def node_region(patched_ast_node):
    """Return the (start, end) offsets recorded on a patched AST node."""
    return patched_ast_node.region
def write_ast(patched_ast_node):
    """Extract source from an AST node patched with `sorted_children`.

    If the tree was patched with sorted_children turned off, use the
    `node_region` function together with the module source code instead.
    """
    pieces = [write_ast(child) if isinstance(child, ast.AST) else child
              for child in patched_ast_node.sorted_children]
    return ''.join(pieces)
class MismatchedTokenError(exceptions.RopeError):
    """Raised when a token the walker expects cannot be found in source."""
    pass
class _PatchingASTWalker(object):
    """AST visitor that attaches source ``region`` offsets to each node.

    For every node type there is a handler (``_Assign``, ``_If``, ...) that
    lists the node's children interleaved with the literal tokens expected
    between them; `_handle` then consumes those tokens from the source and
    records the covered (start, end) region on the node.
    """
    def __init__(self, source, children=False):
        # _Source tracks a moving offset while tokens are consumed.
        self.source = _Source(source)
        self.children = children
        self.lines = codeanalyze.SourceLinesAdapter(source)
        self.children_stack = []
    # Sentinels standing in for "a number literal" / "a string literal"
    # in the child templates built by the handlers below.
    Number = object()
    String = object()
    def __call__(self, node):
        # Dispatch to the handler named after the node class, if any.
        method = getattr(self, '_' + node.__class__.__name__, None)
        if method is not None:
            return method(node)
        # ???: Unknown node; what should we do here?
        warnings.warn('Unknown node type <%s>; please report!'
                      % node.__class__.__name__, RuntimeWarning)
        node.region = (self.source.offset, self.source.offset)
        if self.children:
            node.sorted_children = ast.get_children(node)
    def _handle(self, node, base_children, eat_parens=False, eat_spaces=False):
        """Consume `base_children` (AST nodes, literal tokens, or the
        Number/String sentinels) from the source and record the region
        on `node`.  With `eat_parens` the surrounding parentheses are
        included; with `eat_spaces` leading/trailing whitespace is, too.
        """
        if hasattr(node, 'region'):
            # ???: The same node was seen twice; what should we do?
            warnings.warn(
                'Node <%s> has been already patched; please report!' %
                node.__class__.__name__, RuntimeWarning)
            return
        base_children = collections.deque(base_children)
        self.children_stack.append(base_children)
        children = collections.deque()
        formats = []
        suspected_start = self.source.offset
        start = suspected_start
        first_token = True
        while base_children:
            child = base_children.popleft()
            if child is None:
                continue
            offset = self.source.offset
            if isinstance(child, ast.AST):
                # Recurse: the child node patches itself and advances source.
                ast.call_for_nodes(child, self)
                token_start = child.region[0]
            else:
                if child is self.String:
                    region = self.source.consume_string(
                        end=self._find_next_statement_start())
                elif child is self.Number:
                    region = self.source.consume_number()
                elif child == '!=':
                    # INFO: This has been added to handle deprecated ``<>``
                    region = self.source.consume_not_equal()
                else:
                    region = self.source.consume(child)
                child = self.source[region[0]:region[1]]
                token_start = region[0]
            if not first_token:
                # Remember the whitespace/comments between two tokens.
                formats.append(self.source[offset:token_start])
                if self.children:
                    children.append(self.source[offset:token_start])
            else:
                first_token = False
                start = token_start
            if self.children:
                children.append(child)
        start = self._handle_parens(children, start, formats)
        if eat_parens:
            start = self._eat_surrounding_parens(
                children, suspected_start, start)
        if eat_spaces:
            if self.children:
                children.appendleft(self.source[0:start])
            end_spaces = self.source[self.source.offset:]
            self.source.consume(end_spaces)
            if self.children:
                children.append(end_spaces)
            start = 0
        if self.children:
            node.sorted_children = children
        node.region = (start, self.source.offset)
        self.children_stack.pop()
    def _handle_parens(self, children, start, formats):
        """Changes `children` and returns new start"""
        # Unbalanced parens inside the inter-token whitespace mean the
        # expression itself was parenthesized; widen the region to match.
        opens, closes = self._count_needed_parens(formats)
        old_end = self.source.offset
        new_end = None
        for i in range(closes):
            new_end = self.source.consume(')')[1]
        if new_end is not None:
            if self.children:
                children.append(self.source[old_end:new_end])
        new_start = start
        for i in range(opens):
            new_start = self.source.rfind_token('(', 0, new_start)
        if new_start != start:
            if self.children:
                children.appendleft(self.source[new_start:start])
            start = new_start
        return start
    def _eat_surrounding_parens(self, children, suspected_start, start):
        # Extend the region over one pair of parens that wraps the whole
        # construct (e.g. a parenthesized tuple or generator expression).
        index = self.source.rfind_token('(', suspected_start, start)
        if index is not None:
            old_start = start
            old_offset = self.source.offset
            start = index
            if self.children:
                children.appendleft(self.source[start + 1:old_start])
                children.appendleft('(')
            token_start, token_end = self.source.consume(')')
            if self.children:
                children.append(self.source[old_offset:token_start])
                children.append(')')
        return start
    def _count_needed_parens(self, children):
        # Scan the literal fragments for unbalanced '(' / ')', ignoring
        # string literals and '#' comments; returns (extra_closes, opens).
        start = 0
        opens = 0
        for child in children:
            if not isinstance(child, basestring):
                continue
            if child == '' or child[0] in '\'"':
                continue
            index = 0
            while index < len(child):
                if child[index] == ')':
                    if opens > 0:
                        opens -= 1
                    else:
                        start += 1
                if child[index] == '(':
                    opens += 1
                if child[index] == '#':
                    try:
                        index = child.index('\n', index)
                    except ValueError:
                        break
                index += 1
        return start, opens
    def _find_next_statement_start(self):
        # Upper bound for string consumption: the start of the next
        # pending statement anywhere up the handler stack.
        for children in reversed(self.children_stack):
            for child in children:
                if isinstance(child, ast.stmt):
                    return self.lines.get_line_start(child.lineno)
        return len(self.source.source)
    # Maps operator node class names to their surface syntax.
    _operators = {'And': 'and', 'Or': 'or', 'Add': '+', 'Sub': '-', 'Mult': '*',
                  'Div': '/', 'Mod': '%', 'Pow': '**', 'LShift': '<<',
                  'RShift': '>>', 'BitOr': '|', 'BitAnd': '&', 'BitXor': '^',
                  'FloorDiv': '//', 'Invert': '~', 'Not': 'not', 'UAdd': '+',
                  'USub': '-', 'Eq': '==', 'NotEq': '!=', 'Lt': '<',
                  'LtE': '<=', 'Gt': '>', 'GtE': '>=', 'Is': 'is',
                  'IsNot': 'is not', 'In': 'in', 'NotIn': 'not in'}
    def _get_op(self, node):
        # Multi-word operators ('is not', 'not in') become token lists.
        return self._operators[node.__class__.__name__].split(' ')
    # --- Node-specific handlers below: each builds the child/token
    # --- template for its node type and delegates to _handle().
    def _Attribute(self, node):
        self._handle(node, [node.value, '.', node.attr])
    def _Assert(self, node):
        children = ['assert', node.test]
        if node.msg:
            children.append(',')
            children.append(node.msg)
        self._handle(node, children)
    def _Assign(self, node):
        children = self._child_nodes(node.targets, '=')
        children.append('=')
        children.append(node.value)
        self._handle(node, children)
    def _AugAssign(self, node):
        children = [node.target]
        children.extend(self._get_op(node.op))
        children.extend(['=', node.value])
        self._handle(node, children)
    def _Repr(self, node):
        self._handle(node, ['`', node.value, '`'])
    def _BinOp(self, node):
        children = [node.left] + self._get_op(node.op) + [node.right]
        self._handle(node, children)
    def _BoolOp(self, node):
        self._handle(node, self._child_nodes(node.values,
                                             self._get_op(node.op)[0]))
    def _Break(self, node):
        self._handle(node, ['break'])
    def _Call(self, node):
        children = [node.func, '(']
        args = list(node.args) + node.keywords
        children.extend(self._child_nodes(args, ','))
        if node.starargs is not None:
            if args:
                children.append(',')
            children.extend(['*', node.starargs])
        if node.kwargs is not None:
            if args or node.starargs is not None:
                children.append(',')
            children.extend(['**', node.kwargs])
        children.append(')')
        self._handle(node, children)
    def _ClassDef(self, node):
        children = []
        if getattr(node, 'decorator_list', None):
            for decorator in node.decorator_list:
                children.append('@')
                children.append(decorator)
        children.extend(['class', node.name])
        if node.bases:
            children.append('(')
            children.extend(self._child_nodes(node.bases, ','))
            children.append(')')
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)
    def _Compare(self, node):
        children = []
        children.append(node.left)
        for op, expr in zip(node.ops, node.comparators):
            children.extend(self._get_op(op))
            children.append(expr)
        self._handle(node, children)
    def _Delete(self, node):
        self._handle(node, ['del'] + self._child_nodes(node.targets, ','))
    def _Num(self, node):
        self._handle(node, [self.Number])
    def _Str(self, node):
        self._handle(node, [self.String])
    def _Continue(self, node):
        self._handle(node, ['continue'])
    def _Dict(self, node):
        children = []
        children.append('{')
        if node.keys:
            for index, (key, value) in enumerate(zip(node.keys, node.values)):
                children.extend([key, ':', value])
                if index < len(node.keys) - 1:
                    children.append(',')
        children.append('}')
        self._handle(node, children)
    def _Ellipsis(self, node):
        self._handle(node, ['...'])
    def _Expr(self, node):
        self._handle(node, [node.value])
    def _Exec(self, node):
        children = []
        children.extend(['exec', node.body])
        if node.globals:
            children.extend(['in', node.globals])
        if node.locals:
            children.extend([',', node.locals])
        self._handle(node, children)
    def _ExtSlice(self, node):
        children = []
        for index, dim in enumerate(node.dims):
            if index > 0:
                children.append(',')
            children.append(dim)
        self._handle(node, children)
    def _For(self, node):
        children = ['for', node.target, 'in', node.iter, ':']
        children.extend(node.body)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)
    def _ImportFrom(self, node):
        children = ['from']
        if node.level:
            # Relative imports: one '.' per level.
            children.append('.' * node.level)
        children.extend([node.module, 'import'])
        children.extend(self._child_nodes(node.names, ','))
        self._handle(node, children)
    def _alias(self, node):
        children = [node.name]
        if node.asname:
            children.extend(['as', node.asname])
        self._handle(node, children)
    def _FunctionDef(self, node):
        children = []
        try:
            # 'decorator_list' on newer ASTs, 'decorators' on older ones.
            decorators = getattr(node, 'decorator_list')
        except AttributeError:
            decorators = getattr(node, 'decorators', None)
        if decorators:
            for decorator in decorators:
                children.append('@')
                children.append(decorator)
        children.extend(['def', node.name, '(', node.args])
        children.extend([')', ':'])
        children.extend(node.body)
        self._handle(node, children)
    def _arguments(self, node):
        children = []
        args = list(node.args)
        # Left-pad defaults with None so they align with the last args.
        defaults = [None] * (len(args) - len(node.defaults)) + list(node.defaults)
        for index, (arg, default) in enumerate(zip(args, defaults)):
            if index > 0:
                children.append(',')
            self._add_args_to_children(children, arg, default)
        if node.vararg is not None:
            if args:
                children.append(',')
            children.extend(['*', node.vararg])
        if node.kwarg is not None:
            if args or node.vararg is not None:
                children.append(',')
            children.extend(['**', node.kwarg])
        self._handle(node, children)
    def _add_args_to_children(self, children, arg, default):
        if isinstance(arg, (list, tuple)):
            self._add_tuple_parameter(children, arg)
        else:
            children.append(arg)
        if default is not None:
            children.append('=')
            children.append(default)
    def _add_tuple_parameter(self, children, arg):
        # Python 2 tuple parameters, e.g. ``def f((a, b)):``.
        children.append('(')
        for index, token in enumerate(arg):
            if index > 0:
                children.append(',')
            if isinstance(token, (list, tuple)):
                self._add_tuple_parameter(children, token)
            else:
                children.append(token)
        children.append(')')
    def _GeneratorExp(self, node):
        children = [node.elt]
        children.extend(node.generators)
        self._handle(node, children, eat_parens=True)
    def _comprehension(self, node):
        children = ['for', node.target, 'in', node.iter]
        if node.ifs:
            for if_ in node.ifs:
                children.append('if')
                children.append(if_)
        self._handle(node, children)
    def _Global(self, node):
        children = self._child_nodes(node.names, ',')
        children.insert(0, 'global')
        self._handle(node, children)
    def _If(self, node):
        if self._is_elif(node):
            children = ['elif']
        else:
            children = ['if']
        children.extend([node.test, ':'])
        children.extend(node.body)
        if node.orelse:
            if len(node.orelse) == 1 and self._is_elif(node.orelse[0]):
                pass
            else:
                children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)
    def _is_elif(self, node):
        # An 'elif' branch parses as a nested If; detect the keyword in
        # the source text to avoid emitting 'else: if'.
        if not isinstance(node, ast.If):
            return False
        offset = self.lines.get_line_start(node.lineno) + node.col_offset
        word = self.source[offset:offset + 4]
        # XXX: This is a bug; the offset does not point to the first
        alt_word = self.source[offset - 5:offset - 1]
        return 'elif' in (word, alt_word)
    def _IfExp(self, node):
        return self._handle(node, [node.body, 'if', node.test,
                                   'else', node.orelse])
    def _Import(self, node):
        children = ['import']
        children.extend(self._child_nodes(node.names, ','))
        self._handle(node, children)
    def _keyword(self, node):
        self._handle(node, [node.arg, '=', node.value])
    def _Lambda(self, node):
        self._handle(node, ['lambda', node.args, ':', node.body])
    def _List(self, node):
        self._handle(node, ['['] + self._child_nodes(node.elts, ',') + [']'])
    def _ListComp(self, node):
        children = ['[', node.elt]
        children.extend(node.generators)
        children.append(']')
        self._handle(node, children)
    def _Module(self, node):
        self._handle(node, list(node.body), eat_spaces=True)
    def _Name(self, node):
        self._handle(node, [node.id])
    def _Pass(self, node):
        self._handle(node, ['pass'])
    def _Print(self, node):
        children = ['print']
        if node.dest:
            children.extend(['>>', node.dest])
            if node.values:
                children.append(',')
        children.extend(self._child_nodes(node.values, ','))
        if not node.nl:
            children.append(',')
        self._handle(node, children)
    def _Raise(self, node):
        children = ['raise']
        if node.type:
            children.append(node.type)
        if node.inst:
            children.append(',')
            children.append(node.inst)
        if node.tback:
            children.append(',')
            children.append(node.tback)
        self._handle(node, children)
    def _Return(self, node):
        children = ['return']
        if node.value:
            children.append(node.value)
        self._handle(node, children)
    def _Sliceobj(self, node):
        children = []
        for index, slice in enumerate(node.nodes):
            if index > 0:
                children.append(':')
            if slice:
                children.append(slice)
        self._handle(node, children)
    def _Index(self, node):
        self._handle(node, [node.value])
    def _Subscript(self, node):
        self._handle(node, [node.value, '[', node.slice, ']'])
    def _Slice(self, node):
        children = []
        if node.lower:
            children.append(node.lower)
        children.append(':')
        if node.upper:
            children.append(node.upper)
        if node.step:
            children.append(':')
            children.append(node.step)
        self._handle(node, children)
    def _TryFinally(self, node):
        children = []
        # A try/except/finally parses as TryFinally wrapping a TryExcept;
        # only emit the outer 'try:' when there is no inner TryExcept.
        if len(node.body) != 1 or not isinstance(node.body[0], ast.TryExcept):
            children.extend(['try', ':'])
        children.extend(node.body)
        children.extend(['finally', ':'])
        children.extend(node.finalbody)
        self._handle(node, children)
    def _TryExcept(self, node):
        children = ['try', ':']
        children.extend(node.body)
        children.extend(node.handlers)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)
    def _ExceptHandler(self, node):
        self._excepthandler(node)
    def _excepthandler(self, node):
        children = ['except']
        if node.type:
            children.append(node.type)
        if node.name:
            children.extend([',', node.name])
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)
    def _Tuple(self, node):
        if node.elts:
            self._handle(node, self._child_nodes(node.elts, ','),
                         eat_parens=True)
        else:
            self._handle(node, ['(', ')'])
    def _UnaryOp(self, node):
        children = self._get_op(node.op)
        children.append(node.operand)
        self._handle(node, children)
    def _Yield(self, node):
        children = ['yield']
        if node.value:
            children.append(node.value)
        self._handle(node, children)
    def _While(self, node):
        children = ['while', node.test, ':']
        children.extend(node.body)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)
    def _With(self, node):
        children = ['with', node.context_expr]
        if node.optional_vars:
            children.extend(['as', node.optional_vars])
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)
    def _child_nodes(self, nodes, separator):
        # Interleave `separator` between consecutive nodes.
        children = []
        for index, child in enumerate(nodes):
            children.append(child)
            if index < len(nodes) - 1:
                children.append(separator)
        return children
class _Source(object):
def __init__(self, source):
self.source = source
self.offset = 0
def consume(self, token):
try:
while True:
new_offset = self.source.index(token, self.offset)
if self._good_token(token, new_offset):
break
else:
self._skip_comment()
except (ValueError, TypeError):
raise MismatchedTokenError(
'Token <%s> at %s cannot be matched' %
(token, self._get_location()))
self.offset = new_offset + len(token)
return (new_offset, self.offset)
def consume_string(self, end=None):
if _Source._string_pattern is None:
original = codeanalyze.get_string_pattern()
pattern = r'(%s)((\s|\\\n|#[^\n]*\n)*(%s))*' % \
(original, original)
_Source._string_pattern = re.compile(pattern)
repattern = _Source._string_pattern
return self._consume_pattern(repattern, end)
def consume_number(self):
if _Source._number_pattern is None:
_Source._number_pattern = re.compile(
self._get_number_pattern())
repattern = _Source._number_pattern
return self._consume_pattern(repattern)
def consume_not_equal(self):
if _Source._not_equals_pattern is None:
_Source._not_equals_pattern = re.compile(r'<>|!=')
repattern = _Source._not_equals_pattern
return self._consume_pattern(repattern)
def _good_token(self, token, offset, start=None):
"""Checks whether consumed token is in comments"""
if start is None:
start = self.offset
try:
comment_index = self.source.rindex('#', start, offset)
except ValueError:
return True
try:
new_line_index = self.source.rindex('\n', start, offset)
except ValueError:
return False
return comment_index < new_line_index
def _skip_comment(self):
self.offset = self.source.index('\n', self.offset + 1)
def _get_location(self):
lines = self.source[:self.offset].split('\n')
return (len(lines), len(lines[-1]))
def _consume_pattern(self, repattern, end=None):
while True:
if end is None:
end = len(self.source)
match = repattern.search(self.source, self.offset, end)
if self._good_token(match.group(), match.start()):
break
else:
self._skip_comment()
self.offset = match.end()
return match.start(), match.end()
def till_token(self, token):
new_offset = self.source.index(token, self.offset)
return self[self.offset:new_offset]
def rfind_token(self, token, start, end):
index = start
while True:
try:
index = self.source.rindex(token, start, end)
if self._good_token(token, index, start=start):
return index
else:
end = index
except ValueError:
return None
def from_offset(self, offset):
return self[offset:self.offset]
def find_backwards(self, pattern, offset):
return self.source.rindex(pattern, 0, offset)
def __getitem__(self, index):
return self.source[index]
def __getslice__(self, i, j):
return self.source[i:j]
def _get_number_pattern(self):
    """Return a regex source string approximating Python number literals.

    Covers ints, hex, long suffixes, floats, exponents and imaginary
    suffixes.
    """
    # HACK: It is merely an approaximation and does the job
    integer = r'(0|0x)?[\da-fA-F]+[lL]?'
    return r'(%s(\.\d*)?|(\.\d+))([eE][-+]?\d*)?[jJ]?' % integer
# Class-level caches for the compiled regexes used by the consume_*
# methods above; they are built lazily and shared by all instances.
_string_pattern = None
_number_pattern = None
_not_equals_pattern = None
| mit |
ruslanloman/nova | nova/api/openstack/compute/contrib/evacuate.py | 61 | 4643 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import utils
authorize = extensions.extension_authorizer('compute', 'evacuate')
class Controller(wsgi.Controller):
    """API controller exposing the 'evacuate' server action (API v2)."""

    def __init__(self, ext_mgr, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.host_api = compute.HostAPI()
        # Kept so _evacuate can check whether optional extensions are loaded.
        self.ext_mgr = ext_mgr

    @wsgi.action('evacuate')
    def _evacuate(self, req, id, body):
        """Permit admins to evacuate a server from a failed host
        to a new one.
        If host is empty, the scheduler will select one.
        """
        context = req.environ["nova.context"]
        authorize(context)
        # NOTE(alex_xu): back-compatible with db layer hard-code admin
        # permission checks. This has to be left only for API v2.0 because
        # this version has to be stable even if it means that only admins
        # can call this method while the policy could be changed.
        nova_context.require_admin_context(context)
        if not self.is_valid_body(body, "evacuate"):
            raise exc.HTTPBadRequest(_("Malformed request body"))
        evacuate_body = body["evacuate"]
        host = evacuate_body.get("host")
        # 'host' may only be omitted when the find-host extension is
        # loaded; the scheduler then chooses a target itself.
        if (not host and
                not self.ext_mgr.is_loaded('os-extended-evacuate-find-host')):
            msg = _("host must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            on_shared_storage = strutils.bool_from_string(
                evacuate_body["onSharedStorage"])
        except (TypeError, KeyError):
            msg = _("onSharedStorage must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        password = None
        if 'adminPass' in evacuate_body:
            # check that if requested to evacuate server on shared storage
            # password not specified
            if on_shared_storage:
                msg = _("admin password can't be changed on existing disk")
                raise exc.HTTPBadRequest(explanation=msg)
            password = evacuate_body['adminPass']
        elif not on_shared_storage:
            # Local-disk evacuation rebuilds the instance, so a fresh
            # admin password is generated for it.
            password = utils.generate_password()
        if host is not None:
            # Validate the requested target host exists before evacuating.
            try:
                self.host_api.service_get_by_compute_host(context, host)
            except exception.NotFound:
                msg = _("Compute host %s not found.") % host
                raise exc.HTTPNotFound(explanation=msg)
        instance = common.get_instance(self.compute_api, context, id)
        try:
            if instance.host == host:
                msg = _("The target host can't be the same one.")
                raise exc.HTTPBadRequest(explanation=msg)
            self.compute_api.evacuate(context, instance, host,
                                      on_shared_storage, password)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'evacuate', id)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceInUse as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        # Only returned when a new admin password was set above.
        if password:
            return {'adminPass': password}
class Evacuate(extensions.ExtensionDescriptor):
    """Enables server evacuation."""

    name = "Evacuate"
    alias = "os-evacuate"
    namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v2"
    updated = "2013-01-06T00:00:00Z"

    def get_controller_extensions(self):
        # Attach the 'evacuate' action onto the existing 'servers' resource.
        controller = Controller(self.ext_mgr)
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
| apache-2.0 |
cernops/ceilometer | ceilometer/storage/hbase/utils.py | 9 | 18195 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Various HBase helpers
"""
import copy
import datetime
import json
import bson.json_util
from happybase.hbase import ttypes
from oslo_log import log
import six
from ceilometer.i18n import _
from ceilometer import utils
LOG = log.getLogger(__name__)

# Numeric codes used to tag event-trait qualifiers by value type.
EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
# Comparison operator names mapped to HBase filter-language symbols.
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reverted timestamp in
# row-keys for stored metrics
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
               'ge': '<='}
def _QualifierFilter(op, qualifier):
    """Build an HBase QualifierFilter clause over 'm_'-prefixed columns."""
    return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)
def timestamp(dt, reverse=True):
    """Timestamp is count of microseconds since start of epoch.

    (The original docstring said "milliseconds", but the arithmetic below
    is in microseconds: days * 86400000000 + seconds * 1000000 + usecs.)

    If reverse=True then timestamp will be reversed. Such a technique is used
    in HBase rowkey design when period queries are required. Because of the
    fact that rows are sorted lexicographically it's possible to vary whether
    the 'oldest' entries will be on top of the table or it should be the newest
    ones (reversed timestamp case).

    :param dt: datetime which is translated to timestamp
    :param reverse: a boolean parameter for reverse or straight count of
      timestamp in microseconds
    :return: count or reversed count of microseconds since start of epoch
    """
    epoch = datetime.datetime(1970, 1, 1)
    td = dt - epoch
    # Total microseconds in the timedelta, computed manually for py2.6 compat.
    ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
    return 0x7fffffffffffffff - ts if reverse else ts
def make_events_query_from_filter(event_filter):
    """Return start and stop row for filtering and a query.

    Query is based on the selected parameter.

    :param event_filter: storage.EventFilter object.
    """
    # Event rows are keyed with straight (non-reversed) timestamps.
    start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
                    if event_filter.start_timestamp else "")
    stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
                   if event_filter.end_timestamp else "")
    kwargs = {'event_type': event_filter.event_type,
              'event_id': event_filter.message_id}
    res_q = make_query(**kwargs)

    if event_filter.traits_filter:
        # AND one SingleColumnValueFilter clause per trait filter.
        for trait_filter in event_filter.traits_filter:
            q_trait = make_query(trait_query=True, **trait_filter)
            if q_trait:
                if res_q:
                    res_q += " AND " + q_trait
                else:
                    res_q = q_trait
    return res_q, start, stop
def make_timestamp_query(func, start=None, start_op=None, end=None,
                         end_op=None, bounds_only=False, **kwargs):
    """Return a filter start and stop row for filtering and a query.

    Query is based on the fact that CF-name is 'rts'.

    :param start: Optional start timestamp
    :param start_op: Optional start timestamp operator, like gt, ge
    :param end: Optional end timestamp
    :param end_op: Optional end timestamp operator, like lt, le
    :param bounds_only: if True than query will not be returned
    :param func: a function that provide a format of row
    :param kwargs: kwargs for :param func
    """
    # We don't need to dump here because get_start_end_rts returns strings
    rts_start, rts_end = get_start_end_rts(start, end)
    start_row, end_row = func(rts_start, rts_end, **kwargs)

    if bounds_only:
        return start_row, end_row

    q = []
    # Default to the half-open interval [start, end).
    start_op = start_op or 'ge'
    end_op = end_op or 'lt'
    if rts_start:
        # Operators are reversed because stored 'rts' values are reversed.
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[start_op], rts_start))
    if rts_end:
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[end_op], rts_end))

    res_q = None
    if len(q):
        res_q = " AND ".join(q)

    return start_row, end_row, res_q
def get_start_end_rts(start, end):
    """Render optional datetime bounds as reversed-timestamp strings.

    Either bound may be falsy, in which case the empty string is
    returned for that side.
    """
    def _as_rts(bound):
        return str(timestamp(bound)) if bound else ""

    return _as_rts(start), _as_rts(end)
def make_query(metaquery=None, trait_query=None, **kwargs):
    """Return a filter query string based on the selected parameters.

    :param metaquery: optional metaquery dict
    :param trait_query: optional boolean, for trait_query from kwargs
    :param kwargs: key-value pairs to filter on. Key should be a real
      column name in db
    """
    q = []
    res_q = None

    # Query for traits differs from others. It is constructed with
    # SingleColumnValueFilter with the possibility to choose comparison
    # operator
    if trait_query:
        trait_name = kwargs.pop('key')
        op = kwargs.pop('op', 'eq')
        for k, v in kwargs.items():
            if v is not None:
                res_q = ("SingleColumnValueFilter "
                         "('f', '%s', %s, 'binary:%s', true, true)" %
                         (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
                          OP_SIGN[op], dump(v)))
        return res_q

    # Note: we use extended constructor for SingleColumnValueFilter here.
    # It is explicitly specified that entry should not be returned if CF is not
    # found in table.
    for key, value in sorted(kwargs.items()):
        if value is not None:
            if key == 'source':
                # Sources live in their own 's_<name>' cells (see
                # serialize_entry), so filter on cell presence.
                q.append("SingleColumnValueFilter "
                         "('f', 's_%s', =, 'binary:%s', true, true)" %
                         (value, dump('1')))
            elif key == 'trait_type':
                q.append("ColumnPrefixFilter('%s')" % value)
            elif key == 'event_id':
                # Event row keys are '<rts>:<message_id>'.
                q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value)
            else:
                q.append("SingleColumnValueFilter "
                         "('f', '%s', =, 'binary:%s', true, true)" %
                         (quote(key), dump(value)))
    res_q = None
    if len(q):
        res_q = " AND ".join(q)

    if metaquery:
        meta_q = []
        for k, v in metaquery.items():
            meta_q.append(
                "SingleColumnValueFilter ('f', '%s', =, 'binary:%s', "
                "true, true)"
                % ('r_' + k, dump(v)))
        meta_q = " AND ".join(meta_q)
        # join query and metaquery
        if res_q is not None:
            res_q += " AND " + meta_q
        else:
            res_q = meta_q  # metaquery only
    return res_q
def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs):
    """Return the list of meter-table columns a scan must request.

    HBase applies a scan's 'filter' only to the data in the requested
    'columns'; a column referenced by a SingleColumnValueFilter must
    therefore also be listed, otherwise the scan comes back empty.  For
    example, filter
    "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')"
    with columns ['f:rts'] always yields nothing — the correct request is
    columns = ['f:rts', 'f:s_test-1'].

    :param metaquery: optional metadata-query dict ('f:r_<key>' columns)
    :param need_timestamp: also request the timestamp columns
    :param kwargs: key-value pairs to filter on. Key should be a real
      column name in db
    """
    columns = ['f:message', 'f:recorded_at']
    for key, value in kwargs.items():
        if value is not None:
            columns.append('f:%s' % key)
    if metaquery:
        for key, value in metaquery.items():
            if value is not None:
                columns.append('f:r_%s' % key)
    source = kwargs.get('source')
    if source:
        columns.append('f:s_%s' % source)
    if need_timestamp:
        columns.append('f:rts')
        columns.append('f:timestamp')
    return columns
def make_sample_query_from_filter(sample_filter, require_meter=True):
    """Return a query dictionary based on the settings in the filter.

    :param sample_filter: SampleFilter instance
    :param require_meter: If true and the filter does not have a meter,
      raise an error.
    """
    meter = sample_filter.meter
    if not meter and require_meter:
        raise RuntimeError('Missing required meter specifier')
    # Row-key bounds come from the meter name plus the reversed timestamps.
    start_row, end_row, ts_query = make_timestamp_query(
        make_general_rowkey_scan,
        start=sample_filter.start_timestamp,
        start_op=sample_filter.start_timestamp_op,
        end=sample_filter.end_timestamp,
        end_op=sample_filter.end_timestamp_op,
        some_id=meter)
    kwargs = dict(user_id=sample_filter.user,
                  project_id=sample_filter.project,
                  counter_name=meter,
                  resource_id=sample_filter.resource,
                  source=sample_filter.source,
                  message_id=sample_filter.message_id)

    q = make_query(metaquery=sample_filter.metaquery, **kwargs)

    if q:
        res_q = q + " AND " + ts_query if ts_query else q
    else:
        res_q = ts_query if ts_query else None

    # Timestamp columns are only required when a time range was requested.
    need_timestamp = (sample_filter.start_timestamp or
                      sample_filter.end_timestamp) is not None
    columns = get_meter_columns(metaquery=sample_filter.metaquery,
                                need_timestamp=need_timestamp, **kwargs)
    return res_q, start_row, end_row, columns
def make_meter_query_for_resource(start_timestamp, start_timestamp_op,
                                  end_timestamp, end_timestamp_op, source,
                                  query=None):
    """This method is used when Resource table should be filtered by meters.

    In this method we are looking into all qualifiers with m_ prefix.

    :param start_timestamp: meter's timestamp start range.
    :param start_timestamp_op: meter's start time operator, like ge, gt.
    :param end_timestamp: meter's timestamp end range.
    :param end_timestamp_op: meter's end time operator, like lt, le.
    :param source: source filter.
    :param query: a query string to concatenate with.
    """
    start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp)
    mq = []
    # Default to the half-open interval [start, end).
    start_op = start_timestamp_op or 'ge'
    end_op = end_timestamp_op or 'lt'

    if start_rts:
        filter_value = (start_rts + ':' + quote(source) if source
                        else start_rts)
        mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value))

    if end_rts:
        filter_value = (end_rts + ':' + quote(source) if source
                        else end_rts)
        mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value))

    if mq:
        meter_q = " AND ".join(mq)
        # If there is a filtering on time_range we need to point that
        # qualifiers should start with m_. Overwise in case e.g.
        # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808')
        # qualifier 's_test' satisfies the filter and will be returned.
        meter_q = _QualifierFilter("=", '') + " AND " + meter_q
        query = meter_q if not query else query + " AND " + meter_q
    return query
def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None):
    """Build the (start_row, end_row) pair for a row-key range scan.

    When no reversed-timestamp bounds are given, the range covers every
    row whose key starts with ``some_id``; when ``some_id`` is missing no
    bounds are produced at all.
    """
    if some_id is None:
        return None, None
    # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123
    # will be quoted and character will be turn in a composition that is
    # started with '%' (chr(37)) that lexicographically is less then chr
    # of number
    upper = rts_start if rts_start else chr(122)
    start_row = prepare_key(some_id, rts_end)
    end_row = prepare_key(some_id, upper)
    return start_row, end_row
def prepare_key(*args):
    """Prepares names for rows and columns with correct separator.

    :param args: strings or numbers that we want our key construct of
    :return: key with quoted args that are separated with character ":"
    """
    quoted_parts = []
    for part in args:
        # Numbers are stringified before quoting; strings pass through.
        text = str(part) if isinstance(part, six.integer_types) else part
        quoted_parts.append(quote(text))
    return ":".join(quoted_parts)
def timestamp_from_record_tuple(record):
    """Extract timestamp from HBase tuple record."""
    first_cell = record[0]
    return first_cell['timestamp']
def resource_id_from_record_tuple(record):
    """Extract resource_id from HBase tuple record."""
    first_cell = record[0]
    return first_cell['resource_id']
def deserialize_entry(entry, get_raw_meta=True):
    """Return a list of flatten_result, sources, meters and metadata.

    Flatten_result contains a dict of simple structures such as
    'resource_id':1; sources/meters are the lists of sources and meters
    correspondingly.  metadata is metadata dict. This dict may be returned
    as flattened if get_raw_meta is False.

    :param entry: entry from HBase, without row name and timestamp
    :param get_raw_meta: If true then raw metadata will be returned,
      if False metadata will be constructed from
      'f:r_metadata.' fields
    """
    flatten_result = {}
    sources = []
    meters = []
    metadata_flattened = {}
    for k, v in entry.items():
        if k.startswith('f:s_'):
            # 's_<source>' cells mark membership; the name is the payload.
            sources.append(decode_unicode(k[4:]))
        elif k.startswith('f:r_metadata.'):
            qualifier = decode_unicode(k[len('f:r_metadata.'):])
            metadata_flattened[qualifier] = load(v)
        elif k.startswith("f:m_"):
            # Meter qualifiers are colon-joined, quoted parts (prepare_key).
            meter = ([unquote(i) for i in k[4:].split(':')], load(v))
            meters.append(meter)
        else:
            if ':' in k[2:]:
                # Composite keys become tuples of their unquoted parts.
                key = tuple([unquote(i) for i in k[2:].split(':')])
            else:
                key = unquote(k[2:])
            flatten_result[key] = load(v)
    if get_raw_meta:
        metadata = flatten_result.get('resource_metadata', {})
    else:
        metadata = metadata_flattened

    return flatten_result, sources, meters, metadata
def serialize_entry(data=None, **kwargs):
    """Return a dict that is ready to be stored to HBase

    :param data: dict to be serialized
    :param kwargs: additional args
    """
    data = data or {}
    entry_dict = copy.copy(data)
    entry_dict.update(**kwargs)

    result = {}
    for k, v in entry_dict.items():
        if k == 'source':
            # user, project and resource tables may contain several sources.
            # Besides, resource table may contain several meters.
            # To make insertion safe we need to store all meters and sources in
            # a separate cell. For this purpose s_ and m_ prefixes are
            # introduced.
            qualifier = encode_unicode('f:s_%s' % v)
            result[qualifier] = dump('1')
        elif k == 'meter':
            for meter, ts in v.items():
                qualifier = encode_unicode('f:m_%s' % meter)
                result[qualifier] = dump(ts)
        elif k == 'resource_metadata':
            # keep raw metadata as well as flattened to provide
            # capability with API v2. It will be flattened in another
            # way on API level. But we need flattened too for quick filtering.
            flattened_meta = dump_metadata(v)
            for key, m in flattened_meta.items():
                metadata_qualifier = encode_unicode('f:r_metadata.' + key)
                result[metadata_qualifier] = dump(m)
            result['f:resource_metadata'] = dump(v)
        else:
            # Ordinary values: quote the key (':' stays literal) and
            # JSON-encode the value.
            result['f:' + quote(k, ':')] = dump(v)
    return result
def dump_metadata(meta):
    """Flatten nested resource metadata into a single-level dict."""
    return dict(utils.dict_to_keyval(meta))
def dump(data):
    """JSON-encode ``data``; bson's encoder handles non-JSON types
    such as datetimes (stored as {"$date": ...})."""
    return json.dumps(data, default=bson.json_util.default)
def load(data):
    """Decode JSON produced by dump(); datetimes come back tz-naive
    via the custom object_hook below."""
    return json.loads(data, object_hook=object_hook)
def encode_unicode(data):
    """UTF-8 encode text values; pass anything else through unchanged."""
    return data.encode('utf-8') if isinstance(data, six.text_type) else data
def decode_unicode(data):
    """UTF-8 decode string values; pass anything else through unchanged.

    NOTE(review): on Python 3, ``six.string_types`` matches ``str``, which
    has no ``.decode()`` — presumably this path only sees byte strings
    under Python 2. Confirm before porting.
    """
    return data.decode('utf-8') if isinstance(data, six.string_types) else data
# We don't want to have tzinfo in decoded json.This object_hook is
# overwritten json_util.object_hook for $date
def object_hook(dct):
    """json.loads hook: decode bson '$date' values as naive datetimes."""
    if "$date" in dct:
        dt = bson.json_util.object_hook(dct)
        return dt.replace(tzinfo=None)
    return bson.json_util.object_hook(dct)
def create_tables(conn, tables, column_families):
    """Create each table in ``tables`` with the given column families.

    Tables that already exist are left untouched; a warning is logged
    instead, using the fully prefixed table name for clarity.
    """
    for table in tables:
        try:
            conn.create_table(table, column_families)
        except ttypes.AlreadyExists:
            if conn.table_prefix:
                # Rebuild the fully-prefixed name only for the log message.
                table = ("%(table_prefix)s"
                         "%(separator)s"
                         "%(table_name)s" %
                         dict(table_prefix=conn.table_prefix,
                              separator=conn.table_prefix_separator,
                              table_name=table))

            LOG.warn(_("Cannot create table %(table_name)s "
                       "it already exists. Ignoring error")
                     % {'table_name': table})
def quote(s, *args):
    """Return quoted string even if it is unicode one.

    :param s: string that should be quoted
    :param args: any symbol we want to stay unquoted
    """
    # Encode to UTF-8 first so urllib can percent-escape non-ASCII text.
    s_en = s.encode('utf8')
    return six.moves.urllib.parse.quote(s_en, *args)
def unquote(s):
    """Return unquoted and decoded string.

    :param s: string that should be unquoted
    """
    s_de = six.moves.urllib.parse.unquote(s)
    return s_de.decode('utf8')
| apache-2.0 |
qupai/git-repo | subcmds/sync.py | 4 | 28299 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
    """Internal error thrown in _FetchHelper() when we don't want stack trace.

    The handler sets the shared error event instead of printing a traceback.
    """
    pass
class Sync(Command, MirrorSafeCommand):
    # Default number of parallel fetch jobs; may be raised by the
    # manifest's sync-j default or the -j flag (see _Options/Execute).
    jobs = 1
    common = True
    helpSummary = "Update working tree to the latest revision"
    helpUsage = """
%prog [<project>...]
"""
    helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
    """Register sync's command line options on parser ``p``.

    :param p: optparse parser to populate
    :param show_smart: also register the smart-sync/smart-tag options
    """
    # Seed the default job count from the manifest's sync-j, if parseable.
    try:
        self.jobs = self.manifest.default.sync_j
    except ManifestParseError:
        self.jobs = 1

    p.add_option('-f', '--force-broken',
                 dest='force_broken', action='store_true',
                 help="continue sync even if a project fails to sync")
    p.add_option('-l', '--local-only',
                 dest='local_only', action='store_true',
                 help="only update working tree, don't fetch")
    p.add_option('-n', '--network-only',
                 dest='network_only', action='store_true',
                 help="fetch only, don't update working tree")
    p.add_option('-d', '--detach',
                 dest='detach_head', action='store_true',
                 help='detach projects back to manifest revision')
    p.add_option('-c', '--current-branch',
                 dest='current_branch_only', action='store_true',
                 help='fetch only current branch from server')
    p.add_option('-q', '--quiet',
                 dest='quiet', action='store_true',
                 help='be more quiet')
    p.add_option('-j', '--jobs',
                 dest='jobs', action='store', type='int',
                 help="projects to fetch simultaneously (default %d)" % self.jobs)
    p.add_option('-m', '--manifest-name',
                 dest='manifest_name',
                 help='temporary manifest to use for this sync', metavar='NAME.xml')
    p.add_option('--no-clone-bundle',
                 dest='no_clone_bundle', action='store_true',
                 help='disable use of /clone.bundle on HTTP/HTTPS')
    p.add_option('-u', '--manifest-server-username', action='store',
                 dest='manifest_server_username',
                 help='username to authenticate with the manifest server')
    p.add_option('-p', '--manifest-server-password', action='store',
                 dest='manifest_server_password',
                 help='password to authenticate with the manifest server')
    p.add_option('--fetch-submodules',
                 dest='fetch_submodules', action='store_true',
                 help='fetch submodules from server')
    p.add_option('--no-tags',
                 dest='no_tags', action='store_true',
                 help="don't fetch tags")
    p.add_option('--optimized-fetch',
                 dest='optimized_fetch', action='store_true',
                 help='only fetch projects fixed to sha1 if revision does not exist locally')
    if show_smart:
        p.add_option('-s', '--smart-sync',
                     dest='smart_sync', action='store_true',
                     help='smart sync using manifest from a known good build')
        p.add_option('-t', '--smart-tag',
                     dest='smart_tag', action='store',
                     help='smart sync using manifest from a known tag')

    g = p.add_option_group('repo Version options')
    g.add_option('--no-repo-verify',
                 dest='no_repo_verify', action='store_true',
                 help='do not verify repo source code')
    g.add_option('--repo-upgraded',
                 dest='repo_upgraded', action='store_true',
                 help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args, **kwargs):
    """Main function of the fetch threads when jobs are > 1.

    Delegates most of the work to _FetchHelper.

    Args:
      opt: Program options returned from optparse. See _Options().
      projects: Projects to fetch.
      *args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
          _FetchHelper docstring for details.
    """
    for project in projects:
        success = self._FetchHelper(opt, project, *args, **kwargs)
        if not success and not opt.force_broken:
            # Abort this thread's queue on the first failure unless the
            # user asked to continue past broken projects.
            break
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
    """Fetch git objects for a single project.

    Args:
      opt: Program options returned from optparse. See _Options().
      project: Project object for the project to fetch.
      lock: Lock for accessing objects that are shared amongst multiple
          _FetchHelper() threads.
      fetched: set object that we will add project.gitdir to when we're done
          (with our lock held).
      pm: Instance of a Project object. We will call pm.update() (with our
          lock held).
      sem: We'll release() this semaphore when we exit so that another thread
          can be started up.
      err_event: We'll set this event in the case of an error (after printing
          out info about the error).

    Returns:
      Whether the fetch was successful.
    """
    # We'll set to true once we've locked the lock.
    did_lock = False

    if not opt.quiet:
        print('Fetching project %s' % project.name)

    # Encapsulate everything in a try/except/finally so that:
    # - We always set err_event in the case of an exception.
    # - We always make sure we call sem.release().
    # - We always make sure we unlock the lock if we locked it.
    try:
        try:
            start = time.time()
            success = project.Sync_NetworkHalf(
                quiet=opt.quiet,
                current_branch_only=opt.current_branch_only,
                clone_bundle=not opt.no_clone_bundle,
                no_tags=opt.no_tags, archive=self.manifest.IsArchive,
                optimized_fetch=opt.optimized_fetch)
            self._fetch_times.Set(project, time.time() - start)

            # Lock around all the rest of the code, since printing, updating a set
            # and Progress.update() are not thread safe.
            lock.acquire()
            did_lock = True

            if not success:
                print('error: Cannot fetch %s' % project.name, file=sys.stderr)
                if opt.force_broken:
                    print('warn: --force-broken, continuing to sync',
                          file=sys.stderr)
                else:
                    # Raised (not returned) so the outer handler records the
                    # failure without printing a stack trace.
                    raise _FetchError()

            fetched.add(project.gitdir)
            pm.update()
        except _FetchError:
            err_event.set()
        except:
            err_event.set()
            raise
    finally:
        if did_lock:
            lock.release()
        sem.release()

    return success
def _Fetch(self, projects, opt):
    """Fetch all ``projects``, in parallel when self.jobs > 1.

    Projects that share an object directory are grouped into the same
    worker so they are fetched serially, avoiding concurrent writes to
    the same git objects.

    Returns:
      Set of gitdirs fetched successfully; exits the process on error.
    """
    fetched = set()
    lock = _threading.Lock()
    pm = Progress('Fetching projects', len(projects))

    objdir_project_map = dict()
    for project in projects:
        objdir_project_map.setdefault(project.objdir, []).append(project)

    threads = set()
    sem = _threading.Semaphore(self.jobs)
    err_event = _threading.Event()
    for project_list in objdir_project_map.values():
        # Check for any errors before running any more tasks.
        # ...we'll let existing threads finish, though.
        if err_event.isSet() and not opt.force_broken:
            break

        sem.acquire()
        kwargs = dict(opt=opt,
                      projects=project_list,
                      lock=lock,
                      fetched=fetched,
                      pm=pm,
                      sem=sem,
                      err_event=err_event)
        if self.jobs > 1:
            t = _threading.Thread(target = self._FetchProjectList,
                                  kwargs = kwargs)
            # Ensure that Ctrl-C will not freeze the repo process.
            t.daemon = True
            threads.add(t)
            t.start()
        else:
            # Single-job mode: run synchronously on this thread.
            self._FetchProjectList(**kwargs)

    for t in threads:
        t.join()

    # If we saw an error, exit with code 1 so that other scripts can check.
    if err_event.isSet():
        print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
        sys.exit(1)

    pm.end()
    self._fetch_times.Save()

    if not self.manifest.IsArchive:
        self._GCProjects(projects)

    return fetched
def _GCProjects(self, projects):
    """Run 'git gc --auto' over each distinct gitdir, in parallel when
    git supports per-invocation config (-c) and multiple CPUs exist.
    """
    gitdirs = {}
    for project in projects:
        # De-duplicate by gitdir; shared gitdirs are gc'd only once.
        gitdirs[project.gitdir] = project.bare_git

    has_dash_c = git_require((1, 7, 2))
    if multiprocessing and has_dash_c:
        cpu_count = multiprocessing.cpu_count()
    else:
        cpu_count = 1
    jobs = min(self.jobs, cpu_count)

    if jobs < 2:
        for bare_git in gitdirs.values():
            bare_git.gc('--auto')
        return

    # NOTE(review): under Python 3 this true division yields a float;
    # presumably fine for the py2 interpreter this targets — confirm
    # before porting.
    config = {'pack.threads': cpu_count / jobs if cpu_count > jobs else 1}

    threads = set()
    sem = _threading.Semaphore(jobs)
    err_event = _threading.Event()

    def GC(bare_git):
        try:
            try:
                bare_git.gc('--auto', config=config)
            except GitError:
                err_event.set()
            except:
                err_event.set()
                raise
        finally:
            sem.release()

    for bare_git in gitdirs.values():
        if err_event.isSet():
            break
        sem.acquire()
        t = _threading.Thread(target=GC, args=(bare_git,))
        t.daemon = True
        threads.add(t)
        t.start()

    for t in threads:
        t.join()

    if err_event.isSet():
        print('\nerror: Exited sync due to gc errors', file=sys.stderr)
        sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
    """Reload the manifest, optionally overriding it with ``manifest_name``."""
    if manifest_name:
        # Override calls _Unload already
        self.manifest.Override(manifest_name)
    else:
        self.manifest._Unload()
def UpdateProjectList(self):
    """Reconcile the on-disk working tree with the current manifest.

    Writes the current project paths to .repo/project.list, and deletes
    the working trees of projects that were in the previous list but are
    no longer in the manifest.  Returns 0 on success, -1 if a removed
    project has uncommitted changes (in which case nothing is deleted
    and the user is told to commit first).
    """
    # Collect the relative paths of all projects in the current manifest.
    new_project_paths = []
    for project in self.GetProjects(None, missing_ok=True):
        if project.relpath:
            new_project_paths.append(project.relpath)
    file_name = 'project.list'
    file_path = os.path.join(self.manifest.repodir, file_name)
    old_project_paths = []

    if os.path.exists(file_path):
        fd = open(file_path, 'r')
        try:
            old_project_paths = fd.read().split('\n')
        finally:
            fd.close()
        for path in old_project_paths:
            if not path:
                continue
            if path not in new_project_paths:
                # If the path has already been deleted, we don't need to do it
                if os.path.exists(self.manifest.topdir + '/' + path):
                    # Build a throwaway Project object just to inspect and
                    # remove the stale working tree.
                    gitdir = os.path.join(self.manifest.topdir, path, '.git')
                    project = Project(
                        manifest = self.manifest,
                        name = path,
                        remote = RemoteSpec('origin'),
                        gitdir = gitdir,
                        objdir = gitdir,
                        worktree = os.path.join(self.manifest.topdir, path),
                        relpath = path,
                        revisionExpr = 'HEAD',
                        revisionId = None,
                        groups = None)

                    if project.IsDirty():
                        # Never destroy uncommitted work; abort the sync.
                        print('error: Cannot remove project "%s": uncommitted changes '
                              'are present' % project.relpath, file=sys.stderr)
                        print('       commit changes, then run sync again',
                              file=sys.stderr)
                        return -1
                    else:
                        print('Deleting obsolete path %s' % project.worktree,
                              file=sys.stderr)
                        shutil.rmtree(project.worktree)
                        # Try deleting parent subdirs if they are empty
                        project_dir = os.path.dirname(project.worktree)
                        while project_dir != self.manifest.topdir:
                            try:
                                os.rmdir(project_dir)
                            except OSError:
                                # Non-empty (or otherwise undeletable): stop walking up.
                                break
                            project_dir = os.path.dirname(project_dir)

    # Persist the new list, sorted, one path per line.
    new_project_paths.sort()
    fd = open(file_path, 'w')
    try:
        fd.write('\n'.join(new_project_paths))
        fd.write('\n')
    finally:
        fd.close()
    return 0
def Execute(self, opt, args):
    """Entry point for 'repo sync'.

    Validates option combinations, optionally retrieves a pinned
    manifest from a smart-sync server, fetches the network half for all
    selected projects, then checks out the local working trees.
    """
    if opt.jobs:
        self.jobs = opt.jobs
    if self.jobs > 1:
        # Cap parallelism by the fd soft limit: each job needs roughly
        # 3 file descriptors, keeping 5 in reserve for the main process.
        # NOTE(review): '/' here yields a float under Python 3; '//'
        # would be the portable spelling — confirm target interpreter.
        soft_limit, _ = _rlimit_nofile()
        self.jobs = min(self.jobs, (soft_limit - 5) / 3)

    # Reject mutually-exclusive option combinations up front.
    if opt.network_only and opt.detach_head:
        print('error: cannot combine -n and -d', file=sys.stderr)
        sys.exit(1)
    if opt.network_only and opt.local_only:
        print('error: cannot combine -n and -l', file=sys.stderr)
        sys.exit(1)
    if opt.manifest_name and opt.smart_sync:
        print('error: cannot combine -m and -s', file=sys.stderr)
        sys.exit(1)
    if opt.manifest_name and opt.smart_tag:
        print('error: cannot combine -m and -t', file=sys.stderr)
        sys.exit(1)
    if opt.manifest_server_username or opt.manifest_server_password:
        if not (opt.smart_sync or opt.smart_tag):
            print('error: -u and -p may only be combined with -s or -t',
                  file=sys.stderr)
            sys.exit(1)
        if None in [opt.manifest_server_username, opt.manifest_server_password]:
            print('error: both -u and -p must be given', file=sys.stderr)
            sys.exit(1)

    if opt.manifest_name:
        self.manifest.Override(opt.manifest_name)

    manifest_name = opt.manifest_name

    if opt.smart_sync or opt.smart_tag:
        # Smart sync: ask the manifest server for a pinned manifest
        # (approved build for -s, an explicit tag for -t).
        if not self.manifest.manifest_server:
            print('error: cannot smart sync: no manifest server defined in '
                  'manifest', file=sys.stderr)
            sys.exit(1)

        manifest_server = self.manifest.manifest_server
        if not opt.quiet:
            print('Using manifest server %s' % manifest_server)

        if not '@' in manifest_server:
            # No credentials embedded in the URL: use -u/-p if given,
            # otherwise try to look them up in ~/.netrc.
            username = None
            password = None
            if opt.manifest_server_username and opt.manifest_server_password:
                username = opt.manifest_server_username
                password = opt.manifest_server_password
            else:
                try:
                    info = netrc.netrc()
                except IOError:
                    print('.netrc file does not exist or could not be opened',
                          file=sys.stderr)
                else:
                    try:
                        parse_result = urllib.parse.urlparse(manifest_server)
                        if parse_result.hostname:
                            username, _account, password = \
                                info.authenticators(parse_result.hostname)
                    except TypeError:
                        # TypeError is raised when the given hostname is not present
                        # in the .netrc file.
                        print('No credentials found for %s in .netrc'
                              % parse_result.hostname, file=sys.stderr)
                    except netrc.NetrcParseError as e:
                        print('Error parsing .netrc file: %s' % e, file=sys.stderr)

            if (username and password):
                # Splice the credentials into the server URL
                # (scheme://user:pass@host/...).
                manifest_server = manifest_server.replace('://', '://%s:%s@' %
                                                          (username, password),
                                                          1)

        try:
            server = xmlrpc.client.Server(manifest_server)
            if opt.smart_sync:
                p = self.manifest.manifestProject
                b = p.GetBranch(p.CurrentBranch)
                branch = b.merge
                if branch.startswith(R_HEADS):
                    branch = branch[len(R_HEADS):]

                # Build-environment variables select which approved
                # manifest to request, when present.
                env = os.environ.copy()
                if 'SYNC_TARGET' in env:
                    target = env['SYNC_TARGET']
                    [success, manifest_str] = server.GetApprovedManifest(branch, target)
                elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
                    target = '%s-%s' % (env['TARGET_PRODUCT'],
                                        env['TARGET_BUILD_VARIANT'])
                    [success, manifest_str] = server.GetApprovedManifest(branch, target)
                else:
                    [success, manifest_str] = server.GetApprovedManifest(branch)
            else:
                assert(opt.smart_tag)
                [success, manifest_str] = server.GetManifest(opt.smart_tag)

            if success:
                # Persist the server-provided manifest and switch to it.
                manifest_name = "smart_sync_override.xml"
                manifest_path = os.path.join(self.manifest.manifestProject.worktree,
                                             manifest_name)
                try:
                    f = open(manifest_path, 'w')
                    try:
                        f.write(manifest_str)
                    finally:
                        f.close()
                except IOError:
                    print('error: cannot write manifest to %s' % manifest_path,
                          file=sys.stderr)
                    sys.exit(1)
                self._ReloadManifest(manifest_name)
            else:
                print('error: manifest server RPC call failed: %s' %
                      manifest_str, file=sys.stderr)
                sys.exit(1)
        except (socket.error, IOError, xmlrpc.client.Fault) as e:
            print('error: cannot connect to manifest server %s:\n%s'
                  % (self.manifest.manifest_server, e), file=sys.stderr)
            sys.exit(1)
        except xmlrpc.client.ProtocolError as e:
            print('error: cannot connect to manifest server %s:\n%d %s'
                  % (self.manifest.manifest_server, e.errcode, e.errmsg),
                  file=sys.stderr)
            sys.exit(1)

    # Prepare the repo tool project and the manifest project themselves.
    rp = self.manifest.repoProject
    rp.PreSync()

    mp = self.manifest.manifestProject
    mp.PreSync()

    if opt.repo_upgraded:
        _PostRepoUpgrade(self.manifest, quiet=opt.quiet)

    if not opt.local_only:
        mp.Sync_NetworkHalf(quiet=opt.quiet,
                            current_branch_only=opt.current_branch_only,
                            no_tags=opt.no_tags,
                            optimized_fetch=opt.optimized_fetch)

    if mp.HasChanges:
        # The manifest itself changed: check it out and re-read it before
        # deciding which projects to sync.
        syncbuf = SyncBuffer(mp.config)
        mp.Sync_LocalHalf(syncbuf)
        if not syncbuf.Finish():
            sys.exit(1)
        self._ReloadManifest(manifest_name)
        if opt.jobs is None:
            self.jobs = self.manifest.default.sync_j

    all_projects = self.GetProjects(args,
                                    missing_ok=True,
                                    submodules_ok=opt.fetch_submodules)

    self._fetch_times = _FetchTimes(self.manifest)
    if not opt.local_only:
        to_fetch = []
        now = time.time()
        # Refresh the repo tool itself at most once a day.
        if _ONE_DAY_S <= (now - rp.LastFetch):
            to_fetch.append(rp)
        to_fetch.extend(all_projects)
        # Fetch historically-slow projects first to maximize overlap.
        to_fetch.sort(key=self._fetch_times.Get, reverse=True)

        fetched = self._Fetch(to_fetch, opt)
        _PostRepoFetch(rp, opt.no_repo_verify)
        if opt.network_only:
            # bail out now; the rest touches the working tree
            return

        # Iteratively fetch missing and/or nested unregistered submodules
        previously_missing_set = set()
        while True:
            self._ReloadManifest(manifest_name)
            all_projects = self.GetProjects(args,
                                            missing_ok=True,
                                            submodules_ok=opt.fetch_submodules)
            missing = []
            for project in all_projects:
                if project.gitdir not in fetched:
                    missing.append(project)
            if not missing:
                break
            # Stop us from non-stopped fetching actually-missing repos: If set of
            # missing repos has not been changed from last fetch, we break.
            missing_set = set(p.name for p in missing)
            if previously_missing_set == missing_set:
                break
            previously_missing_set = missing_set
            fetched.update(self._Fetch(missing, opt))

    if self.manifest.IsMirror or self.manifest.IsArchive:
        # bail out now, we have no working tree
        return

    if self.UpdateProjectList():
        sys.exit(1)

    # Check out the local half of every project with a working tree.
    syncbuf = SyncBuffer(mp.config,
                         detach_head = opt.detach_head)
    pm = Progress('Syncing work tree', len(all_projects))
    for project in all_projects:
        pm.update()
        if project.worktree:
            project.Sync_LocalHalf(syncbuf)
    pm.end()
    print(file=sys.stderr)
    if not syncbuf.Finish():
        sys.exit(1)

    # If there's a notice that's supposed to print at the end of the sync, print
    # it now...
    if self.manifest.notice:
        print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
    """Run post-upgrade hooks after the repo tool itself was updated.

    Sets up GnuPG for the wrapper if it has never been initialized, then
    notifies every project that already exists on disk.
    """
    repo_wrapper = Wrapper()
    if repo_wrapper.NeedSetupGnuPG():
        repo_wrapper.SetupGnuPG(quiet)
    for proj in manifest.projects:
        if not proj.Exists:
            continue
        proj.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
    """Upgrade the repo tool itself if a newer version was fetched.

    When the repo project has changes and the release tag verifies (or
    verification is disabled), check out the new version and raise
    RepoChangedException so the outer driver restarts with it.
    """
    if not rp.HasChanges:
        if verbose:
            print('repo version %s is current' % rp.work_git.describe(HEAD),
                  file=sys.stderr)
        return

    print('info: A new version of repo is available', file=sys.stderr)
    print(file=sys.stderr)

    if not (no_repo_verify or _VerifyTag(rp)):
        # Signature check failed: stay on the current version.
        print('warning: Skipped upgrade to unverified version', file=sys.stderr)
        return

    syncbuf = SyncBuffer(rp.config)
    rp.Sync_LocalHalf(syncbuf)
    if not syncbuf.Finish():
        sys.exit(1)
    print('info: Restarting repo with latest version', file=sys.stderr)
    raise RepoChangedException(['--repo-upgraded'])
def _VerifyTag(project):
    """Verify the GnuPG signature on the tag the repo project points at.

    Returns True when the signature verifies, or when verification must
    be skipped because GnuPG was never set up by 'repo init'.  Returns
    False when the revision is not an exact signed tag or 'git tag -v'
    fails.
    """
    gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
    if not os.path.exists(gpg_dir):
        # No keyring exists; we cannot verify, so warn and optimistically
        # continue.  (Fix: the message previously ended with a stray '"""'.)
        print('warning: GnuPG was not available during last "repo init"\n'
              'warning: Cannot automatically authenticate repo.',
              file=sys.stderr)
        return True

    try:
        cur = project.bare_git.describe(project.GetRevisionId())
    except GitError:
        cur = None

    if not cur \
       or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
        # describe failed, or the revision is N commits past a tag
        # ("<tag>-N-g<sha>"), i.e. not exactly a tag: nothing to verify.
        rev = project.revisionExpr
        if rev.startswith(R_HEADS):
            rev = rev[len(R_HEADS):]

        print(file=sys.stderr)
        print("warning: project '%s' branch '%s' is not signed"
              % (project.name, rev), file=sys.stderr)
        return False

    env = os.environ.copy()
    # NOTE(review): .encode() stores bytes in the environment; that
    # matches the Python 2 environment this was written for — confirm
    # before running under Python 3, where os.environ expects str.
    env['GIT_DIR'] = project.gitdir.encode()
    env['GNUPGHOME'] = gpg_dir.encode()

    cmd = [GIT, 'tag', '-v', cur]
    proc = subprocess.Popen(cmd,
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE,
                            env = env)
    out = proc.stdout.read()
    proc.stdout.close()

    err = proc.stderr.read()
    proc.stderr.close()

    if proc.wait() != 0:
        # Verification failed: show git's output so the user can see why.
        print(file=sys.stderr)
        print(out, file=sys.stderr)
        print(err, file=sys.stderr)
        print(file=sys.stderr)
        return False
    return True
class _FetchTimes(object):
    """Persistent per-project fetch durations, used to order fetches.

    Times are stored in .repo/.repo_fetchtimes.json and smoothed with an
    exponential moving average so one unusually slow fetch does not
    permanently dominate the ordering.
    """

    _ALPHA = 0.5  # EMA weight given to the most recent measurement

    def __init__(self, manifest):
        self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
        self._times = None   # lazily-loaded {project name: seconds}
        self._seen = set()   # names recorded this run; others pruned on Save

    def Get(self, project):
        """Return the smoothed fetch time for project (default: one day)."""
        self._Load()
        return self._times.get(project.name, _ONE_DAY_S)

    def Set(self, project, t):
        """Blend measurement t into the stored time for project."""
        self._Load()
        name = project.name
        old = self._times.get(name, t)
        self._seen.add(name)
        a = self._ALPHA
        self._times[name] = (a * t) + ((1 - a) * old)

    def _Load(self):
        # Load on first use; a missing or corrupt file resets the data.
        if self._times is None:
            try:
                with open(self._path) as f:
                    self._times = json.load(f)
            except (IOError, ValueError):
                try:
                    os.remove(self._path)
                except OSError:
                    pass
                self._times = {}

    def Save(self):
        """Write times back to disk, dropping projects not seen this run."""
        if self._times is None:
            return

        to_delete = [name for name in self._times if name not in self._seen]
        for name in to_delete:
            del self._times[name]

        try:
            with open(self._path, 'w') as f:
                json.dump(self._times, f, indent=2)
        except (IOError, TypeError):
            # Never let a bookkeeping failure break the sync; drop the
            # (possibly half-written) file instead.
            try:
                os.remove(self._path)
            except OSError:
                pass
| apache-2.0 |
facebookexperimental/eden | eden/hg-server/edenscm/hgext/hgevents/__init__.py | 2 | 5889 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
"""publishes state-enter and state-leave events to Watchman
Extension that is responsible for publishing state-enter and state-leave
events to Watchman for the following states:
- hg.filemerge
- hg.update
This was originally part of the fsmonitor extension, but it was split into its
own extension that can be used with Eden. (Note that fsmonitor is supposed to be
disabled when 'eden' is in repo.requirements.)
Note that hg.update state changes must be published to Watchman in order for it
to support SCM-aware subscriptions:
https://facebook.github.io/watchman/docs/scm-query.html.
"""
from __future__ import absolute_import
from edenscm.mercurial import extensions, filemerge, merge, perftrace, registrar
from edenscm.mercurial.i18n import _
from ..extlib import watchmanclient
# Registration table for configuration options this extension defines.
configtable = {}
configitem = registrar.configitem(configtable)

# When enabled, also publish watchman state events around transactions
# (used by the wlock wrapper installed in reposetup).
configitem("experimental", "fsmonitor.transaction_notify", default=False)

# This extension is incompatible with the following extensions
# and will disable itself when encountering one of these:
_incompatible_exts = ["largefiles", "eol"]
def extsetup(ui):
    """Install wrappers that publish watchman state events.

    Wraps filemerge._xmerge (external merge tool runs) and merge.update
    (working-copy updates); the two wrappers are independent.
    """
    extensions.wrapfunction(filemerge, "_xmerge", _xmerge)
    extensions.wrapfunction(merge, "update", wrapupdate)
def reposetup(ui, repo):
    """Attach a watchman client to the repo and wrap its wlock.

    Disables itself when an incompatible extension is loaded, when the
    repo is not local, or when a watchman client cannot be created.
    Otherwise subclasses the repo so that taking the wlock publishes an
    hg.transaction state-enter event and releasing it publishes the
    matching state-leave.
    """
    exts = extensions.enabled()
    for ext in _incompatible_exts:
        if ext in exts:
            ui.warn(
                _(
                    "The hgevents extension is incompatible with the %s "
                    "extension and has been disabled.\n"
                )
                % ext
            )
            return

    if not repo.local():
        return

    # Ensure there is a Watchman client associated with the repo that
    # state_update() can use later.
    try:
        watchmanclient.createclientforrepo(repo)
    except Exception as ex:
        ui.log("hgevents", "Watchman exception: %s\n", ex)
        return

    class hgeventsrepo(repo.__class__):
        def wlocknostateupdate(self, *args, **kwargs):
            # Escape hatch: take the wlock without publishing any events.
            return super(hgeventsrepo, self).wlock(*args, **kwargs)

        def wlock(self, *args, **kwargs):
            l = super(hgeventsrepo, self).wlock(*args, **kwargs)
            if not self._eventreporting:
                return l
            if not self.ui.configbool("experimental", "fsmonitor.transaction_notify"):
                return l
            if l.held != 1:
                # Only instrument the outermost acquisition; re-entrant
                # locks should not publish nested events.
                return l
            origrelease = l.releasefn

            def staterelease():
                # Runs when the lock is finally released: chain the
                # original release hook, then publish state-leave.
                if origrelease:
                    origrelease()
                if l.stateupdate:
                    with perftrace.trace("Watchman State Exit"):
                        l.stateupdate.exit()
                    l.stateupdate = None

            try:
                l.stateupdate = None
                l.stateupdate = watchmanclient.state_update(self, name="hg.transaction")
                with perftrace.trace("Watchman State Enter"):
                    l.stateupdate.enter()
                l.releasefn = staterelease
            except Exception:
                # Swallow any errors; fire and forget
                pass
            return l

    repo.__class__ = hgeventsrepo
# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands. This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(
    orig,
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    wc=None,
    **kwargs
):
    """Wrapper for merge.update that brackets the working-copy update
    with watchman hg.update state-enter/state-leave events.
    """
    if wc and wc.isinmemory():
        # In-memory checkouts never touch the working copy on disk, so
        # there is nothing for watchman to observe or settle on.
        return orig(
            repo,
            node,
            branchmerge,
            force,
            ancestor,
            mergeancestor,
            labels,
            matcher,
            wc=wc,
            **kwargs
        )

    oldnode = repo["."].node()
    newnode = repo[node].node()
    # A partial update (restricted by a matcher) gets no distance metric.
    is_partial = not (matcher is None or matcher.always())
    if is_partial:
        hops = 0
    else:
        hops = watchmanclient.calcdistance(repo, oldnode, newnode)

    with watchmanclient.state_update(
        repo,
        name="hg.update",
        oldnode=oldnode,
        newnode=newnode,
        distance=hops,
        partial=is_partial,
        metadata={"merge": branchmerge},
    ):
        return orig(
            repo,
            node,
            branchmerge,
            force,
            ancestor,
            mergeancestor,
            labels,
            matcher,
            **kwargs
        )
def _xmerge(origfunc, repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """Wrapper for filemerge._xmerge: brackets each external merge tool
    invocation with a single hg.filemerge state event pair.
    """
    with state_filemerge(repo, fcd.path()):
        result = origfunc(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
    return result
class state_filemerge(object):
    """Context manager publishing one hg.filemerge state event pair.

    Emits state-enter on entry and state-leave on exit (including on
    error).  If the repo has no watchman client attached, both are
    silent no-ops.
    """

    def __init__(self, repo, path):
        self.repo = repo
        self.path = path

    def __enter__(self):
        self._state("state-enter")

    def __exit__(self, errtype, value, tb):
        self._state("state-leave")

    def _state(self, name):
        # Only notify when reposetup() attached a client to this repo.
        client = getattr(self.repo, "_watchmanclient", None)
        if client is None:
            return
        payload = {"name": "hg.filemerge", "metadata": {"path": self.path}}
        try:
            client.command(name, payload)
        except Exception:
            # State notifications are advisory only, and so errors
            # don't block us from performing a checkout
            pass
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.